import itertools
+import re
import struct
import time
import pytest
from cassandra.policies import FallthroughRetryPolicy
from cassandra.query import SimpleStatement
-from dtest import Tester, create_ks
+from dtest import Tester, create_ks, mk_bman_path
from distutils.version import LooseVersion
from thrift_bindings.thrift010.ttypes import \
ConsistencyLevel as ThriftConsistencyLevel
cluster.set_configuration_options(values=config)
if not cluster.nodelist():
- cluster.populate(nodes).start(wait_for_binary_proto=True)
+ cluster.populate(nodes).start()
node1 = cluster.nodelist()[0]
session = self.patient_cql_connection(node1, protocol_version=protocol_version, user=user, password=password)
"""
cluster = self.cluster
-
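+ # DROP COMPACT STORAGE statements are guarded by the enable_drop_compact_storage flag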
+ cluster.set_configuration_options({'enable_drop_compact_storage': 'true'})
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
[2, None, 2, None],
[3, None, 3, None]])
+ @since('4.0')
+ def test_truncate_failure(self):
+ """
+ @jira_ticket CASSANDRA-16208
+ Tests that if a TRUNCATE query fails on some replica, the coordinator immediately returns an error to the
+ client instead of waiting for the request to time out for lack of the required number of success acks.
+ """
+ cluster = self.cluster
+ cluster.populate(3, install_byteman=True).start()
+ node1, _, node3 = cluster.nodelist()
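+ # truncate_fail.btm makes the truncation fail on node3 (see the expected error below)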
+ node3.byteman_submit([mk_bman_path('truncate_fail.btm')])
+
+ session = self.patient_exclusive_cql_connection(node1)
+ create_ks(session, 'ks', 3)
+
+ logger.debug("Creating data table")
+ session.execute("CREATE TABLE data (id int PRIMARY KEY, data text)")
+ session.execute("UPDATE data SET data = 'Awesome' WHERE id = 1")
+
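+ # the injected failure is expected to show up in the logs; ignore it so the log checks don't fail the test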
+ self.fixture_dtest_setup.ignore_log_patterns = ['Dummy failure']
+ logger.debug("Truncating data table (error expected)")
+
+ thrown = False
+ exception = None
+ try:
+ session.execute("TRUNCATE data")
+ except Exception as e:
+ exception = e
+ thrown = True
+
+ assert thrown, "No exception has been thrown"
+ assert re.search("Truncate failed on replica /127.0.0.3", str(exception)) is not None
@since('3.2')
class AbortedQueryTester(CQLTester):
# introduced by CASSANDRA-7392 to pause by the specified amount of milliseconds every
# CQL row iterated for non system queries, so that these queries take much longer to complete,
# see ReadCommand.withStateTracking()
- cluster.populate(1).start(wait_for_binary_proto=True,
- jvm_args=["-Dcassandra.monitoring_report_interval_ms=10",
+ cluster.populate(1).start(jvm_args=["-Dcassandra.monitoring_report_interval_ms=10",
"-Dcassandra.test.read_iteration_delay_ms=5"])
node = cluster.nodelist()[0]
session = self.patient_cql_connection(node)
'read_request_timeout_in_ms': 1000,
'range_request_timeout_in_ms': 1000})
- cluster.populate(1).start(wait_for_binary_proto=True,
- jvm_args=["-Dcassandra.monitoring_report_interval_ms=10",
+ cluster.populate(1).start(jvm_args=["-Dcassandra.monitoring_report_interval_ms=10",
"-Dcassandra.test.read_iteration_delay_ms=5"]) # see above for explanation
node = cluster.nodelist()[0]
session = self.patient_cql_connection(node)
# introduced by CASSANDRA-7392 to pause by the specified amount of milliseconds during each
# iteration of non system queries, so that these queries take much longer to complete,
# see ReadCommand.withStateTracking()
- cluster.populate(1).start(wait_for_binary_proto=True,
- jvm_args=["-Dcassandra.monitoring_report_interval_ms=10",
+ cluster.populate(1).start(jvm_args=["-Dcassandra.monitoring_report_interval_ms=10",
"-Dcassandra.test.read_iteration_delay_ms=1"])
node = cluster.nodelist()[0]
session = self.patient_cql_connection(node)
# introduced by CASSANDRA-7392 to pause by the specified amount of milliseconds during each
# iteration of non system queries, so that these queries take much longer to complete,
# see ReadCommand.withStateTracking()
- cluster.populate(1).start(wait_for_binary_proto=True,
- jvm_args=["-Dcassandra.monitoring_report_interval_ms=10",
+ cluster.populate(1).start(jvm_args=["-Dcassandra.monitoring_report_interval_ms=10",
"-Dcassandra.test.read_iteration_delay_ms=1"])
node = cluster.nodelist()[0]
session = self.patient_cql_connection(node)
CREATE TABLE {} (
k int,
v int,
+ l list<int>,
+ s set<int>,
+ m map<int, int>,
+ "Escaped_Name" int,
PRIMARY KEY(k)
);
""".format(table))
query="SELECT * FROM {} WHERE v <= 2 ALLOW FILTERING",
logged_query="SELECT \* FROM ks.{} WHERE v <= 2")
+ # test logging of slow queries with column selections
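+ # (the logged query is expected to name the selected regular columns in a normalized order, e.g. "v,s" is logged as "s, v")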
+ TestCQLSlowQuery._assert_logs(node, session, table,
+ query="SELECT v FROM {}",
+ logged_query="SELECT v FROM ks.{}")
+ TestCQLSlowQuery._assert_logs(node, session, table,
+ query="SELECT l FROM {}",
+ logged_query="SELECT l FROM ks.{}")
+ TestCQLSlowQuery._assert_logs(node, session, table,
+ query="SELECT s FROM {}",
+ logged_query="SELECT s FROM ks.{}")
+ TestCQLSlowQuery._assert_logs(node, session, table,
+ query="SELECT m FROM {}",
+ logged_query="SELECT m FROM ks.{}")
+ TestCQLSlowQuery._assert_logs(node, session, table,
+ query="SELECT \"Escaped_Name\" FROM {}",
+ logged_query="SELECT \"Escaped_Name\" FROM ks.{}")
+ TestCQLSlowQuery._assert_logs(node, session, table,
+ query="SELECT v,s FROM {}",
+ logged_query="SELECT s, v FROM ks.{}")
+ TestCQLSlowQuery._assert_logs(node, session, table,
+ query="SELECT k,v,s FROM {}",
+ logged_query="SELECT s, v FROM ks.{}")
+
+ # test logging of slow queries with primary key-only column selections
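+ # (selecting only primary key columns is expected to be logged as SELECT *)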
+ TestCQLSlowQuery._assert_logs(node, session, table,
+ query="SELECT k FROM {}",
+ logged_query="SELECT \* FROM ks.{}")
+
+ # test logging of slow queries with sub-selections (only supported since 4.0)
+ if node.cluster.version() >= '4.0':
+ TestCQLSlowQuery._assert_logs(node, session, table,
+ query="SELECT s[0] FROM {}",
+ logged_query="SELECT s\[0\] FROM ks.{}")
+ TestCQLSlowQuery._assert_logs(node, session, table,
+ query="SELECT m[0] FROM {}",
+ logged_query="SELECT m\[0\] FROM ks.{}")
+ TestCQLSlowQuery._assert_logs(node, session, table,
+ query="SELECT k,v,s[0],m[1] FROM {}",
+ logged_query="SELECT m\[1\], s\[0\], v FROM ks.{}")
+
@staticmethod
def _assert_logs_slow_queries_with_wide_table(node, session, asc=True):
create_ks(session, 'ks', 1)
c2 int,
v int,
s int STATIC,
+ lt list<int>,
+ st set<int>,
+ mp map<int, int>,
+ "Escaped_Name" int,
PRIMARY KEY(k, c1, c2)
) WITH CLUSTERING ORDER BY (c1 {}, c2 {});
""".format(table, "ASC" if asc else "DESC", "ASC" if asc else "DESC"))
query="SELECT * FROM {} WHERE c1 IN (1) ALLOW FILTERING",
logged_query="SELECT \* FROM ks.{} WHERE c1 = 1")
TestCQLSlowQuery._assert_logs(node, session, table,
- query="SELECT * FROM {} WHERE c1 IN (1, 2) ALLOW FILTERING",
- logged_query="SELECT \* FROM ks.{} WHERE c1 IN \({}, {}\)"
+ query="SELECT v FROM {} WHERE c1 IN (1, 2) ALLOW FILTERING",
+ logged_query="SELECT v FROM ks.{} WHERE c1 IN \({}, {}\)"
.format(table, 1 if asc else 2, 2 if asc else 1))
TestCQLSlowQuery._assert_logs(node, session, table,
query="SELECT * FROM {} WHERE c1 > 0 ALLOW FILTERING",
query="SELECT * FROM {} WHERE s <= 2 ALLOW FILTERING",
logged_query="SELECT \* FROM ks.{} WHERE s <= 2")
+ # test logging of slow queries with column selections
+ TestCQLSlowQuery._assert_logs(node, session, table,
+ query="SELECT s FROM {}",
+ logged_query="SELECT s FROM ks.{}")
+ TestCQLSlowQuery._assert_logs(node, session, table,
+ query="SELECT v FROM {}",
+ logged_query="SELECT v FROM ks.{}")
+ TestCQLSlowQuery._assert_logs(node, session, table,
+ query="SELECT lt FROM {}",
+ logged_query="SELECT lt FROM ks.{}")
+ TestCQLSlowQuery._assert_logs(node, session, table,
+ query="SELECT st FROM {}",
+ logged_query="SELECT st FROM ks.{}")
+ TestCQLSlowQuery._assert_logs(node, session, table,
+ query="SELECT mp FROM {}",
+ logged_query="SELECT mp FROM ks.{}")
+ TestCQLSlowQuery._assert_logs(node, session, table,
+ query="SELECT \"Escaped_Name\" FROM {}",
+ logged_query="SELECT \"Escaped_Name\" FROM ks.{}")
+ TestCQLSlowQuery._assert_logs(node, session, table,
+ query="SELECT v,s,st FROM {}",
+ logged_query="SELECT s, st, v FROM ks.{}")
+ TestCQLSlowQuery._assert_logs(node, session, table,
+ query="SELECT k,v,s,st FROM {}",
+ logged_query="SELECT s, st, v FROM ks.{}")
+ TestCQLSlowQuery._assert_logs(node, session, table,
+ query="SELECT k,c1,v,st FROM {}",
+ logged_query="SELECT st, v FROM ks.{}")
+ TestCQLSlowQuery._assert_logs(node, session, table,
+ query="SELECT k,c2,v,st FROM {}",
+ logged_query="SELECT st, v FROM ks.{}")
+ TestCQLSlowQuery._assert_logs(node, session, table,
+ query="SELECT k,c1,c2,v,st FROM {}",
+ logged_query="SELECT st, v FROM ks.{}")
+
+ # test logging of slow queries with primary key-only column selections
+ logged_query = "SELECT \* FROM ks.{}"
+ TestCQLSlowQuery._assert_logs(node, session, table, query="SELECT k FROM {}", logged_query=logged_query)
+ TestCQLSlowQuery._assert_logs(node, session, table, query="SELECT c1 FROM {}", logged_query=logged_query)
+ TestCQLSlowQuery._assert_logs(node, session, table, query="SELECT c2 FROM {}", logged_query=logged_query)
+ TestCQLSlowQuery._assert_logs(node, session, table, query="SELECT k,c1 FROM {}", logged_query=logged_query)
+ TestCQLSlowQuery._assert_logs(node, session, table, query="SELECT k,c1,c2 FROM {}", logged_query=logged_query)
+
+ # test logging of slow queries with column sub-selections (only supported since 4.0)
+ if node.cluster.version() >= '4.0':
+ TestCQLSlowQuery._assert_logs(node, session, table,
+ query="SELECT st[0] FROM {}",
+ logged_query="SELECT st\[0\] FROM ks.{}")
+ TestCQLSlowQuery._assert_logs(node, session, table,
+ query="SELECT mp[0] FROM {}",
+ logged_query="SELECT mp\[0\] FROM ks.{}")
+ TestCQLSlowQuery._assert_logs(node, session, table,
+ query="SELECT k,c1,v,st[0],mp[1] FROM {}",
+ logged_query="SELECT mp\[1\], st\[0\], v FROM ks.{}")
+
@staticmethod
def _assert_logs(node, session, table, query, logged_query):
def fixture_post_initialize_cluster(self, fixture_dtest_setup):
cluster = fixture_dtest_setup.cluster
cluster.populate(3)
- cluster.start(wait_for_binary_proto=True)
+ cluster.start()
def get_lwttester_session(self):
node1 = self.cluster.nodelist()[0]