<exclude name="**/doc/antora.yml"/>
<exclude name="**/test/conf/cassandra.yaml"/>
<exclude name="**/test/conf/cassandra-old.yaml"/>
+ <exclude name="**/test/conf/cassandra-converters-special-cases-old-names.yaml"/>
+ <exclude name="**/test/conf/cassandra-converters-special-cases.yaml"/>
<exclude name="**/test/conf/cassandra_encryption.yaml"/>
<exclude name="**/test/conf/cdc.yaml"/>
<exclude name="**/test/conf/commitlog_compression_LZ4.yaml"/>
requires:
- start_j8_jvm_dtests
- j8_build
+ - j8_simulator_dtests:
+ requires:
+ - start_j8_jvm_dtests
+ - j8_build
- j8_jvm_dtests_vnode:
requires:
- start_j8_jvm_dtests
- j8_unit_tests:
requires:
- j8_build
+ - j8_simulator_dtests:
+ requires:
+ - j8_build
- j8_jvm_dtests:
requires:
- j8_build
- log_environment
- run_parallel_junit_tests
+ j8_simulator_dtests:
+ <<: *j8_small_executor
+ steps:
+ - attach_workspace:
+ at: /home/cassandra
+ - create_junit_containers
+ - log_environment
+ - run_simulator_tests
+
j8_jvm_dtests:
<<: *j8_small_par_executor
steps:
no_output_timeout: 15m
+ run_simulator_tests:
+ parameters:
+ no_output_timeout:
+ type: string
+ default: 30m
+ steps:
+ - run:
+ name: Run Simulator Tests
+ command: |
+ set -x
+ export PATH=$JAVA_HOME/bin:$PATH
+ time mv ~/cassandra /tmp
+ cd /tmp/cassandra
+ if [ -d ~/dtest_jars ]; then
+ cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+ fi
+ ant test-simulator-dtest
+ no_output_timeout: <<parameters.no_output_timeout>>
+ - store_test_results:
+ path: /tmp/cassandra/build/test/output/
+ - store_artifacts:
+ path: /tmp/cassandra/build/test/output
+ destination: junitxml
+ - store_artifacts:
+ path: /tmp/cassandra/build/test/logs
+ destination: logs
+
run_junit_tests:
parameters:
target:
---- config-2_1.yml 2022-05-30 12:06:34.000000000 -0400
-+++ config-2_1.yml.HIGHRES 2022-05-30 12:06:59.000000000 -0400
+--- config-2_1.yml 2022-05-30 12:09:35.000000000 -0400
++++ config-2_1.yml.HIGHRES 2022-05-30 12:10:16.000000000 -0400
@@ -105,14 +105,14 @@
j8_par_executor: &j8_par_executor
executor:
---- config-2_1.yml 2022-05-30 12:06:34.000000000 -0400
-+++ config-2_1.yml.MIDRES 2022-05-30 12:06:52.000000000 -0400
+--- config-2_1.yml 2022-05-30 12:09:35.000000000 -0400
++++ config-2_1.yml.MIDRES 2022-05-30 12:10:10.000000000 -0400
@@ -105,14 +105,14 @@
j8_par_executor: &j8_par_executor
executor:
- JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
- JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
- CASSANDRA_USE_JDK11: true
+ j8_simulator_dtests:
+ docker:
+ - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+ resource_class: medium
+ working_directory: ~/
+ shell: /bin/bash -eo pipefail -l
+ parallelism: 1
+ steps:
+ - attach_workspace:
+ at: /home/cassandra
+ - run:
+ name: Determine unit Tests to Run
+ command: |
+ # reminder: this code (along with all the steps) is independently executed on every circle container
+ # so the goal here is to get the circleci script to return the tests *this* container will run
+ # which we do via the `circleci` cli tool.
+
+ rm -fr ~/cassandra-dtest/upgrade_tests
+ echo "***java tests***"
+
+ # get all of our unit test filenames
+ set -eo pipefail && circleci tests glob "$HOME/cassandra/test/unit/**/*.java" > /tmp/all_java_unit_tests.txt
+
+ # split up the unit tests into groups based on the number of containers we have
+ set -eo pipefail && circleci tests split --split-by=timings --timings-type=filename --index=${CIRCLE_NODE_INDEX} --total=${CIRCLE_NODE_TOTAL} /tmp/all_java_unit_tests.txt > /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt
+ set -eo pipefail && cat /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt | sed "s;^/home/cassandra/cassandra/test/unit/;;g" | grep "Test\.java$" > /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+ echo "** /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt"
+ cat /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+ no_output_timeout: 15m
+ - run:
+ name: Log Environment Information
+ command: |
+ echo '*** id ***'
+ id
+ echo '*** cat /proc/cpuinfo ***'
+ cat /proc/cpuinfo
+ echo '*** free -m ***'
+ free -m
+ echo '*** df -m ***'
+ df -m
+ echo '*** ifconfig -a ***'
+ ifconfig -a
+ echo '*** uname -a ***'
+ uname -a
+ echo '*** mount ***'
+ mount
+ echo '*** env ***'
+ env
+ echo '*** java ***'
+ which java
+ java -version
+ - run:
+ name: Run Simulator Tests
+ command: |
+ set -x
+ export PATH=$JAVA_HOME/bin:$PATH
+ time mv ~/cassandra /tmp
+ cd /tmp/cassandra
+ if [ -d ~/dtest_jars ]; then
+ cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+ fi
+ ant test-simulator-dtest
+ no_output_timeout: 30m
+ - store_test_results:
+ path: /tmp/cassandra/build/test/output/
+ - store_artifacts:
+ path: /tmp/cassandra/build/test/output
+ destination: junitxml
+ - store_artifacts:
+ path: /tmp/cassandra/build/test/logs
+ destination: logs
+ environment:
+ - ANT_HOME: /usr/share/ant
+ - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+ - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+ - LANG: en_US.UTF-8
+ - KEEP_TEST_DIR: true
+ - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+ - PYTHONIOENCODING: utf-8
+ - PYTHONUNBUFFERED: true
+ - CASS_DRIVER_NO_EXTENSIONS: true
+ - CASS_DRIVER_NO_CYTHON: true
+ - CASSANDRA_SKIP_SYNC: true
+ - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+ - DTEST_BRANCH: trunk
+ - CCM_MAX_HEAP_SIZE: 1024M
+ - CCM_HEAP_NEWSIZE: 256M
+ - REPEATED_UTEST_TARGET: testsome
+ - REPEATED_UTEST_CLASS: null
+ - REPEATED_UTEST_METHODS: null
+ - REPEATED_UTEST_VNODES: false
+ - REPEATED_UTEST_COUNT: 100
+ - REPEATED_UTEST_STOP_ON_FAILURE: false
+ - REPEATED_DTEST_NAME: null
+ - REPEATED_DTEST_VNODES: false
+ - REPEATED_DTEST_COUNT: 100
+ - REPEATED_DTEST_STOP_ON_FAILURE: false
+ - REPEATED_UPGRADE_DTEST_NAME: null
+ - REPEATED_UPGRADE_DTEST_COUNT: 100
+ - REPEATED_UPGRADE_DTEST_STOP_ON_FAILURE: false
+ - REPEATED_JVM_UPGRADE_DTEST_CLASS: null
+ - REPEATED_JVM_UPGRADE_DTEST_METHODS: null
+ - REPEATED_JVM_UPGRADE_DTEST_COUNT: 100
+ - REPEATED_JVM_UPGRADE_DTEST_STOP_ON_FAILURE: false
+ - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+ - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
j8_cqlsh-dtests-py3-with-vnodes:
docker:
- image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
requires:
- start_j8_jvm_dtests
- j8_build
+ - j8_simulator_dtests:
+ requires:
+ - start_j8_jvm_dtests
+ - j8_build
- j8_jvm_dtests_vnode:
requires:
- start_j8_jvm_dtests
- j8_unit_tests:
requires:
- j8_build
+ - j8_simulator_dtests:
+ requires:
+ - j8_build
- j8_jvm_dtests:
requires:
- j8_build
- JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
- JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
- CASSANDRA_USE_JDK11: true
+ j8_simulator_dtests:
+ docker:
+ - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+ resource_class: medium
+ working_directory: ~/
+ shell: /bin/bash -eo pipefail -l
+ parallelism: 1
+ steps:
+ - attach_workspace:
+ at: /home/cassandra
+ - run:
+ name: Determine unit Tests to Run
+ command: |
+ # reminder: this code (along with all the steps) is independently executed on every circle container
+ # so the goal here is to get the circleci script to return the tests *this* container will run
+ # which we do via the `circleci` cli tool.
+
+ rm -fr ~/cassandra-dtest/upgrade_tests
+ echo "***java tests***"
+
+ # get all of our unit test filenames
+ set -eo pipefail && circleci tests glob "$HOME/cassandra/test/unit/**/*.java" > /tmp/all_java_unit_tests.txt
+
+ # split up the unit tests into groups based on the number of containers we have
+ set -eo pipefail && circleci tests split --split-by=timings --timings-type=filename --index=${CIRCLE_NODE_INDEX} --total=${CIRCLE_NODE_TOTAL} /tmp/all_java_unit_tests.txt > /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt
+ set -eo pipefail && cat /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt | sed "s;^/home/cassandra/cassandra/test/unit/;;g" | grep "Test\.java$" > /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+ echo "** /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt"
+ cat /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+ no_output_timeout: 15m
+ - run:
+ name: Log Environment Information
+ command: |
+ echo '*** id ***'
+ id
+ echo '*** cat /proc/cpuinfo ***'
+ cat /proc/cpuinfo
+ echo '*** free -m ***'
+ free -m
+ echo '*** df -m ***'
+ df -m
+ echo '*** ifconfig -a ***'
+ ifconfig -a
+ echo '*** uname -a ***'
+ uname -a
+ echo '*** mount ***'
+ mount
+ echo '*** env ***'
+ env
+ echo '*** java ***'
+ which java
+ java -version
+ - run:
+ name: Run Simulator Tests
+ command: |
+ set -x
+ export PATH=$JAVA_HOME/bin:$PATH
+ time mv ~/cassandra /tmp
+ cd /tmp/cassandra
+ if [ -d ~/dtest_jars ]; then
+ cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+ fi
+ ant test-simulator-dtest
+ no_output_timeout: 30m
+ - store_test_results:
+ path: /tmp/cassandra/build/test/output/
+ - store_artifacts:
+ path: /tmp/cassandra/build/test/output
+ destination: junitxml
+ - store_artifacts:
+ path: /tmp/cassandra/build/test/logs
+ destination: logs
+ environment:
+ - ANT_HOME: /usr/share/ant
+ - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+ - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+ - LANG: en_US.UTF-8
+ - KEEP_TEST_DIR: true
+ - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+ - PYTHONIOENCODING: utf-8
+ - PYTHONUNBUFFERED: true
+ - CASS_DRIVER_NO_EXTENSIONS: true
+ - CASS_DRIVER_NO_CYTHON: true
+ - CASSANDRA_SKIP_SYNC: true
+ - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+ - DTEST_BRANCH: trunk
+ - CCM_MAX_HEAP_SIZE: 1024M
+ - CCM_HEAP_NEWSIZE: 256M
+ - REPEATED_UTEST_TARGET: testsome
+ - REPEATED_UTEST_CLASS: null
+ - REPEATED_UTEST_METHODS: null
+ - REPEATED_UTEST_VNODES: false
+ - REPEATED_UTEST_COUNT: 100
+ - REPEATED_UTEST_STOP_ON_FAILURE: false
+ - REPEATED_DTEST_NAME: null
+ - REPEATED_DTEST_VNODES: false
+ - REPEATED_DTEST_COUNT: 100
+ - REPEATED_DTEST_STOP_ON_FAILURE: false
+ - REPEATED_UPGRADE_DTEST_NAME: null
+ - REPEATED_UPGRADE_DTEST_COUNT: 100
+ - REPEATED_UPGRADE_DTEST_STOP_ON_FAILURE: false
+ - REPEATED_JVM_UPGRADE_DTEST_CLASS: null
+ - REPEATED_JVM_UPGRADE_DTEST_METHODS: null
+ - REPEATED_JVM_UPGRADE_DTEST_COUNT: 100
+ - REPEATED_JVM_UPGRADE_DTEST_STOP_ON_FAILURE: false
+ - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+ - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
j8_cqlsh-dtests-py3-with-vnodes:
docker:
- image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
requires:
- start_j8_jvm_dtests
- j8_build
+ - j8_simulator_dtests:
+ requires:
+ - start_j8_jvm_dtests
+ - j8_build
- j8_jvm_dtests_vnode:
requires:
- start_j8_jvm_dtests
- j8_unit_tests:
requires:
- j8_build
+ - j8_simulator_dtests:
+ requires:
+ - j8_build
- j8_jvm_dtests:
requires:
- j8_build
- JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
- JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
- CASSANDRA_USE_JDK11: true
+ j8_simulator_dtests:
+ docker:
+ - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+ resource_class: medium
+ working_directory: ~/
+ shell: /bin/bash -eo pipefail -l
+ parallelism: 1
+ steps:
+ - attach_workspace:
+ at: /home/cassandra
+ - run:
+ name: Determine unit Tests to Run
+ command: |
+ # reminder: this code (along with all the steps) is independently executed on every circle container
+ # so the goal here is to get the circleci script to return the tests *this* container will run
+ # which we do via the `circleci` cli tool.
+
+ rm -fr ~/cassandra-dtest/upgrade_tests
+ echo "***java tests***"
+
+ # get all of our unit test filenames
+ set -eo pipefail && circleci tests glob "$HOME/cassandra/test/unit/**/*.java" > /tmp/all_java_unit_tests.txt
+
+ # split up the unit tests into groups based on the number of containers we have
+ set -eo pipefail && circleci tests split --split-by=timings --timings-type=filename --index=${CIRCLE_NODE_INDEX} --total=${CIRCLE_NODE_TOTAL} /tmp/all_java_unit_tests.txt > /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt
+ set -eo pipefail && cat /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt | sed "s;^/home/cassandra/cassandra/test/unit/;;g" | grep "Test\.java$" > /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+ echo "** /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt"
+ cat /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+ no_output_timeout: 15m
+ - run:
+ name: Log Environment Information
+ command: |
+ echo '*** id ***'
+ id
+ echo '*** cat /proc/cpuinfo ***'
+ cat /proc/cpuinfo
+ echo '*** free -m ***'
+ free -m
+ echo '*** df -m ***'
+ df -m
+ echo '*** ifconfig -a ***'
+ ifconfig -a
+ echo '*** uname -a ***'
+ uname -a
+ echo '*** mount ***'
+ mount
+ echo '*** env ***'
+ env
+ echo '*** java ***'
+ which java
+ java -version
+ - run:
+ name: Run Simulator Tests
+ command: |
+ set -x
+ export PATH=$JAVA_HOME/bin:$PATH
+ time mv ~/cassandra /tmp
+ cd /tmp/cassandra
+ if [ -d ~/dtest_jars ]; then
+ cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+ fi
+ ant test-simulator-dtest
+ no_output_timeout: 30m
+ - store_test_results:
+ path: /tmp/cassandra/build/test/output/
+ - store_artifacts:
+ path: /tmp/cassandra/build/test/output
+ destination: junitxml
+ - store_artifacts:
+ path: /tmp/cassandra/build/test/logs
+ destination: logs
+ environment:
+ - ANT_HOME: /usr/share/ant
+ - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+ - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+ - LANG: en_US.UTF-8
+ - KEEP_TEST_DIR: true
+ - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+ - PYTHONIOENCODING: utf-8
+ - PYTHONUNBUFFERED: true
+ - CASS_DRIVER_NO_EXTENSIONS: true
+ - CASS_DRIVER_NO_CYTHON: true
+ - CASSANDRA_SKIP_SYNC: true
+ - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+ - DTEST_BRANCH: trunk
+ - CCM_MAX_HEAP_SIZE: 1024M
+ - CCM_HEAP_NEWSIZE: 256M
+ - REPEATED_UTEST_TARGET: testsome
+ - REPEATED_UTEST_CLASS: null
+ - REPEATED_UTEST_METHODS: null
+ - REPEATED_UTEST_VNODES: false
+ - REPEATED_UTEST_COUNT: 100
+ - REPEATED_UTEST_STOP_ON_FAILURE: false
+ - REPEATED_DTEST_NAME: null
+ - REPEATED_DTEST_VNODES: false
+ - REPEATED_DTEST_COUNT: 100
+ - REPEATED_DTEST_STOP_ON_FAILURE: false
+ - REPEATED_UPGRADE_DTEST_NAME: null
+ - REPEATED_UPGRADE_DTEST_COUNT: 100
+ - REPEATED_UPGRADE_DTEST_STOP_ON_FAILURE: false
+ - REPEATED_JVM_UPGRADE_DTEST_CLASS: null
+ - REPEATED_JVM_UPGRADE_DTEST_METHODS: null
+ - REPEATED_JVM_UPGRADE_DTEST_COUNT: 100
+ - REPEATED_JVM_UPGRADE_DTEST_STOP_ON_FAILURE: false
+ - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+ - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
j8_cqlsh-dtests-py3-with-vnodes:
docker:
- image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
requires:
- start_j8_jvm_dtests
- j8_build
+ - j8_simulator_dtests:
+ requires:
+ - start_j8_jvm_dtests
+ - j8_build
- j8_jvm_dtests_vnode:
requires:
- start_j8_jvm_dtests
- j8_unit_tests:
requires:
- j8_build
+ - j8_simulator_dtests:
+ requires:
+ - j8_build
- j8_jvm_dtests:
requires:
- j8_build
- JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
- JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
- CASSANDRA_USE_JDK11: true
+ j8_simulator_dtests:
+ docker:
+ - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+ resource_class: medium
+ working_directory: ~/
+ shell: /bin/bash -eo pipefail -l
+ parallelism: 1
+ steps:
+ - attach_workspace:
+ at: /home/cassandra
+ - run:
+ name: Determine unit Tests to Run
+ command: |
+ # reminder: this code (along with all the steps) is independently executed on every circle container
+ # so the goal here is to get the circleci script to return the tests *this* container will run
+ # which we do via the `circleci` cli tool.
+
+ rm -fr ~/cassandra-dtest/upgrade_tests
+ echo "***java tests***"
+
+ # get all of our unit test filenames
+ set -eo pipefail && circleci tests glob "$HOME/cassandra/test/unit/**/*.java" > /tmp/all_java_unit_tests.txt
+
+ # split up the unit tests into groups based on the number of containers we have
+ set -eo pipefail && circleci tests split --split-by=timings --timings-type=filename --index=${CIRCLE_NODE_INDEX} --total=${CIRCLE_NODE_TOTAL} /tmp/all_java_unit_tests.txt > /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt
+ set -eo pipefail && cat /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt | sed "s;^/home/cassandra/cassandra/test/unit/;;g" | grep "Test\.java$" > /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+ echo "** /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt"
+ cat /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+ no_output_timeout: 15m
+ - run:
+ name: Log Environment Information
+ command: |
+ echo '*** id ***'
+ id
+ echo '*** cat /proc/cpuinfo ***'
+ cat /proc/cpuinfo
+ echo '*** free -m ***'
+ free -m
+ echo '*** df -m ***'
+ df -m
+ echo '*** ifconfig -a ***'
+ ifconfig -a
+ echo '*** uname -a ***'
+ uname -a
+ echo '*** mount ***'
+ mount
+ echo '*** env ***'
+ env
+ echo '*** java ***'
+ which java
+ java -version
+ - run:
+ name: Run Simulator Tests
+ command: |
+ set -x
+ export PATH=$JAVA_HOME/bin:$PATH
+ time mv ~/cassandra /tmp
+ cd /tmp/cassandra
+ if [ -d ~/dtest_jars ]; then
+ cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+ fi
+ ant test-simulator-dtest
+ no_output_timeout: 30m
+ - store_test_results:
+ path: /tmp/cassandra/build/test/output/
+ - store_artifacts:
+ path: /tmp/cassandra/build/test/output
+ destination: junitxml
+ - store_artifacts:
+ path: /tmp/cassandra/build/test/logs
+ destination: logs
+ environment:
+ - ANT_HOME: /usr/share/ant
+ - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+ - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+ - LANG: en_US.UTF-8
+ - KEEP_TEST_DIR: true
+ - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+ - PYTHONIOENCODING: utf-8
+ - PYTHONUNBUFFERED: true
+ - CASS_DRIVER_NO_EXTENSIONS: true
+ - CASS_DRIVER_NO_CYTHON: true
+ - CASSANDRA_SKIP_SYNC: true
+ - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+ - DTEST_BRANCH: trunk
+ - CCM_MAX_HEAP_SIZE: 1024M
+ - CCM_HEAP_NEWSIZE: 256M
+ - REPEATED_UTEST_TARGET: testsome
+ - REPEATED_UTEST_CLASS: null
+ - REPEATED_UTEST_METHODS: null
+ - REPEATED_UTEST_VNODES: false
+ - REPEATED_UTEST_COUNT: 100
+ - REPEATED_UTEST_STOP_ON_FAILURE: false
+ - REPEATED_DTEST_NAME: null
+ - REPEATED_DTEST_VNODES: false
+ - REPEATED_DTEST_COUNT: 100
+ - REPEATED_DTEST_STOP_ON_FAILURE: false
+ - REPEATED_UPGRADE_DTEST_NAME: null
+ - REPEATED_UPGRADE_DTEST_COUNT: 100
+ - REPEATED_UPGRADE_DTEST_STOP_ON_FAILURE: false
+ - REPEATED_JVM_UPGRADE_DTEST_CLASS: null
+ - REPEATED_JVM_UPGRADE_DTEST_METHODS: null
+ - REPEATED_JVM_UPGRADE_DTEST_COUNT: 100
+ - REPEATED_JVM_UPGRADE_DTEST_STOP_ON_FAILURE: false
+ - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+ - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
j8_cqlsh-dtests-py3-with-vnodes:
docker:
- image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
requires:
- start_j8_jvm_dtests
- j8_build
+ - j8_simulator_dtests:
+ requires:
+ - start_j8_jvm_dtests
+ - j8_build
- j8_jvm_dtests_vnode:
requires:
- start_j8_jvm_dtests
- j8_unit_tests:
requires:
- j8_build
+ - j8_simulator_dtests:
+ requires:
+ - j8_build
- j8_jvm_dtests:
requires:
- j8_build
-4.1-alpha2
+4.2
+ * DataOutputBuffer#scratchBuffer can use off-heap or on-heap memory as a means to control memory allocations (CASSANDRA-16471)
+ * Add ability to read the TTLs and write times of the elements of a collection and/or UDT (CASSANDRA-8877)
+ * Removed Python < 2.7 support from formatting.py (CASSANDRA-17694)
+ * Cleanup pylint issues with pylexotron.py (CASSANDRA-17779)
+ * NPE bug in streaming checking if SSTable is being repaired (CASSANDRA-17801)
+ * Users of NativeLibrary should handle lack of JNA appropriately when running in client mode (CASSANDRA-17794)
+ * Warn on unknown directories found in system keyspace directory rather than kill node during startup checks (CASSANDRA-17777)
+ * Log duplicate rows sharing a partition key found in verify and scrub (CASSANDRA-17789)
+ * Add separate thread pool for Secondary Index building so it doesn't block compactions (CASSANDRA-17781)
+ * Added JMX call to getSSTableCountPerTWCSBucket for TWCS (CASSANDRA-17774)
+ * When doing a host replacement, -Dcassandra.broadcast_interval_ms is used to know when to check the ring but checks that the ring wasn't changed in -Dcassandra.ring_delay_ms; changes to ring delay should not depend on when we publish load stats (CASSANDRA-17776)
+ * When bootstrap fails, CassandraRoleManager may attempt to do read queries that fail with "Cannot read from a bootstrapping node", and increments unavailables counters (CASSANDRA-17754)
+ * Add guardrail to disallow DROP KEYSPACE commands (CASSANDRA-17767)
+ * Remove ephemeral snapshot marker file and introduce a flag to SnapshotManifest (CASSANDRA-16911)
+ * Add a virtual table that exposes currently running queries (CASSANDRA-15241)
+ * Allow sstableloader to specify table without relying on path (CASSANDRA-16584)
+ * Fix TestGossipingPropertyFileSnitch.test_prefer_local_reconnect_on_listen_address (CASSANDRA-17700)
+ * Add ByteComparable API (CASSANDRA-6936)
+ * Add guardrail for maximum replication factor (CASSANDRA-17500)
+ * Increment CQLSH to version 6.2.0 for release 4.2 (CASSANDRA-17646)
+ * Adding support to perform certificate based internode authentication (CASSANDRA-17661)
+ * Option to disable CDC writes of repaired data (CASSANDRA-17666)
+ * When a node is bootstrapping it gets the whole gossip state but applies in random order causing some cases where StorageService will fail causing an instance to not show up in TokenMetadata (CASSANDRA-17676)
+ * Add CQLSH command SHOW REPLICAS (CASSANDRA-17577)
+ * Add guardrail to allow disabling of SimpleStrategy (CASSANDRA-17647)
+ * Change default directory permission to 750 in packaging (CASSANDRA-17470)
+ * Adding support for TLS client authentication for internode communication (CASSANDRA-17513)
+ * Add new CQL function maxWritetime (CASSANDRA-17425)
+ * Add guardrail for ALTER TABLE ADD / DROP / REMOVE column operations (CASSANDRA-17495)
+ * Rename DisableFlag class to EnableFlag on guardrails (CASSANDRA-17544)
+Merged from 4.1:
+ * Fix a race condition where a keyspace can be opened while it is being removed (CASSANDRA-17658)
+ * DatabaseDescriptor will set the default failure detector during client initialization (CASSANDRA-17782)
+ * Avoid initializing schema via SystemKeyspace.getPreferredIP() with the BulkLoader tool (CASSANDRA-17740)
+ * Uncomment prepared_statements_cache_size, key_cache_size, counter_cache_size, index_summary_capacity which were
+ commented out by mistake in a previous patch
+ Fix breaking change with cache_load_timeout; cache_load_timeout_seconds <=0 and cache_load_timeout=0 are equivalent
+ and they both mean disabled
+ Deprecate public method setRate(final double throughputMbPerSec) in Compaction Manager in favor of
+ setRateInBytes(final double throughputBytesPerSec)
+ Revert breaking change removal of StressCQLSSTableWriter.Builder.withBufferSizeInMB(int size). Deprecate it in favor
+ of StressCQLSSTableWriter.Builder.withBufferSizeInMiB(int size)
+ Fix precision issues, add new -m flag (for nodetool/setstreamthroughput, nodetool/setinterdcstreamthroughput,
+ nodetool/getstreamthroughput and nodetool/getinterdcstreamthroughput), add new -d flags (nodetool/getstreamthroughput, nodetool/getinterdcstreamthroughput, nodetool/getcompactionthroughput)
+ Fix a bug with precision in nodetool/compactionstats
+ Deprecate StorageService methods and add new ones for stream_throughput_outbound, inter_dc_stream_throughput_outbound,
+ compaction_throughput_outbound in the JMX MBean `org.apache.cassandra.db:type=StorageService`
+ Removed getEntireSSTableStreamThroughputMebibytesPerSec in favor of new getEntireSSTableStreamThroughputMebibytesPerSecAsDouble
+ in the JMX MBean `org.apache.cassandra.db:type=StorageService`
+ Removed getEntireSSTableInterDCStreamThroughputMebibytesPerSec in favor of getEntireSSTableInterDCStreamThroughputMebibytesPerSecAsDouble
+ in the JMX MBean `org.apache.cassandra.db:type=StorageService` (CASSANDRA-17725)
+ * Fix sstable_preemptive_open_interval disabled value. sstable_preemptive_open_interval = null is backward compatible with
+ sstable_preemptive_open_interval_in_mb = -1 (CASSANDRA-17737)
+ * Remove usages of Path#toFile() in the snapshot apparatus (CASSANDRA-17769)
+ * Fix Settings Virtual Table to update paxos_variant after startup and rename enable_uuid_sstable_identifiers to
+ uuid_sstable_identifiers_enabled as per our config naming conventions (CASSANDRA-17738)
+ * index_summary_resize_interval_in_minutes = -1 is equivalent to index_summary_resize_interval being set to null or
+ disabled. JMX MBean IndexSummaryManager, setResizeIntervalInMinutes method still takes resizeIntervalInMinutes = -1 for disabled (CASSANDRA-17735)
+ * min_tracked_partition_size_bytes parameter from 4.1 alpha1 was renamed to min_tracked_partition_size (CASSANDRA-17733)
+ * Remove commons-lang dependency during build runtime (CASSANDRA-17724)
+ * Relax synchronization on StreamSession#onError() to avoid deadlock (CASSANDRA-17706)
+ * Fix AbstractCell#toString throws MarshalException for cell in collection (CASSANDRA-17695)
+ * Add new vtable output option to compactionstats (CASSANDRA-17683)
+ * Fix commitLogUpperBound initialization in AbstractMemtableWithCommitlog (CASSANDRA-17587)
+ * Fix widening to long in getBatchSizeFailThreshold (CASSANDRA-17650)
+ * Fix widening from mebibytes to bytes in IntMebibytesBound (CASSANDRA-17716)
* Revert breaking change in nodetool clientstats and expose client options through nodetool clientstats --client-options (CASSANDRA-17715)
* Fix missed nowInSec values in QueryProcessor (CASSANDRA-17458)
* Revert removal of withBufferSizeInMB(int size) in CQLSSTableWriter.Builder class and deprecate it in favor of withBufferSizeInMiB(int size) (CASSANDRA-17675)
* Remove expired snapshots of dropped tables after restart (CASSANDRA-17619)
Merged from 4.0:
+ * Add 'noboolean' rpm build for older distros like CentOS7 (CASSANDRA-17765)
+ * Fix default value for compaction_throughput_mb_per_sec in Config class to match the one in cassandra.yaml (CASSANDRA-17790)
+ * Fix Settings Virtual Table - update after startup config properties gc_log_threshold_in_ms, gc_warn_threshold_in_ms,
+ conf.index_summary_capacity_in_mb, prepared_statements_cache_size_mb, key_cache_size_in_mb, counter_cache_size_in_mb
+ (CASSANDRA-17737)
+ * Clean up ScheduledExecutors, CommitLog, and MessagingService shutdown for in-JVM dtests (CASSANDRA-17731)
+ * Remove extra write to system table for prepared statements (CASSANDRA-17764)
+Merged from 3.11:
+ * Document usage of closed token intervals in manual compaction (CASSANDRA-17575)
+ * Creating a keyspace with an insufficient number of replicas should filter out gossiping-only members (CASSANDRA-17759)
+Merged from 3.0:
+ * Fix restarting of services on gossiping-only member (CASSANDRA-17752)
+
+
+4.0.5
+ * Utilise BTree improvements to reduce garbage and improve throughput (CASSANDRA-15511)
* SSL storage port in sstableloader is deprecated (CASSANDRA-17602)
* Fix counter write timeouts at ONE (CASSANDRA-17411)
* Fix NPE in getLocalPrimaryRangeForEndpoint (CASSANDRA-17680)
* Fix repair_request_timeout_in_ms and remove paxos_auto_repair_threshold_mb (CASSANDRA-17557)
* Incremental repair leaks SomeRepairFailedException after switch away from flatMap (CASSANDRA-17620)
* StorageService read threshold get methods throw NullPointerException due to not handling null configs (CASSANDRA-17593)
+Merged from 4.0:
+ * Ensure FileStreamTask cannot compromise shared channel proxy for system table when interrupted (CASSANDRA-17663)
+Merged from 3.11:
+Merged from 3.0:
+
+
+4.1
* Rename truncate_drop guardrail to drop_truncate_table (CASSANDRA-17592)
* nodetool enablefullquerylog can NPE when directory has no files (CASSANDRA-17595)
* Add auto_snapshot_ttl configuration (CASSANDRA-16790)
* GossiperTest.testHasVersion3Nodes didn't take into account trunk version changes, fixed to rely on latest version (CASSANDRA-16651)
* Update JNA library to 5.9.0 and snappy-java to version 1.1.8.4 (CASSANDRA-17040)
Merged from 4.0:
+ * silence benign SslClosedEngineException (CASSANDRA-17565)
Merged from 3.11:
Merged from 3.0:
* Fix issue where frozen maps may not be serialized in the correct order (CASSANDRA-17623)
'sstableloader' tool. You can upgrade the file format of your snapshots
using the provided 'sstableupgrade' tool.
+
+4.2
+===
+
+New features
+------------
+ - Added a new configuration, cdc_on_repair_enabled, to toggle whether CDC mutations are replayed through the
+ write path on streaming, e.g. repair. When enabled, CDC data streamed to the destination node is written into the
+ commit log first. When disabled, the streamed CDC data is written into SSTables just as with normal streaming.
+ If this is set to false, streaming will be considerably faster; however, it's possible that, in extreme situations
+ (losing > quorum # nodes in a replica set), you may have data in your SSTables that never makes it to the CDC log.
+ The default is true/enabled. The configuration can be altered via JMX.
+ - Added support for reading the write times and TTLs of the elements of collections and UDTs, whether frozen or
+ not. The CQL functions writetime, maxwritetime and ttl can now be applied to entire collections/UDTs,
+ single collection/UDT elements and slices of collection/UDT elements.
+ - Added a new CQL function, maxwritetime. It returns the largest unix timestamp at which the data was written, similar to
+ its sibling CQL function, writetime.
+ - New Guardrails added:
+ - Whether ALTER TABLE commands are allowed to mutate columns
+ - Whether SimpleStrategy is allowed on keyspace creation or alteration
+ - Maximum replication factor
+ - Whether DROP KEYSPACE commands are allowed.
+ - Ephemeral snapshots can now be listed with the nodetool listsnapshots command when the "-e" flag is specified.
+
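A minimal sketch of the writetime, maxwritetime and ttl additions above in CQL, assuming a hypothetical table ks.events with a non-frozen set column tags:

----
CREATE TABLE ks.events (id int PRIMARY KEY, tags set<text>);
INSERT INTO ks.events (id, tags) VALUES (1, {'a'}) USING TTL 86400;
UPDATE ks.events USING TTL 3600 SET tags = tags + {'b'} WHERE id = 1;

-- writetime, maxwritetime and ttl can now target the whole collection
-- (and, per the notes above, single elements and slices as well)
SELECT writetime(tags), maxwritetime(tags), ttl(tags) FROM ks.events WHERE id = 1;
----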
+Upgrading
+---------
+ - Ephemeral marker files for snapshots taken by repairs are not created anymore;
+ a dedicated flag in the snapshot manifest is used instead. When a node is upgraded to version 4.2 and started, any such
+ ephemeral snapshots found on disk are deleted (the same behaviour as before), and new ephemeral snapshots
+ no longer create marker files since the snapshot manifest flag is used instead.
+
+Deprecation
+-----------
+
+
4.1
===
native_transport_max_requests_per_second in cassandra.yaml.
- Support for pre hashing passwords on CQL DCL commands
- Expose all client options via system_views.clients and nodetool clientstats --client-options.
+ - Add new nodetool compactionstats --vtable option to match the sstable_tasks vtable.
- Support for String concatenation has been added through the + operator.
- New configuration max_hints_size_per_host to limit the size of local hints files per host in mebibytes. Setting to
non-positive value disables the limit, which is the default behavior. Setting to a positive value to ensure
Upgrading
---------
+ - A negative `cache_load_timeout_seconds` (disabled) is equivalent to `cache_load_timeout` = 0 (disabled).
+ - A negative `sstable_preemptive_open_interval_in_mb` (disabled) is equivalent to `sstable_preemptive_open_interval`
+ being null. In the JMX MBean `org.apache.cassandra.db:type=StorageService`, the setter method
+ `setSSTablePreemptiveOpenIntervalInMB` still takes negative `intervalInMB` values for disabled.
+ - `enable_uuid_sstable_identifiers` parameter from 4.1 alpha1 was renamed to `uuid_sstable_identifiers_enabled`.
+ - `index_summary_resize_interval_in_minutes = -1` is equivalent to index_summary_resize_interval being set to `null` or
+ disabled. In the JMX MBean `org.apache.cassandra.db:type=IndexSummaryManager`, the setter method `setResizeIntervalInMinutes` still takes
+ `resizeIntervalInMinutes = -1` for disabled.
+ - min_tracked_partition_size_bytes parameter from 4.1 alpha1 was renamed to min_tracked_partition_size.
- Parameters of type data storage, duration and data rate cannot be set to Long.MAX_VALUE (former parameters of long type)
and Integer.MAX_VALUE (former parameters of int type). Those numbers are used during conversion between units to prevent
an overflow from happening. (CASSANDRA-17571)
Deprecation
-----------
- - `withBufferSizeInMB(int size)` in CQLSSTableWriter.Builder class is deprecated in favor of withBufferSizeInMiB(int size)
+ - In the JMX MBean `org.apache.cassandra.db:type=StorageService`: deprecate getter method `getStreamThroughputMbitPerSec`
+ in favor of getter method `getStreamThroughputMbitPerSecAsDouble`; deprecate getter method `getStreamThroughputMbPerSec`
+ in favor of getter methods `getStreamThroughputMebibytesPerSec` and `getStreamThroughputMebibytesPerSecAsDouble`;
+ deprecate getter method `getInterDCStreamThroughputMbitPerSec` in favor of getter method `getInterDCStreamThroughputMbitPerSecAsDouble`;
+ deprecate getter method `getInterDCStreamThroughputMbPerSec` in favor of getter method `getInterDCStreamThroughputMebibytesPerSecAsDouble`;
+ deprecate getter method `getCompactionThroughputMbPerSec` in favor of getter methods `getCompactionThroughtputMibPerSecAsDouble`
+ and `getCompactionThroughtputBytesPerSec`; deprecate setter methods `setStreamThroughputMbPerSec` and `setStreamThroughputMbitPerSec`
+ in favor of `setStreamThroughputMebibytesPerSec`; deprecate setter methods `setInterDCStreamThroughputMbitPerSec` and
+ `setInterDCStreamThroughputMbPerSec` in favor of `setInterDCStreamThroughputMebibytesPerSec`. See CASSANDRA-17725 for further details.
+ - Deprecate public method `setRate(final double throughputMbPerSec)` in `Compaction Manager` in favor of
+ `setRateInBytes(final double throughputBytesPerSec)`
+ - `withBufferSizeInMB(int size)` in `StressCQLSSTableWriter.Builder` class is deprecated in favor of `withBufferSizeInMiB(int size)`
+ No change of functionality in the new one, only a name change for clarity with regard to units and to follow naming
+ standardization.
+ - `withBufferSizeInMB(int size)` in `CQLSSTableWriter.Builder` class is deprecated in favor of `withBufferSizeInMiB(int size)`
No change of functionality in the new one, only a name change for clarity with regard to units and to follow naming
standardization.
- The properties `keyspace_count_warn_threshold` and `table_count_warn_threshold` in cassandra.yaml have been
Upgrading
---------
+ - If you were on 4.0.1 - 4.0.5 and you haven't set compaction_throughput_mb_per_sec in your 4.0 cassandra.yaml
+ file but relied on the internal default value, then compaction_throughput_mb_per_sec was equal to the old default
+ value of 16MiB/s in Cassandra 4.0. After CASSANDRA-17790 this is changed to 64MiB/s to match the default value in
+ cassandra.yaml. If you prefer the old value of 16MiB/s, you need to set it explicitly in your cassandra.yaml file.
- otc_coalescing_strategy, otc_coalescing_window_us, otc_coalescing_enough_coalesced_messages,
otc_backlog_expiration_interval_ms are deprecated and will be removed at earliest with next major release.
otc_coalescing_strategy is disabled since 3.11.
----
Connected to Test Cluster at localhost:9160.
-[cqlsh 6.0.0 | Cassandra 4.0.2 | CQL spec 3.4.5 | Native protocol v5]
+[cqlsh 6.2.0 | Cassandra 4.2-SNAPSHOT | CQL spec 3.4.6 | Native protocol v5]
Use HELP for help.
cqlsh>
----
UTF8 = 'utf-8'
description = "CQL Shell for Apache Cassandra"
-version = "6.1.0"
+version = "6.2.0"
readline = None
try:
def show_session(self, sessionid, partial_session=False):
print_trace_session(self, self.session, sessionid, partial_session)
+ def show_replicas(self, token_value, keyspace=None):
+ ks = self.current_keyspace if keyspace is None else keyspace
+ token_map = self.conn.metadata.token_map
+ nodes = token_map.get_replicas(ks, token_map.token_class(token_value))
+ addresses = [x.address for x in nodes]
+ print(f"{addresses}")
+
def get_connection_versions(self):
result, = self.session.execute("select * from system.local where key = 'local'")
vers = {
if parsed:
self.printerr('Improper %s command (problem at %r).' % (cmdword, parsed.remainder[0]))
else:
- self.printerr('Improper %s command.' % cmdword)
+ self.printerr(f'Improper {cmdword} command.')
def do_use(self, parsed):
ksname = parsed.get_binding('ksname')
SHOW SESSION <sessionid>
Pretty-prints the requested tracing session.
+
+ SHOW REPLICAS <token> (<keyspace>)
+
+ Lists the replica nodes by IP address for the given token. The current
+ keyspace is used if one is not specified.
"""
showwhat = parsed.get_binding('what').lower()
if showwhat == 'version':
elif showwhat.startswith('session'):
session_id = parsed.get_binding('sessionid').lower()
self.show_session(UUID(session_id))
+ elif showwhat.startswith('replicas'):
+ token_id = parsed.get_binding('token')
+ keyspace = parsed.get_binding('keyspace')
+ self.show_replicas(token_id, keyspace)
else:
self.printerr('Wait, how do I show %r?' % (showwhat,))
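A rough sketch of the new command in a cqlsh session; the token and addresses are illustrative, and the output is simply the Python list printed by show_replicas above:

----
cqlsh> SHOW REPLICAS 95
['127.0.0.2', '127.0.0.3']

cqlsh> SHOW REPLICAS 95 system_auth
['127.0.0.1', '127.0.0.2', '127.0.0.3']
----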
<property name="debuglevel" value="source,lines,vars"/>
<!-- default version and SCM information -->
- <property name="base.version" value="4.1-alpha2"/>
+ <property name="base.version" value="4.2"/>
<property name="scm.connection" value="scm:https://gitbox.apache.org/repos/asf/cassandra.git"/>
<property name="scm.developerConnection" value="scm:https://gitbox.apache.org/repos/asf/cassandra.git"/>
<property name="scm.url" value="https://gitbox.apache.org/repos/asf?p=cassandra.git;a=tree"/>
<property name="test.simulator-asm.src" value="${test.dir}/simulator/asm"/>
<property name="test.simulator-bootstrap.src" value="${test.dir}/simulator/bootstrap"/>
<property name="test.simulator-test.src" value="${test.dir}/simulator/test"/>
- <property name="test.driver.connection_timeout_ms" value="5000"/>
- <property name="test.driver.read_timeout_ms" value="12000"/>
+ <property name="test.driver.connection_timeout_ms" value="10000"/>
+ <property name="test.driver.read_timeout_ms" value="24000"/>
<property name="test.jvm.args" value="" />
<property name="dist.dir" value="${build.dir}/dist"/>
<property name="tmp.dir" value="${java.io.tmpdir}"/>
<property name="maven-repository-url" value="https://repository.apache.org/content/repositories/snapshots"/>
<property name="maven-repository-id" value="apache.snapshots.https"/>
- <property name="test.timeout" value="240000" />
+ <property name="test.timeout" value="480000" />
<property name="test.memory.timeout" value="480000" />
<property name="test.long.timeout" value="600000" />
<property name="test.burn.timeout" value="60000000" />
<property name="test.distributed.timeout" value="900000" />
+ <property name="test.simulation.timeout" value="1800000" />
<!-- default for cql tests. Can be override by -Dcassandra.test.use_prepared=false -->
<property name="cassandra.test.use_prepared" value="true" />
<dependency groupId="org.apache.hadoop" artifactId="hadoop-core" version="1.0.3" scope="provided">
<exclusion groupId="org.mortbay.jetty" artifactId="servlet-api"/>
<exclusion groupId="commons-logging" artifactId="commons-logging"/>
+ <exclusion groupId="commons-lang" artifactId="commons-lang"/>
<exclusion groupId="org.eclipse.jdt" artifactId="core"/>
<exclusion groupId="ant" artifactId="ant"/>
<exclusion groupId="junit" artifactId="junit"/>
<exclusion groupId="net.java.dev.jna" artifactId="jna" />
<exclusion groupId="net.java.dev.jna" artifactId="jna-platform" />
</dependency>
- <dependency groupId="com.google.code.findbugs" artifactId="jsr305" version="2.0.2" scope="provided"/>
+ <dependency groupId="com.google.code.findbugs" artifactId="jsr305" version="2.0.2"/>
<dependency groupId="com.clearspring.analytics" artifactId="stream" version="2.5.2">
<exclusion groupId="it.unimi.dsi" artifactId="fastutil" />
</dependency>
ant testsome -Dtest.name=org.apache.cassandra.service.StorageServiceServerTest -Dtest.methods=testRegularMode,testGetAllRangesEmpty
-->
<target name="testsome" depends="build-test" description="Execute specific unit tests" >
+ <condition property="withoutMethods">
+ <and>
+ <equals arg1="${test.methods}" arg2=""/>
+ <not>
+ <contains string="${test.name}" substring="*"/>
+ </not>
+ </and>
+ </condition>
+ <condition property="withMethods">
+ <and>
+ <not>
+ <equals arg1="${test.methods}" arg2=""/>
+ </not>
+ <not>
+ <contains string="${test.name}" substring="*"/>
+ </not>
+ </and>
+ </condition>
<testmacro inputdir="${test.unit.src}" timeout="${test.timeout}">
- <test unless:blank="${test.methods}" name="${test.name}" methods="${test.methods}" outfile="build/test/output/TEST-${test.name}-${test.methods}"/>
- <test if:blank="${test.methods}" name="${test.name}" outfile="build/test/output/TEST-${test.name}"/>
+ <test if="withMethods" name="${test.name}" methods="${test.methods}" outfile="build/test/output/TEST-${test.name}-${test.methods}"/>
+ <test if="withoutMethods" name="${test.name}" outfile="build/test/output/TEST-${test.name}"/>
<jvmarg value="-Dlegacy-sstable-root=${test.data}/legacy-sstables"/>
<jvmarg value="-Dinvalid-legacy-sstable-root=${test.data}/invalid-legacy-sstables"/>
<jvmarg value="-Dcassandra.ring_delay_ms=1000"/>
<jvmarg value="-Dcassandra.tolerate_sstable_size=true"/>
<jvmarg value="-Dcassandra.skip_sync=true" />
</testmacro>
- <testmacro inputdir="${test.simulator-test.src}" timeout="${test.distributed.timeout}" forkmode="perTest" showoutput="true" filter="**/test/${test.name}.java">
+ </target>
+
+ <target name="test-simulator-dtest" depends="build-test" description="Execute simulator dtests">
+ <testmacro inputdir="${test.simulator-test.src}" timeout="${test.simulation.timeout}" forkmode="perTest" showoutput="true" filter="**/test/${test.name}.java">
<jvmarg value="-Dlogback.configurationFile=test/conf/logback-simulator.xml"/>
<jvmarg value="-Dcassandra.ring_delay_ms=10000"/>
<jvmarg value="-Dcassandra.tolerate_sstable_size=true"/>
<jvmarg value="-Dcassandra.skip_sync=true" />
+ <jvmarg value="-Dcassandra.debugrefcount=false"/>
+ <jvmarg value="-Dcassandra.test.simulator.determinismcheck=strict"/>
<!-- Support Simulator Tests -->
<jvmarg line="-javaagent:${test.lib}/jars/simulator-asm.jar"/>
<jvmarg line="-Xbootclasspath/a:${test.lib}/jars/simulator-bootstrap.jar"/>
<jvmarg line="-XX:ActiveProcessorCount=4"/>
<jvmarg line="-XX:-TieredCompilation"/>
+ <jvmarg line="-XX:-BackgroundCompilation"/>
+ <jvmarg line="-XX:CICompilerCount=1"/>
<jvmarg line="-XX:Tier4CompileThreshold=1000"/>
<jvmarg line="-XX:ReservedCodeCacheSize=256M"/>
+ <jvmarg line="-Xmx8G"/>
</testmacro>
</target>
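Invoked locally, the new target runs the same way the run_simulator_tests CircleCI command does; the -Dtest.name filter and the class name below are assumptions based on the filter attribute above, not something this patch defines:

----
# run the whole simulator dtest suite, as CI does
ant test-simulator-dtest

# optionally restrict to a single simulator test class (class name is illustrative)
ant test-simulator-dtest -Dtest.name=ShortPaxosSimulationTest
----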
<property name="idFormat" value="blockSystemClock"/>
<property name="influenceFormat" value="0"/>
</module>
+
+ <module name="SuppressWithNearbyCommentFilter">
+ <property name="commentFormat" value="checkstyle: permit this invocation"/>
+ <property name="idFormat" value="blockPathToFile"/>
+ <property name="influenceFormat" value="0"/>
+ </module>
<module name="RegexpSinglelineJava">
<!-- block system time -->
<module name="IllegalInstantiation">
<property name="classes" value="java.io.File,java.lang.Thread,java.util.concurrent.FutureTask,java.util.concurrent.Semaphore,java.util.concurrent.CountDownLatch,java.util.concurrent.ScheduledThreadPoolExecutor,java.util.concurrent.ThreadPoolExecutor,java.util.concurrent.ForkJoinPool,java.lang.OutOfMemoryError"/>
</module>
+
+ <module name="RegexpSinglelineJava">
+ <!-- block Path#toFile() -->
+ <property name="id" value="blockPathToFile"/>
+ <property name="format" value="toFile\(\)"/>
+ <property name="message" value="Avoid Path#toFile(), as some implementations may not support it." />
+ </module>
</module>
</module>
# Min unit: ms
max_hint_window: 3h
-# Maximum throttle in KBs per second, per delivery thread. This will be
+# Maximum throttle in KiBs per second, per delivery thread. This will be
# reduced proportionally to the number of nodes in the cluster. (If there
# are two nodes in the cluster, each delivery thread will use the maximum
# rate; if there are three, each will throttle to half of the maximum,
# Min unit: ms
hints_flush_period: 10000ms
-# Maximum size for a single hints file, in megabytes.
+# Maximum size for a single hints file, in mebibytes.
# Min unit: MiB
max_hints_file_size: 128MiB
#
# hint_window_persistent_enabled: true
-# Maximum throttle in KBs per second, total. This will be
+# Maximum throttle in KiBs per second, total. This will be
# reduced proportionally to the number of nodes in the cluster.
# Min unit: KiB
batchlog_replay_throttle: 1024KiB
# containing a CDC-enabled table if at space limit in cdc_raw_directory).
cdc_enabled: false
+# Specify whether writes to the CDC-enabled tables should be blocked when CDC data on disk has reached the limit.
+# When set to false, writes will not be blocked and the oldest CDC data on disk will be deleted to
+# enforce the size limit. The default is true.
+# cdc_block_writes: true
+
+# Specify whether CDC mutations are replayed through the write path on streaming, e.g. repair.
+# When enabled, CDC data streamed to the destination node is written into the commit log first. When set to false,
+# the streamed CDC data is written into SSTables just as with normal streaming. The default is true.
+# If this is set to false, streaming will be considerably faster; however, it's possible that, in extreme situations
+# (losing > quorum # nodes in a replica set), you may have data in your SSTables that never makes it to the CDC log.
+# cdc_on_repair_enabled: true
+
# CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the
# segment contains mutations for a CDC-enabled table. This should be placed on a
# separate spindle than the data directories. If not set, the default directory is
#
# Default value ("auto") is 1/256th of the heap or 10MiB, whichever is greater
# Min unit: MiB
-# prepared_statements_cache_size:
+prepared_statements_cache_size:
# Maximum size of the key cache in memory.
#
#
# Default value is empty to make it "auto" (min(5% of Heap (in MiB), 100MiB)). Set to 0 to disable key cache.
# Min unit: MiB
-# key_cache_size:
+key_cache_size:
# Duration in seconds after which Cassandra should
# save the key cache. Caches are saved to saved_caches_directory as
# Default value is empty to make it "auto" (min(2.5% of Heap (in MiB), 50MiB)). Set to 0 to disable counter cache.
# NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.
# Min unit: MiB
-# counter_cache_size:
+counter_cache_size:
# Duration in seconds after which Cassandra should
# save the counter cache (keys only). Caches are saved to saved_caches_directory as
# is 1/16th of the available heap. The main tradeoff is that smaller trees
# have less resolution, which can lead to over-streaming data. If you see heap
# pressure during repairs, consider lowering this, but you cannot go below
-# one megabyte. If you see lots of over-streaming, consider raising
+# one mebibyte. If you see lots of over-streaming, consider raising
# this or using subrange repair.
#
# For more details see https://issues.apache.org/jira/browse/CASSANDRA-14096.
# is a best-effort process. In extreme conditions Cassandra may need to use
# more than this amount of memory.
# Min unit: KiB
-# index_summary_capacity:
+index_summary_capacity:
# How frequently index summaries should be resampled. This is done
# periodically to redistribute memory from the fixed-size pool to sstables
-# proportional their recent read rates. Setting to -1 will disable this
+# proportional to their recent read rates. Setting to a null value will disable this
# process, leaving existing index summaries at their current sampling level.
# Min unit: m
index_summary_resize_interval: 60m
# are completely written, and used in place of the prior sstables for
# any range that has been written. This helps to smoothly transfer reads
# between the sstables, reducing page cache churn and keeping hot rows hot
+# Set sstable_preemptive_open_interval to null to disable, which is equivalent to
+# sstable_preemptive_open_interval_in_mb being negative.
# Min unit: MiB
sstable_preemptive_open_interval: 50MiB
# set to true, each newly created sstable will have a UUID based generation identifier and such files are
# not readable by previous Cassandra versions. At some point, this option will become true by default
# and eventually get removed from the configuration.
-enable_uuid_sstable_identifiers: false
+uuid_sstable_identifiers_enabled: false
# When enabled, permits Cassandra to zero-copy stream entire eligible
# SSTables between nodes, including every component.
# Set to a valid keystore if internode_encryption is dc, rack or all
keystore: conf/.keystore
keystore_password: cassandra
+ # During internode mTLS authentication, inbound connections (acting as servers) use keystore and keystore_password,
+ # which contain the server certificate, to create the SSLContext, while
+ # outbound connections (acting as clients) use outbound_keystore and outbound_keystore_password with client certificates
+ # to create the SSLContext. By default, outbound_keystore is the same as keystore, indicating mTLS is not enabled.
+# outbound_keystore: conf/.keystore
+# outbound_keystore_password: cassandra
# Verify peer server certificates
require_client_auth: false
# Set to a valid truststore if require_client_auth is true
# The two thresholds default to -1 to disable.
# keyspaces_warn_threshold: -1
# keyspaces_fail_threshold: -1
+#
# Guardrail to warn or fail when creating more user tables than threshold.
# The two thresholds default to -1 to disable.
# tables_warn_threshold: -1
# tables_fail_threshold: -1
+#
# Guardrail to enable or disable the ability to create uncompressed tables
# uncompressed_tables_enabled: true
+#
# Guardrail to warn or fail when creating/altering a table with more columns per table than threshold.
# The two thresholds default to -1 to disable.
# columns_per_table_warn_threshold: -1
# columns_per_table_fail_threshold: -1
+#
# Guardrail to warn or fail when creating more secondary indexes per table than threshold.
# The two thresholds default to -1 to disable.
# secondary_indexes_per_table_warn_threshold: -1
# secondary_indexes_per_table_fail_threshold: -1
+#
# Guardrail to enable or disable the creation of secondary indexes
# secondary_indexes_enabled: true
+#
# Guardrail to warn or fail when creating more materialized views per table than threshold.
# The two thresholds default to -1 to disable.
# materialized_views_per_table_warn_threshold: -1
# materialized_views_per_table_fail_threshold: -1
+#
# Guardrail to warn about, ignore or reject properties when creating tables. By default all properties are allowed.
# table_properties_warned: []
# table_properties_ignored: []
# table_properties_disallowed: []
+#
# Guardrail to allow/disallow user-provided timestamps. Defaults to true.
# user_timestamps_enabled: true
+#
# Guardrail to allow/disallow GROUP BY functionality.
# group_by_enabled: true
+#
# Guardrail to allow/disallow TRUNCATE and DROP TABLE statements
# drop_truncate_table_enabled: true
+#
+# Guardrail to allow/disallow DROP KEYSPACE statements
+# drop_keyspace_enabled: true
+#
# Guardrail to warn or fail when using a page size greater than threshold.
# The two thresholds default to -1 to disable.
# page_size_warn_threshold: -1
# page_size_fail_threshold: -1
+#
# Guardrail to allow/disallow list operations that require read before write, i.e. setting list element by index and
# removing list elements by either index or value. Defaults to true.
# read_before_write_list_operations_enabled: true
+#
# Guardrail to warn or fail when querying with an IN restriction selecting more partition keys than threshold.
# The two thresholds default to -1 to disable.
# partition_keys_in_select_warn_threshold: -1
# partition_keys_in_select_fail_threshold: -1
+#
# Guardrail to warn or fail when an IN query creates a cartesian product with a size exceeding threshold,
# eg. "a in (1,2,...10) and b in (1,2...10)" results in cartesian product of 100.
# The two thresholds default to -1 to disable.
# in_select_cartesian_product_warn_threshold: -1
# in_select_cartesian_product_fail_threshold: -1
+#
# Guardrail to warn about or reject read consistency levels. By default, all consistency levels are allowed.
# read_consistency_levels_warned: []
# read_consistency_levels_disallowed: []
+#
# Guardrail to warn about or reject write consistency levels. By default, all consistency levels are allowed.
# write_consistency_levels_warned: []
# write_consistency_levels_disallowed: []
+#
# Guardrail to warn or fail when encountering larger size of collection data than threshold.
# At query time this guardrail is applied only to the collection fragment that is being written, even though in the case
# of non-frozen collections there could be unaccounted parts of the collection on the sstables. This is done this way to
# collection_size_warn_threshold:
# Min unit: B
# collection_size_fail_threshold:
+#
# Guardrail to warn or fail when encountering more elements in collection than threshold.
# At query time this guardrail is applied only to the collection fragment that is being written, even though in the case
# of non-frozen collections there could be unaccounted parts of the collection on the sstables. This is done this way to
# The two thresholds default to -1 to disable.
# items_per_collection_warn_threshold: -1
# items_per_collection_fail_threshold: -1
+#
# Guardrail to allow/disallow querying with ALLOW FILTERING. Defaults to true.
# allow_filtering_enabled: true
+#
+# Guardrail to allow/disallow setting SimpleStrategy via keyspace creation or alteration. Defaults to true.
+# simplestrategy_enabled: true
+#
# Guardrail to warn or fail when creating a user-defined-type with more fields in than threshold.
# Default -1 to disable.
# fields_per_udt_warn_threshold: -1
# fields_per_udt_fail_threshold: -1
+#
+# Guardrail to indicate whether or not users are allowed to use ALTER TABLE commands to make column changes to tables
+# alter_table_enabled: true
+#
# Guardrail to warn or fail when local data disk usage percentage exceeds threshold. Valid values are in [1, 100].
# This is only used for the disks storing data directories, so it won't count any separate disks used for storing
# the commitlog, hints nor saved caches. The disk usage is the ratio between the amount of space used by the data
# The two thresholds default to -1 to disable.
# data_disk_usage_percentage_warn_threshold: -1
# data_disk_usage_percentage_fail_threshold: -1
-# Allows defining the max disk size of the data directories when calculating thresholds for
+#
+# Guardrail that allows users to define the max disk size of the data directories when calculating thresholds for
# disk_usage_percentage_warn_threshold and disk_usage_percentage_fail_threshold, so if this is greater than zero they
# become percentages of a fixed size on disk instead of percentages of the physically available disk size. This should
# be useful when we have a large disk and we only want to use a part of it for Cassandra's data directories.
# Defaults to null to disable and use the physically available disk size of data directories during calculations.
# Min unit: B
# data_disk_usage_max_disk_size:
+#
# Guardrail to warn or fail when the minimum replication factor is lesser than threshold.
# This would also apply to system keyspaces.
# Suggested value for use in production: 2 or higher
# minimum_replication_factor_warn_threshold: -1
# minimum_replication_factor_fail_threshold: -1
+#
+# Guardrail to warn or fail when the maximum replication factor is greater than threshold.
+# This would also apply to system keyspaces.
+# maximum_replication_factor_warn_threshold: -1
+# maximum_replication_factor_fail_threshold: -1
# Startup Checks are executed as part of the Cassandra startup process; not all of them
# are configurable (so you can disable them), but those enumerated below are.
if [ -z "$2" ]; then
chown -R cassandra: /var/lib/cassandra
chown -R cassandra: /var/log/cassandra
+ chmod 750 /var/lib/cassandra/
+ chmod 750 /var/log/cassandra/
fi
if ! sysctl -p /etc/sysctl.d/cassandra.conf; then
echo >&2
-cassandra (4.1~alpha2) UNRELEASED; urgency=medium
+cassandra (4.2) UNRELEASED; urgency=medium
* New release
- -- Mick Semb Wever <mck@apache.org> Fri, 20 May 2022 22:02:50 +0200
-
-cassandra (4.1~alpha1) unstable; urgency=medium
-
- * New release
-
- -- Mick Semb Wever <mck@apache.org> Fri, 20 May 2022 22:02:50 +0200
+ -- Mick Semb Wever <mck@apache.org> Wed, 21 Apr 2021 19:24:28 +0200
cassandra (4.0~rc1) unstable; urgency=medium
<selector> ::= <identifier>
| <term>
- | WRITETIME '(' <identifier> ')'
+ | WRITETIME '(' <selector> ')'
+ | MAXWRITETIME '(' <selector> ')'
| COUNT '(' '*' ')'
- | TTL '(' <identifier> ')'
+ | TTL '(' <selector> ')'
| CAST '(' <selector> AS <type> ')'
| <function> '(' (<selector> (',' <selector>)*)? ')'
| <selector> '.' <identifier>
The @<select-clause>@ determines which columns need to be queried and returned in the result-set. It consists of either the comma-separated list of <selector> or the wildcard character (@*@) to select all the columns defined for the table. Please note that for wildcard @SELECT@ queries the order of columns returned is not specified and is not guaranteed to be stable between Cassandra versions.
-A @<selector>@ is either a column name to retrieve or a @<function>@ of one or more @<term>@s. The function allowed are the same as for @<term>@ and are described in the "function section":#functions. In addition to these generic functions, the @WRITETIME@ (resp. @TTL@) function allows to select the timestamp of when the column was inserted (resp. the time to live (in seconds) for the column (or null if the column has no expiration set)) and the "@CAST@":#castFun function can be used to convert one data type to another.
+A @<selector>@ is either a column name to retrieve or a @<function>@ of one or more @<term>@s. The functions allowed are the same as for @<term>@ and are described in the "function section":#functions. In addition to these generic functions, the @WRITETIME@ and @MAXWRITETIME@ (resp. @TTL@) functions allow selecting the timestamp of when the column was inserted (resp. the time to live (in seconds) for the column (or null if the column has no expiration set)) and the "@CAST@":#castFun function can be used to convert one data type to another.
Additionally, individual values of maps and sets can be selected using @[ <term> ]@. For maps, this will return the value corresponding to the key, if such entry exists. For sets, this will return the key that is selected if it exists and is thus mainly a way to check element existence. It is also possible to select a slice of a set or map with @[ <term> ... <term> @], where both bound can be omitted.
h2(#aggregates). Aggregates
Aggregate functions work on a set of rows. They receive values for each row and return one value for the whole set.
-If @normal@ columns, @scalar functions@, @UDT@ fields, @writetime@ or @ttl@ are selected together with aggregate functions, the values returned for them will be the ones of the first row matching the query.
+If @normal@ columns, @scalar functions@, @UDT@ fields, @writetime@, @maxwritetime@ or @ttl@ are selected together with aggregate functions, the values returned for them will be the ones of the first row matching the query.
CQL3 distinguishes between built-in aggregates (so called 'native aggregates') and "user-defined aggregates":#udas. CQL3 includes several native aggregates, described below:
| @WHERE@ | yes |
| @WITH@ | yes |
| @WRITETIME@ | no |
+| @MAXWRITETIME@ | no |
h2(#appendixB). Appendix B: CQL Reserved Types
|`WHERE` |yes
|`WITH` |yes
|`WRITETIME` |no
+|`MAXWRITETIME` |no
|===
== Appendix B: CQL Reserved Types
The following describes the changes in each version of CQL.
+== 3.4.6
+
+* Add support for IF EXISTS and IF NOT EXISTS in ALTER statements (`16916`)
+* Allow GRANT/REVOKE multiple permissions in a single statement (`17030`)
+* Pre hashed passwords in CQL (`17334`)
+* Add support for type casting in WHERE clause components and in the values of INSERT/UPDATE statements (`14337`)
+* Add support for CONTAINS and CONTAINS KEY in conditional UPDATE and DELETE statement (`10537`)
+* Allow to grant permission for all tables in a keyspace (`17027`)
+* Allow to aggregate by time intervals (`11871`)
+
== 3.4.5
* Adds support for arithmetic operators (`11935`)
::= +
| +
| WRITETIME `(' `)' +
+| MAXWRITETIME `(' `)' +
| COUNT `(' `*' `)' +
| TTL `(' `)' +
| CAST `(' AS `)' +
A `<selector>` is either a column name to retrieve or a `<function>` of
one or more `<term>`s. The functions allowed are the same as for `<term>`
and are described in the link:#functions[function section]. In addition
-to these generic functions, the `WRITETIME` (resp. `TTL`) function
-allows to select the timestamp of when the column was inserted (resp.
+to these generic functions, the `WRITETIME` and `MAXWRITETIME` (resp. `TTL`)
+functions allow selecting the timestamp of when the column was inserted (resp.
the time to live (in seconds) for the column (or null if the column has
no expiration set)) and the link:#castFun[`CAST`] function can be used
to convert one data type to another. The `WRITETIME` and `TTL` functions
Aggregate functions work on a set of rows. They receive values for each
row and return one value for the whole set. +
-If `normal` columns, `scalar functions`, `UDT` fields, `writetime` or
-`ttl` are selected together with aggregate functions, the values
+If `normal` columns, `scalar functions`, `UDT` fields, `writetime`, `maxwritetime`
+or `ttl` are selected together with aggregate functions, the values
returned for them will be the ones of the first row matching the query.
CQL3 distinguishes between built-in aggregates (so called `native
====
[[writetime-and-ttl-function]]
-==== `WRITETIME` and `TTL` function
+==== `WRITETIME`, `MAXWRITETIME` and `TTL` functions
-Selection supports two special functions that aren't allowed anywhere
-else: `WRITETIME` and `TTL`.
-Both functions take only one argument, a column name.
+Selection supports three special functions that aren't allowed anywhere
+else: `WRITETIME`, `MAXWRITETIME` and `TTL`.
+All three functions take only one argument, a column name. If the column is a collection or UDT, it's possible to add element
+selectors, such as `WRITETIME(phones[2..4])` or `WRITETIME(user.name)`.
These functions retrieve meta-information that is stored internally for each column:
-* `WRITETIME` stores the timestamp of the value of the column
+* `WRITETIME` stores the timestamp of the value of the column.
+* `MAXWRITETIME` stores the largest timestamp of the value of the column. For non-collection and non-UDT columns, `MAXWRITETIME`
+is equivalent to `WRITETIME`. In the other cases, it returns the largest timestamp of the values in the column.
* `TTL` stores the remaining time to live (in seconds) for the value of the column if it is set to expire; otherwise the value is `null`.
-The `WRITETIME` and `TTL` functions can't be used on multi-cell columns such as non-frozen
-collections or non-frozen user-defined types.
+The `WRITETIME` and `TTL` functions can be used on multi-cell columns such as non-frozen collections or non-frozen
+user-defined types. In that case, the functions will return the list of timestamps or TTLs for each selected cell.
[[where-clause]]
=== The `WHERE` clause
= Frequently Asked Questions
-* `why-cant-list-all`
-* `what-ports`
-* `what-happens-on-joins`
-* `asynch-deletes`
-* `one-entry-ring`
-* `can-large-blob`
-* `nodetool-connection-refused`
-* `to-batch-or-not-to-batch`
-* `selinux`
-* `how-to-unsubscribe`
-* `cassandra-eats-all-my-memory`
-* `what-are-seeds`
-* `are-seeds-SPOF`
-* `why-message-dropped`
-* `oom-map-failed`
-* `what-on-same-timestamp-update`
-* `why-bootstrapping-stream-error`
-
[[why-cant-list-all]]
== Why can't I set `listen_address` to listen on 0.0.0.0 (all my addresses)?
------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
address | 127.0.0.1
port | 50687
- client_options | {'CQL_VERSION': '3.4.5', 'DRIVER_NAME': 'DataStax Python Driver', 'DRIVER_VERSION': '3.25.0'}
+ client_options | {'CQL_VERSION': '3.4.6', 'DRIVER_NAME': 'DataStax Python Driver', 'DRIVER_VERSION': '3.25.0'}
connection_stage | ready
driver_name | DataStax Python Driver
driver_version | 3.25.0
------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
address | 127.0.0.1
port | 50688
- client_options | {'CQL_VERSION': '3.4.5', 'DRIVER_NAME': 'DataStax Python Driver', 'DRIVER_VERSION': '3.25.0'}
+ client_options | {'CQL_VERSION': '3.4.6', 'DRIVER_NAME': 'DataStax Python Driver', 'DRIVER_VERSION': '3.25.0'}
connection_stage | ready
driver_name | DataStax Python Driver
driver_version | 3.25.0
`disableautocompaction`::
Disable compaction.
`setcompactionthroughput`::
- How fast compaction should run at most - defaults to 16MB/s, but note
- that it is likely not possible to reach this throughput.
+ How fast compaction should run at most - defaults to 64MiB/s.
`compactionstats`::
Statistics about current and pending compactions.
`compactionhistory`::
Connected to Prod_Cluster at 192.0.0.1:9042.
----
+=== `SHOW REPLICAS`
+
+Prints the IP addresses of the Cassandra nodes that are replicas for the
+given token and keyspace. This command is available from Cassandra 4.2.
+
+`Usage`: `SHOW REPLICAS <token> (<keyspace>)`
+
+Example usage:
+
+[source,none]
+----
+cqlsh> SHOW REPLICAS 95
+['192.0.0.1', '192.0.0.2']
+----
+
=== `SHOW SESSION`
Pretty prints a specific tracing session.
<properties>
<property name="project.dir">..</property>
<!-- the compile classpaths should be distinct per compilation unit… but it is kept simple and the build will catch errors -->
- <property name="cassandra.classpath.jars">${project.dir}/build/lib/jars/HdrHistogram-2.1.9.jar:${project.dir}/build/lib/jars/ST4-4.0.8.jar:${project.dir}/build/lib/jars/airline-0.8.jar:${project.dir}/build/lib/jars/antlr-3.5.2.jar:${project.dir}/build/lib/jars/antlr-runtime-3.5.2.jar:${project.dir}/build/lib/jars/asm-7.1.jar:${project.dir}/build/lib/jars/assertj-core-3.15.0.jar:${project.dir}/build/lib/jars/byteman-4.0.6.jar:${project.dir}/build/lib/jars/byteman-bmunit-4.0.6.jar:${project.dir}/build/lib/jars/byteman-install-4.0.6.jar:${project.dir}/build/lib/jars/byteman-submit-4.0.6.jar:${project.dir}/build/lib/jars/caffeine-2.3.5.jar:${project.dir}/build/lib/jars/cassandra-driver-core-3.11.0-shaded.jar:${project.dir}/build/lib/jars/chronicle-bytes-2.20.111.jar:${project.dir}/build/lib/jars/chronicle-core-2.20.126.jar:${project.dir}/build/lib/jars/chronicle-queue-5.20.123.jar:${project.dir}/build/lib/jars/chronicle-threads-2.20.111.jar:${project.dir}/build/lib/jars/chronicle-wire-2.20.117.jar:${project.dir}/build/lib/jars/commons-beanutils-1.7.0.jar:${project.dir}/build/lib/jars/commons-beanutils-core-1.8.0.jar:${project.dir}/build/lib/jars/commons-cli-1.1.jar:${project.dir}/build/lib/jars/commons-codec-1.9.jar:${project.dir}/build/lib/jars/commons-collections-3.2.1.jar:${project.dir}/build/lib/jars/commons-configuration-1.6.jar:${project.dir}/build/lib/jars/commons-digester-1.8.jar:${project.dir}/build/lib/jars/commons-el-1.0.jar:${project.dir}/build/lib/jars/commons-httpclient-3.0.1.jar:${project.dir}/build/lib/jars/commons-lang-2.4.jar:${project.dir}/build/lib/jars/commons-lang3-3.11.jar:${project.dir}/build/lib/jars/commons-math-2.1.jar:${project.dir}/build/lib/jars/commons-math3-3.2.jar:${project.dir}/build/lib/jars/commons-net-1.4.1.jar:${project.dir}/build/lib/jars/compile-command-annotations-1.2.0.jar:${project.dir}/build/lib/jars/compress-lzf-0.8.4.jar:${project.dir}/build/lib/jars/concurrent-trees-2.4.0.jar:${project.dir}/build/lib/jars/ecj-4.6.1.jar:${project.dir}/build/lib/jars/ftplet-api-1.0.0.jar:${project.dir}/build/lib/jars/ftpserver-core-1.0.0.jar:${project.dir}/build/lib/jars/ftpserver-deprecated-1.0.0-M2.jar:${project.dir}/build/lib/jars/guava-27.0-jre.jar:${project.dir}/build/lib/jars/hadoop-core-1.0.3.jar:${project.dir}/build/lib/jars/hadoop-minicluster-1.0.3.jar:${project.dir}/build/lib/jars/hadoop-test-1.0.3.jar:${project.dir}/build/lib/jars/high-scale-lib-1.0.6.jar:${project.dir}/build/lib/jars/hppc-0.8.1.jar:${project.dir}/build/lib/jars/hsqldb-1.8.0.10.jar:${project.dir}/build/lib/jars/j2objc-annotations-1.3.jar:${project.dir}/build/lib/jars/jackson-annotations-2.9.10.jar:${project.dir}/build/lib/jars/jackson-core-2.9.10.jar:${project.dir}/build/lib/jars/jackson-core-asl-1.0.1.jar:${project.dir}/build/lib/jars/jackson-databind-2.9.10.8.jar:${project.dir}/build/lib/jars/jackson-mapper-asl-1.0.1.jar:${project.dir}/build/lib/jars/jacocoagent.jar:${project.dir}/build/lib/jars/jamm-0.3.2.jar:${project.dir}/build/lib/jars/jasper-compiler-5.5.12.jar:${project.dir}/build/lib/jars/jasper-runtime-5.5.12.jar:${project.dir}/build/lib/jars/java-cup-runtime-11b-20160615.jar:${project.dir}/build/lib/jars/javax.inject-1.jar:${project.dir}/build/lib/jars/jbcrypt-0.3m.jar:${project.dir}/build/lib/jars/jcl-over-slf4j-1.7.25.jar:${project.dir}/build/lib/jars/jcommander-1.30.jar:${project.dir}/build/lib/jars/jctools-core-3.1.0.jar:${project.dir}/build/lib/jars/jersey-core-1.0.jar:${project.dir}/build/lib/jars/jersey-server-1.0.jar:${project.dir}/build/lib/jars/jets3t-0.7.1.jar:${pro
ject.dir}/build/lib/jars/jetty-6.1.26.jar:${project.dir}/build/lib/jars/jetty-util-6.1.26.jar:${project.dir}/build/lib/jars/jflex-1.8.2.jar:${project.dir}/build/lib/jars/jna-5.6.0.jar:${project.dir}/build/lib/jars/json-simple-1.1.jar:${project.dir}/build/lib/jars/jsp-2.1-6.1.14.jar:${project.dir}/build/lib/jars/jsp-api-2.1-6.1.14.jar:${project.dir}/build/lib/jars/jsr305-2.0.2.jar:${project.dir}/build/lib/jars/jsr311-api-1.0.jar:${project.dir}/build/lib/jars/jvm-attach-api-1.5.jar:${project.dir}/build/lib/jars/kfs-0.3.jar:${project.dir}/build/lib/jars/log4j-over-slf4j-1.7.25.jar:${project.dir}/build/lib/jars/logback-classic-1.2.3.jar:${project.dir}/build/lib/jars/logback-core-1.2.3.jar:${project.dir}/build/lib/jars/lz4-java-1.7.1.jar:${project.dir}/build/lib/jars/metrics-core-3.1.5.jar:${project.dir}/build/lib/jars/metrics-jvm-3.1.5.jar:${project.dir}/build/lib/jars/metrics-logback-3.1.5.jar:${project.dir}/build/lib/jars/mina-core-2.0.0-M5.jar:${project.dir}/build/lib/jars/mxdump-0.14.jar:${project.dir}/build/lib/jars/netty-all-4.1.58.Final.jar:${project.dir}/build/lib/jars/netty-tcnative-boringssl-static-2.0.36.Final.jar:${project.dir}/build/lib/jars/ohc-core-0.5.1.jar:${project.dir}/build/lib/jars/ohc-core-j8-0.5.1.jar:${project.dir}/build/lib/jars/oro-2.0.8.jar:${project.dir}/build/lib/jars/psjava-0.1.19.jar:${project.dir}/build/lib/jars/reporter-config-base-3.0.3.jar:${project.dir}/build/lib/jars/reporter-config3-3.0.3.jar:${project.dir}/build/lib/jars/servlet-api-2.5-6.1.14.jar:${project.dir}/build/lib/jars/sigar-1.6.4.jar:${project.dir}/build/lib/jars/sjk-cli-0.14.jar:${project.dir}/build/lib/jars/sjk-core-0.14.jar:${project.dir}/build/lib/jars/sjk-json-0.14.jar:${project.dir}/build/lib/jars/sjk-stacktrace-0.14.jar:${project.dir}/build/lib/jars/slf4j-api-1.7.25.jar:${project.dir}/build/lib/jars/snakeyaml-1.26.jar:${project.dir}/build/lib/jars/snappy-java-1.1.2.6.jar:${project.dir}/build/lib/jars/snowball-stemmer-1.3.0.581.1.jar:${project.dir}/build/lib/jars/stream-2.5.2.jar:${project.dir}/build/lib/jars/xmlenc-0.52.jar:${project.dir}/build/lib/jars/zstd-jni-1.3.8-5.jar:${project.dir}/build/test/lib/jars/animal-sniffer-annotations-1.14.jar:${project.dir}/build/test/lib/jars/ant-1.9.7.jar:${project.dir}/build/test/lib/jars/ant-junit-1.9.7.jar:${project.dir}/build/test/lib/jars/ant-launcher-1.9.7.jar:${project.dir}/build/test/lib/jars/asm-6.0.jar:${project.dir}/build/test/lib/jars/asm-analysis-6.0.jar:${project.dir}/build/test/lib/jars/asm-commons-6.0.jar:${project.dir}/build/test/lib/jars/asm-tree-6.0.jar:${project.dir}/build/test/lib/jars/asm-util-6.0.jar:${project.dir}/build/test/lib/jars/asm-xml-6.0.jar:${project.dir}/build/test/lib/jars/assertj-core-3.15.0.jar:${project.dir}/build/test/lib/jars/awaitility-4.0.3.jar:${project.dir}/build/test/lib/jars/byte-buddy-1.10.5.jar:${project.dir}/build/test/lib/jars/byte-buddy-agent-1.10.5.jar:${project.dir}/build/test/lib/jars/byteman-4.0.6.jar:${project.dir}/build/test/lib/jars/byteman-bmunit-4.0.6.jar:${project.dir}/build/test/lib/jars/byteman-install-4.0.6.jar:${project.dir}/build/test/lib/jars/byteman-submit-4.0.6.jar:${project.dir}/build/test/lib/jars/checker-qual-2.0.0.jar:${project.dir}/build/test/lib/jars/commons-io-2.6.jar:${project.dir}/build/test/lib/jars/commons-math3-3.2.jar:${project.dir}/build/test/lib/jars/compile-command-annotations-1.2.0.jar:${project.dir}/build/test/lib/jars/dtest-api-0.0.7.jar:${project.dir}/build/test/lib/jars/error_prone_annotations-2.0.18.jar:${project.dir}/build/test/lib/jars/guava-23.5-android.jar:${pr
oject.dir}/build/test/lib/jars/hamcrest-2.2.jar:${project.dir}/build/test/lib/jars/j2objc-annotations-1.1.jar:${project.dir}/build/test/lib/jars/java-allocation-instrumenter-3.1.0.jar:${project.dir}/build/test/lib/jars/javassist-3.26.0-GA.jar:${project.dir}/build/test/lib/jars/jmh-core-1.21.jar:${project.dir}/build/test/lib/jars/jmh-generator-annprocess-1.21.jar:${project.dir}/build/test/lib/jars/jopt-simple-4.6.jar:${project.dir}/build/test/lib/jars/jsr305-1.3.9.jar:${project.dir}/build/test/lib/jars/junit-4.12.jar:${project.dir}/build/test/lib/jars/mockito-core-3.2.4.jar:${project.dir}/build/test/lib/jars/objenesis-2.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.agent-0.8.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.ant-0.8.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.core-0.8.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.report-0.8.6.jar:${project.dir}/build/test/lib/jars/quicktheories-0.26.jar:${project.dir}/build/test/lib/jars/reflections-0.9.12.jar:${project.dir}/build/test/lib/jars/slf4j-api-1.7.25.jar:</property>
+ <property name="cassandra.classpath.jars">${project.dir}/build/lib/jars/HdrHistogram-2.1.9.jar:${project.dir}/build/lib/jars/ST4-4.0.8.jar:${project.dir}/build/lib/jars/airline-0.8.jar:${project.dir}/build/lib/jars/antlr-3.5.2.jar:${project.dir}/build/lib/jars/antlr-runtime-3.5.2.jar:${project.dir}/build/lib/jars/asm-7.1.jar:${project.dir}/build/lib/jars/assertj-core-3.15.0.jar:${project.dir}/build/lib/jars/byteman-4.0.6.jar:${project.dir}/build/lib/jars/byteman-bmunit-4.0.6.jar:${project.dir}/build/lib/jars/byteman-install-4.0.6.jar:${project.dir}/build/lib/jars/byteman-submit-4.0.6.jar:${project.dir}/build/lib/jars/caffeine-2.3.5.jar:${project.dir}/build/lib/jars/cassandra-driver-core-3.11.0-shaded.jar:${project.dir}/build/lib/jars/chronicle-bytes-2.20.111.jar:${project.dir}/build/lib/jars/chronicle-core-2.20.126.jar:${project.dir}/build/lib/jars/chronicle-queue-5.20.123.jar:${project.dir}/build/lib/jars/chronicle-threads-2.20.111.jar:${project.dir}/build/lib/jars/chronicle-wire-2.20.117.jar:${project.dir}/build/lib/jars/commons-beanutils-1.7.0.jar:${project.dir}/build/lib/jars/commons-beanutils-core-1.8.0.jar:${project.dir}/build/lib/jars/commons-cli-1.1.jar:${project.dir}/build/lib/jars/commons-codec-1.9.jar:${project.dir}/build/lib/jars/commons-collections-3.2.1.jar:${project.dir}/build/lib/jars/commons-configuration-1.6.jar:${project.dir}/build/lib/jars/commons-digester-1.8.jar:${project.dir}/build/lib/jars/commons-el-1.0.jar:${project.dir}/build/lib/jars/commons-httpclient-3.0.1.jar:${project.dir}/build/lib/jars/commons-lang3-3.11.jar:${project.dir}/build/lib/jars/commons-math-2.1.jar:${project.dir}/build/lib/jars/commons-math3-3.2.jar:${project.dir}/build/lib/jars/commons-net-1.4.1.jar:${project.dir}/build/lib/jars/compile-command-annotations-1.2.0.jar:${project.dir}/build/lib/jars/compress-lzf-0.8.4.jar:${project.dir}/build/lib/jars/concurrent-trees-2.4.0.jar:${project.dir}/build/lib/jars/ecj-4.6.1.jar:${project.dir}/build/lib/jars/ftplet-api-1.0.0.jar:${project.dir}/build/lib/jars/ftpserver-core-1.0.0.jar:${project.dir}/build/lib/jars/ftpserver-deprecated-1.0.0-M2.jar:${project.dir}/build/lib/jars/guava-27.0-jre.jar:${project.dir}/build/lib/jars/hadoop-core-1.0.3.jar:${project.dir}/build/lib/jars/hadoop-minicluster-1.0.3.jar:${project.dir}/build/lib/jars/hadoop-test-1.0.3.jar:${project.dir}/build/lib/jars/high-scale-lib-1.0.6.jar:${project.dir}/build/lib/jars/hppc-0.8.1.jar:${project.dir}/build/lib/jars/hsqldb-1.8.0.10.jar:${project.dir}/build/lib/jars/j2objc-annotations-1.3.jar:${project.dir}/build/lib/jars/jackson-annotations-2.9.10.jar:${project.dir}/build/lib/jars/jackson-core-2.9.10.jar:${project.dir}/build/lib/jars/jackson-core-asl-1.0.1.jar:${project.dir}/build/lib/jars/jackson-databind-2.9.10.8.jar:${project.dir}/build/lib/jars/jackson-mapper-asl-1.0.1.jar:${project.dir}/build/lib/jars/jacocoagent.jar:${project.dir}/build/lib/jars/jamm-0.3.2.jar:${project.dir}/build/lib/jars/jasper-compiler-5.5.12.jar:${project.dir}/build/lib/jars/jasper-runtime-5.5.12.jar:${project.dir}/build/lib/jars/java-cup-runtime-11b-20160615.jar:${project.dir}/build/lib/jars/javax.inject-1.jar:${project.dir}/build/lib/jars/jbcrypt-0.3m.jar:${project.dir}/build/lib/jars/jcl-over-slf4j-1.7.25.jar:${project.dir}/build/lib/jars/jcommander-1.30.jar:${project.dir}/build/lib/jars/jctools-core-3.1.0.jar:${project.dir}/build/lib/jars/jersey-core-1.0.jar:${project.dir}/build/lib/jars/jersey-server-1.0.jar:${project.dir}/build/lib/jars/jets3t-0.7.1.jar:${project.dir}/build/lib/jars/jetty-6.1.26.jar:${project
.dir}/build/lib/jars/jetty-util-6.1.26.jar:${project.dir}/build/lib/jars/jflex-1.8.2.jar:${project.dir}/build/lib/jars/jna-5.6.0.jar:${project.dir}/build/lib/jars/json-simple-1.1.jar:${project.dir}/build/lib/jars/jsp-2.1-6.1.14.jar:${project.dir}/build/lib/jars/jsp-api-2.1-6.1.14.jar:${project.dir}/build/lib/jars/jsr305-2.0.2.jar:${project.dir}/build/lib/jars/jsr311-api-1.0.jar:${project.dir}/build/lib/jars/jvm-attach-api-1.5.jar:${project.dir}/build/lib/jars/kfs-0.3.jar:${project.dir}/build/lib/jars/log4j-over-slf4j-1.7.25.jar:${project.dir}/build/lib/jars/logback-classic-1.2.3.jar:${project.dir}/build/lib/jars/logback-core-1.2.3.jar:${project.dir}/build/lib/jars/lz4-java-1.7.1.jar:${project.dir}/build/lib/jars/metrics-core-3.1.5.jar:${project.dir}/build/lib/jars/metrics-jvm-3.1.5.jar:${project.dir}/build/lib/jars/metrics-logback-3.1.5.jar:${project.dir}/build/lib/jars/mina-core-2.0.0-M5.jar:${project.dir}/build/lib/jars/mxdump-0.14.jar:${project.dir}/build/lib/jars/netty-all-4.1.58.Final.jar:${project.dir}/build/lib/jars/netty-tcnative-boringssl-static-2.0.36.Final.jar:${project.dir}/build/lib/jars/ohc-core-0.5.1.jar:${project.dir}/build/lib/jars/ohc-core-j8-0.5.1.jar:${project.dir}/build/lib/jars/oro-2.0.8.jar:${project.dir}/build/lib/jars/psjava-0.1.19.jar:${project.dir}/build/lib/jars/reporter-config-base-3.0.3.jar:${project.dir}/build/lib/jars/reporter-config3-3.0.3.jar:${project.dir}/build/lib/jars/servlet-api-2.5-6.1.14.jar:${project.dir}/build/lib/jars/sigar-1.6.4.jar:${project.dir}/build/lib/jars/sjk-cli-0.14.jar:${project.dir}/build/lib/jars/sjk-core-0.14.jar:${project.dir}/build/lib/jars/sjk-json-0.14.jar:${project.dir}/build/lib/jars/sjk-stacktrace-0.14.jar:${project.dir}/build/lib/jars/slf4j-api-1.7.25.jar:${project.dir}/build/lib/jars/snakeyaml-1.26.jar:${project.dir}/build/lib/jars/snappy-java-1.1.2.6.jar:${project.dir}/build/lib/jars/snowball-stemmer-1.3.0.581.1.jar:${project.dir}/build/lib/jars/stream-2.5.2.jar:${project.dir}/build/lib/jars/xmlenc-0.52.jar:${project.dir}/build/lib/jars/zstd-jni-1.3.8-5.jar:${project.dir}/build/test/lib/jars/animal-sniffer-annotations-1.14.jar:${project.dir}/build/test/lib/jars/ant-1.9.7.jar:${project.dir}/build/test/lib/jars/ant-junit-1.9.7.jar:${project.dir}/build/test/lib/jars/ant-launcher-1.9.7.jar:${project.dir}/build/test/lib/jars/asm-6.0.jar:${project.dir}/build/test/lib/jars/asm-analysis-6.0.jar:${project.dir}/build/test/lib/jars/asm-commons-6.0.jar:${project.dir}/build/test/lib/jars/asm-tree-6.0.jar:${project.dir}/build/test/lib/jars/asm-util-6.0.jar:${project.dir}/build/test/lib/jars/asm-xml-6.0.jar:${project.dir}/build/test/lib/jars/assertj-core-3.15.0.jar:${project.dir}/build/test/lib/jars/awaitility-4.0.3.jar:${project.dir}/build/test/lib/jars/byte-buddy-1.10.5.jar:${project.dir}/build/test/lib/jars/byte-buddy-agent-1.10.5.jar:${project.dir}/build/test/lib/jars/byteman-4.0.6.jar:${project.dir}/build/test/lib/jars/byteman-bmunit-4.0.6.jar:${project.dir}/build/test/lib/jars/byteman-install-4.0.6.jar:${project.dir}/build/test/lib/jars/byteman-submit-4.0.6.jar:${project.dir}/build/test/lib/jars/checker-qual-2.0.0.jar:${project.dir}/build/test/lib/jars/commons-io-2.6.jar:${project.dir}/build/test/lib/jars/commons-math3-3.2.jar:${project.dir}/build/test/lib/jars/compile-command-annotations-1.2.0.jar:${project.dir}/build/test/lib/jars/dtest-api-0.0.7.jar:${project.dir}/build/test/lib/jars/error_prone_annotations-2.0.18.jar:${project.dir}/build/test/lib/jars/guava-23.5-android.jar:${project.dir}/build/test/lib/jars/hamcrest-2.2.jar:${p
roject.dir}/build/test/lib/jars/j2objc-annotations-1.1.jar:${project.dir}/build/test/lib/jars/java-allocation-instrumenter-3.1.0.jar:${project.dir}/build/test/lib/jars/javassist-3.26.0-GA.jar:${project.dir}/build/test/lib/jars/jmh-core-1.21.jar:${project.dir}/build/test/lib/jars/jmh-generator-annprocess-1.21.jar:${project.dir}/build/test/lib/jars/jopt-simple-4.6.jar:${project.dir}/build/test/lib/jars/jsr305-1.3.9.jar:${project.dir}/build/test/lib/jars/junit-4.12.jar:${project.dir}/build/test/lib/jars/mockito-core-3.2.4.jar:${project.dir}/build/test/lib/jars/objenesis-2.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.agent-0.8.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.ant-0.8.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.core-0.8.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.report-0.8.6.jar:${project.dir}/build/test/lib/jars/quicktheories-0.26.jar:${project.dir}/build/test/lib/jars/reflections-0.9.12.jar:${project.dir}/build/test/lib/jars/slf4j-api-1.7.25.jar:</property>
</properties>
<folders>
<source-folder>
<selector> ::= [colname]=<cident> ( "[" ( <term> ( ".." <term> "]" )? | <term> ".." ) )?
| <udtSubfieldSelection>
| "WRITETIME" "(" [colname]=<cident> ")"
+ | "MAXWRITETIME" "(" [colname]=<cident> ")"
| "TTL" "(" [colname]=<cident> ")"
| "COUNT" "(" star=( "*" | "1" ) ")"
| "CAST" "(" <selector> "AS" <storageType> ")"
'''
cqlsh_show_cmd_syntax_rules = r'''
-<showCommand> ::= "SHOW" what=( "VERSION" | "HOST" | "SESSION" sessionid=<uuid> )
+<showCommand> ::= "SHOW" what=( "VERSION" | "HOST" | "SESSION" sessionid=<uuid> | "REPLICAS" token=<integer> (keyspace=<keyspaceName>)? )
;
'''
return colorme(bval, colormap, 'int')
-# We can get rid of this in cassandra-2.2
-if sys.version_info >= (2, 7):
- def format_integer_with_thousands_sep(val, thousands_sep=','):
- return "{:,.0f}".format(val).replace(',', thousands_sep)
-else:
- def format_integer_with_thousands_sep(val, thousands_sep=','):
- if val < 0:
- return '-' + format_integer_with_thousands_sep(-val, thousands_sep)
- result = ''
- while val >= 1000:
- val, r = divmod(val, 1000)
- result = "%s%03d%s" % (thousands_sep, r, result)
- return "%d%s" % (val, result)
+def format_integer_with_thousands_sep(val, thousands_sep=','):
+ return "{:,.0f}".format(val).replace(',', thousands_sep)
+
formatter_for('long')(format_integer_type)
formatter_for('int')(format_integer_type)
# See the License for the specific language governing permissions and
# limitations under the License.
+"""Pylexotron uses Python's re.Scanner module as a simple regex-based tokenizer for BNF production rules"""
+
import re
+import inspect
+import sys
+from typing import Union
from cqlshlib.saferscanner import SaferScanner
return '%s(%r)' % (self.__class__, self.text)
-def is_hint(x):
- return isinstance(x, Hint)
+def is_hint(obj):
+ return isinstance(obj, Hint)
class ParseContext:
% (self.__class__.__name__, self.matched, self.remainder, self.productionname, self.bindings)
-class matcher:
+class Matcher:
def __init__(self, arg):
self.arg = arg
return '%s(%r)' % (self.__class__.__name__, self.arg)
-class choice(matcher):
+class Choice(Matcher):
def match(self, ctxt, completions):
foundctxts = []
- for a in self.arg:
- subctxts = a.match(ctxt, completions)
+ for each in self.arg:
+ subctxts = each.match(ctxt, completions)
foundctxts.extend(subctxts)
return foundctxts
-class one_or_none(matcher):
+class OneOrNone(Matcher):
def match(self, ctxt, completions):
return [ctxt] + list(self.arg.match(ctxt, completions))
-class repeat(matcher):
+class Repeat(Matcher):
def match(self, ctxt, completions):
found = [ctxt]
ctxts = [ctxt]
while True:
new_ctxts = []
- for c in ctxts:
- new_ctxts.extend(self.arg.match(c, completions))
+ for each in ctxts:
+ new_ctxts.extend(self.arg.match(each, completions))
if not new_ctxts:
return found
found.extend(new_ctxts)
ctxts = new_ctxts
-class rule_reference(matcher):
+class RuleReference(Matcher):
def match(self, ctxt, completions):
prevname = ctxt.productionname
return [c.with_production_named(prevname) for c in output]
-class rule_series(matcher):
+class RuleSeries(Matcher):
def match(self, ctxt, completions):
ctxts = [ctxt]
for patpiece in self.arg:
new_ctxts = []
- for c in ctxts:
- new_ctxts.extend(patpiece.match(c, completions))
+ for each in ctxts:
+ new_ctxts.extend(patpiece.match(each, completions))
if not new_ctxts:
return ()
ctxts = new_ctxts
return ctxts
-class named_symbol(matcher):
+class NamedSymbol(Matcher):
def __init__(self, name, arg):
- matcher.__init__(self, arg)
+ Matcher.__init__(self, arg)
self.name = name
def match(self, ctxt, completions):
# don't collect other completions under this; use a dummy
pass_in_compls = set()
results = self.arg.match_with_results(ctxt, pass_in_compls)
- return [c.with_binding(self.name, ctxt.extract_orig(matchtoks)) for (c, matchtoks) in results]
+ return [c.with_binding(self.name, ctxt.extract_orig(matchtoks))
+ for (c, matchtoks) in results]
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.name, self.arg)
-class named_collector(named_symbol):
+class NamedCollector(NamedSymbol):
def match(self, ctxt, completions):
pass_in_compls = completions
return output
-class terminal_matcher(matcher):
+class TerminalMatcher(Matcher):
+
+ def match(self, ctxt, completions):
+ raise NotImplementedError
def pattern(self):
raise NotImplementedError
-class regex_rule(terminal_matcher):
+class RegexRule(TerminalMatcher):
def __init__(self, pat):
- terminal_matcher.__init__(self, pat)
+ TerminalMatcher.__init__(self, pat)
self.regex = pat
- self.re = re.compile(pat + '$', re.I | re.S)
+ self.re = re.compile(pat + '$', re.IGNORECASE | re.DOTALL)
def match(self, ctxt, completions):
if ctxt.remainder:
return self.regex
-class text_match(terminal_matcher):
+class TextMatch(TerminalMatcher):
alpha_re = re.compile(r'[a-zA-Z]')
def __init__(self, text):
try:
- terminal_matcher.__init__(self, eval(text))
+ TerminalMatcher.__init__(self, eval(text))
except SyntaxError:
print("bad syntax %r" % (text,))
def pattern(self):
# can't use (?i) here- Scanner component regex flags won't be applied
def ignorecaseify(matchobj):
- c = matchobj.group(0)
- return '[%s%s]' % (c.upper(), c.lower())
+ val = matchobj.group(0)
+ return '[%s%s]' % (val.upper(), val.lower())
+
return self.alpha_re.sub(ignorecaseify, re.escape(self.arg))
-class case_match(text_match):
+class CaseMatch(TextMatch):
def match(self, ctxt, completions):
if ctxt.remainder:
return re.escape(self.arg)
-class word_match(text_match):
+class WordMatch(TextMatch):
def pattern(self):
- return r'\b' + text_match.pattern(self) + r'\b'
+ return r'\b' + TextMatch.pattern(self) + r'\b'
-class case_word_match(case_match):
+class CaseWordMatch(CaseMatch):
def pattern(self):
- return r'\b' + case_match.pattern(self) + r'\b'
+ return r'\b' + CaseMatch.pattern(self) + r'\b'
-class terminal_type_matcher(matcher):
+class TerminalTypeMatcher(Matcher):
def __init__(self, tokentype, submatcher):
- matcher.__init__(self, tokentype)
+ Matcher.__init__(self, tokentype)
self.tokentype = tokentype
self.submatcher = submatcher
class ParsingRuleSet:
+ """Define the BNF tokenization rules for cql3handling.syntax_rules. Backus-Naur Form consists of
+ - Production rules in the form: Left-Hand-Side ::= Right-Hand-Side. The LHS is a non-terminal.
+ - Productions or non-terminal symbols
+ - Terminal symbols. Every terminal is a single token.
+ """
+
RuleSpecScanner = SaferScanner([
- (r'::=', lambda s, t: t),
+ (r'::=', lambda s, t: t), # BNF rule definition
(r'\[[a-z0-9_]+\]=', lambda s, t: ('named_collector', t[1:-2])),
(r'[a-z0-9_]+=', lambda s, t: ('named_symbol', t[:-1])),
(r'/(\[\^?.[^]]*\]|[^/]|\\.)*/', lambda s, t: ('regex', t[1:-1].replace(r'\/', '/'))),
- (r'"([^"]|\\.)*"', lambda s, t: ('litstring', t)),
+ (r'"([^"]|\\.)*"', lambda s, t: ('string_literal', t)),
(r'<[^>]*>', lambda s, t: ('reference', t[1:-1])),
(r'\bJUNK\b', lambda s, t: ('junk', t)),
(r'[@()|?*;]', lambda s, t: t),
- (r'\s+', None),
+ (r'\s+', None), # whitespace
(r'#[^\n]*', None),
- ], re.I | re.S | re.U)
+ ], re.IGNORECASE | re.DOTALL | re.UNICODE)
def __init__(self):
self.ruleset = {}
def parse_rules(cls, rulestr):
tokens, unmatched = cls.RuleSpecScanner.scan(rulestr)
if unmatched:
- raise LexingError.from_text(rulestr, unmatched, msg="Syntax rules unparseable")
+ raise LexingError.from_text(rulestr, unmatched, msg="Syntax rules are unparseable")
rules = {}
terminals = []
tokeniter = iter(tokens)
raise ValueError('Unexpected token %r; expected "::="' % (assign,))
name = t[1]
production = cls.read_rule_tokens_until(';', tokeniter)
- if isinstance(production, terminal_matcher):
+ if isinstance(production, TerminalMatcher):
terminals.append((name, production))
- production = terminal_type_matcher(name, production)
+ production = TerminalTypeMatcher(name, production)
rules[name] = production
else:
raise ValueError('Unexpected token %r; expected name' % (t,))
if isinstance(pieces, (tuple, list)):
if len(pieces) == 1:
return pieces[0]
- return rule_series(pieces)
+ return RuleSeries(pieces)
return pieces
@classmethod
- def read_rule_tokens_until(cls, endtoks, tokeniter):
+ def read_rule_tokens_until(cls, endtoks: Union[str, int], tokeniter):
if isinstance(endtoks, str):
endtoks = (endtoks,)
counttarget = None
if t in endtoks:
if len(mybranches) == 1:
return cls.mkrule(mybranches[0])
- return choice(list(map(cls.mkrule, mybranches)))
+ return Choice(list(map(cls.mkrule, mybranches)))
if isinstance(t, tuple):
if t[0] == 'reference':
- t = rule_reference(t[1])
- elif t[0] == 'litstring':
+ t = RuleReference(t[1])
+ elif t[0] == 'string_literal':
if t[1][1].isalnum() or t[1][1] == '_':
- t = word_match(t[1])
+ t = WordMatch(t[1])
else:
- t = text_match(t[1])
+ t = TextMatch(t[1])
elif t[0] == 'regex':
- t = regex_rule(t[1])
+ t = RegexRule(t[1])
elif t[0] == 'named_collector':
- t = named_collector(t[1], cls.read_rule_tokens_until(1, tokeniter))
+ t = NamedCollector(t[1], cls.read_rule_tokens_until(1, tokeniter))
elif t[0] == 'named_symbol':
- t = named_symbol(t[1], cls.read_rule_tokens_until(1, tokeniter))
+ t = NamedSymbol(t[1], cls.read_rule_tokens_until(1, tokeniter))
elif t == '(':
t = cls.read_rule_tokens_until(')', tokeniter)
elif t == '?':
- t = one_or_none(myrules.pop(-1))
+ t = OneOrNone(myrules.pop(-1))
elif t == '*':
- t = repeat(myrules.pop(-1))
+ t = Repeat(myrules.pop(-1))
elif t == '@':
- x = next(tokeniter)
- if not isinstance(x, tuple) or x[0] != 'litstring':
- raise ValueError("Unexpected token %r following '@'" % (x,))
- t = case_match(x[1])
+ val = next(tokeniter)
+ if not isinstance(val, tuple) or val[0] != 'string_literal':
+ raise ValueError("Unexpected token %r following '@'" % (val,))
+ t = CaseMatch(val[1])
elif t == '|':
myrules = []
mybranches.append(myrules)
if countsofar == counttarget:
if len(mybranches) == 1:
return cls.mkrule(mybranches[0])
- return choice(list(map(cls.mkrule, mybranches)))
+ return Choice(list(map(cls.mkrule, mybranches)))
raise ValueError('Unexpected end of rule tokens')
def append_rules(self, rulestr):
if name == 'JUNK':
return None
return lambda s, t: (name, t, s.match.span())
+
regexes = [(p.pattern(), make_handler(name)) for (name, p) in self.terminals]
- return SaferScanner(regexes, re.I | re.S | re.U).scan
+ return SaferScanner(regexes, re.IGNORECASE | re.DOTALL | re.UNICODE).scan
def lex(self, text):
if self.scanner is None:
bindings = {}
if srcstr is not None:
bindings['*SRC*'] = srcstr
- for c in self.parse(startsymbol, tokens, init_bindings=bindings):
- if not c.remainder:
- return c
+ for val in self.parse(startsymbol, tokens, init_bindings=bindings):
+ if not val.remainder:
+ return val
def lex_and_parse(self, text, startsymbol='Start'):
return self.parse(startsymbol, self.lex(text), init_bindings={'*SRC*': text})
return completions
-import sys
-
-
class Debugotron(set):
depth = 10
self._note_addition(item)
set.add(self, item)
- def _note_addition(self, foo):
- self.stream.write("\nitem %r added by:\n" % (foo,))
- frame = sys._getframe().f_back.f_back
+ def _note_addition(self, item):
+ self.stream.write("\nitem %r added by:\n" % (item,))
+ frame = inspect.currentframe().f_back.f_back
for i in range(self.depth):
name = frame.f_code.co_name
filename = frame.f_code.co_filename
%{_sysconfdir}/security/limits.d/%{username}.conf
/usr/share/%{username}*
%config(noreplace) /%{_sysconfdir}/%{username}
-%attr(755,%{username},%{username}) %config(noreplace) /var/lib/%{username}/*
-%attr(755,%{username},%{username}) /var/log/%{username}*
-%attr(755,%{username},%{username}) /var/run/%{username}*
+%attr(750,%{username},%{username}) %config(noreplace) /var/lib/%{username}/*
+%attr(750,%{username},%{username}) /var/log/%{username}*
+%attr(750,%{username},%{username}) /var/run/%{username}*
%{python_sitelib}/cqlshlib/
%{python_sitelib}/cassandra_pylib*.egg-info
--- /dev/null
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+These files create the 'noboolean' rpm packaging, using the same procedure as the normal packages.
+They differ from the other packages by not using boolean dependency logic, and are intended for
+systems using rpmlib < 4.13.
+
+See CASSANDRA-17765 for more information.
--- /dev/null
+../cassandra
\ No newline at end of file
--- /dev/null
+../cassandra.conf
\ No newline at end of file
--- /dev/null
+../cassandra.in.sh
\ No newline at end of file
--- /dev/null
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+%define __jar_repack %{nil}
+# Turn off the brp-python-bytecompile script
+%global __os_install_post %(echo '%{__os_install_post}' | sed -e 's!/usr/lib[^[:space:]]*/brp-python-bytecompile[[:space:]].*$!!g')
+
+# rpmbuild should not barf when it spots we ship
+# binary executable files in our 'noarch' package
+%define _binaries_in_noarch_packages_terminate_build 0
+
+%define __python /usr/bin/python3
+
+%global username cassandra
+
+# input of ~alphaN, ~betaN, ~rcN package versions need to retain upstream '-alphaN, etc' version for sources
+%define upstream_version %(echo %{version} | sed -r 's/~/-/g')
+%define relname apache-cassandra-%{upstream_version}
+
+Name: cassandra
+Version: %{version}
+Release: %{revision}
+Summary: Cassandra is a highly scalable, eventually consistent, distributed, structured key-value store.
+
+Group: Development/Libraries
+License: Apache Software License 2.0
+URL: http://cassandra.apache.org/
+Source0: %{relname}-src.tar.gz
+BuildRoot: %{_tmppath}/%{relname}root-%(%{__id_u} -n)
+
+BuildRequires: ant >= 1.9
+BuildRequires: ant-junit >= 1.9
+
+Requires: jre >= 1.8.0
+Requires: python(abi) >= 3.6
+Requires: procps-ng >= 3.3
+Requires(pre): user(cassandra)
+Requires(pre): group(cassandra)
+Requires(pre): shadow-utils
+Provides: user(cassandra)
+Provides: group(cassandra)
+
+BuildArch: noarch
+
+# Don't examine the .so files we bundle for dependencies
+AutoReqProv: no
+
+%description
+Cassandra is a distributed (peer-to-peer) system for the management and storage of structured data.
+
+%prep
+%setup -q -n %{relname}-src
+
+%build
+export LANG=en_US.UTF-8
+export JAVA_TOOL_OPTIONS="-Dfile.encoding=UTF-8"
+ant clean jar -Dversion=%{upstream_version}
+
+%install
+%{__rm} -rf %{buildroot}
+mkdir -p %{buildroot}/%{_sysconfdir}/%{username}
+mkdir -p %{buildroot}/usr/share/%{username}
+mkdir -p %{buildroot}/usr/share/%{username}/lib
+mkdir -p %{buildroot}/%{_sysconfdir}/%{username}/default.conf
+mkdir -p %{buildroot}/%{_sysconfdir}/rc.d/init.d
+mkdir -p %{buildroot}/%{_sysconfdir}/security/limits.d
+mkdir -p %{buildroot}/%{_sysconfdir}/default
+mkdir -p %{buildroot}/usr/sbin
+mkdir -p %{buildroot}/usr/bin
+mkdir -p %{buildroot}/var/lib/%{username}/commitlog
+mkdir -p %{buildroot}/var/lib/%{username}/data
+mkdir -p %{buildroot}/var/lib/%{username}/saved_caches
+mkdir -p %{buildroot}/var/lib/%{username}/hints
+mkdir -p %{buildroot}/var/run/%{username}
+mkdir -p %{buildroot}/var/log/%{username}
+( cd pylib && %{__python} setup.py install --no-compile --root %{buildroot}; )
+
+# patches for data and log paths
+patch -p1 < debian/patches/cassandra_yaml_dirs.diff
+patch -p1 < debian/patches/cassandra_logdir_fix.diff
+# uncomment hints_directory path
+sed -i 's/^# hints_directory:/hints_directory:/' conf/cassandra.yaml
+
+# remove other files not being installed
+rm -f bin/*.orig
+rm -f bin/cassandra.in.sh
+rm -f lib/sigar-bin/*winnt* # strip segfaults on dll..
+rm -f tools/bin/cassandra.in.sh
+
+# copy default configs
+cp -pr conf/* %{buildroot}/%{_sysconfdir}/%{username}/default.conf/
+
+# step on default config with our redhat one
+cp -p redhat/%{username}.in.sh %{buildroot}/usr/share/%{username}/%{username}.in.sh
+cp -p redhat/%{username} %{buildroot}/%{_sysconfdir}/rc.d/init.d/%{username}
+cp -p redhat/%{username}.conf %{buildroot}/%{_sysconfdir}/security/limits.d/
+cp -p redhat/default %{buildroot}/%{_sysconfdir}/default/%{username}
+
+# copy cassandra bundled libs
+cp -pr lib/* %{buildroot}/usr/share/%{username}/lib/
+
+# copy stress jar
+cp -p build/tools/lib/stress.jar %{buildroot}/usr/share/%{username}/
+
+# copy fqltool jar
+cp -p build/tools/lib/fqltool.jar %{buildroot}/usr/share/%{username}/
+
+# copy binaries
+mv bin/cassandra %{buildroot}/usr/sbin/
+cp -p bin/* %{buildroot}/usr/bin/
+cp -p tools/bin/* %{buildroot}/usr/bin/
+
+# copy cassandra jar
+cp build/apache-cassandra-%{upstream_version}.jar %{buildroot}/usr/share/%{username}/
+
+%clean
+%{__rm} -rf %{buildroot}
+
+%pre
+getent group %{username} >/dev/null || groupadd -r %{username}
+getent passwd %{username} >/dev/null || \
+useradd -d /var/lib/%{username} -g %{username} -M -r %{username}
+exit 0
+
+%files
+%defattr(0644,root,root,0755)
+%doc CHANGES.txt LICENSE.txt README.asc NEWS.txt NOTICE.txt CASSANDRA-14092.txt
+%attr(755,root,root) %{_bindir}/auditlogviewer
+%attr(755,root,root) %{_bindir}/jmxtool
+%attr(755,root,root) %{_bindir}/cassandra-stress
+%attr(755,root,root) %{_bindir}/cqlsh
+%attr(755,root,root) %{_bindir}/cqlsh.py
+%attr(755,root,root) %{_bindir}/debug-cql
+%attr(755,root,root) %{_bindir}/fqltool
+%attr(755,root,root) %{_bindir}/generatetokens
+%attr(755,root,root) %{_bindir}/nodetool
+%attr(755,root,root) %{_bindir}/sstableloader
+%attr(755,root,root) %{_bindir}/sstablescrub
+%attr(755,root,root) %{_bindir}/sstableupgrade
+%attr(755,root,root) %{_bindir}/sstableutil
+%attr(755,root,root) %{_bindir}/sstableverify
+%attr(755,root,root) %{_bindir}/stop-server
+%attr(755,root,root) %{_sbindir}/cassandra
+%attr(755,root,root) /%{_sysconfdir}/rc.d/init.d/%{username}
+%{_sysconfdir}/default/%{username}
+%{_sysconfdir}/security/limits.d/%{username}.conf
+/usr/share/%{username}*
+%config(noreplace) /%{_sysconfdir}/%{username}
+%attr(750,%{username},%{username}) %config(noreplace) /var/lib/%{username}/*
+%attr(750,%{username},%{username}) /var/log/%{username}*
+%attr(750,%{username},%{username}) /var/run/%{username}*
+%{python_sitelib}/cqlshlib/
+%{python_sitelib}/cassandra_pylib*.egg-info
+
+%post
+alternatives --install /%{_sysconfdir}/%{username}/conf %{username} /%{_sysconfdir}/%{username}/default.conf/ 0
+exit 0
+
+%preun
+# only delete alternative on removal, not upgrade
+if [ "$1" = "0" ]; then
+ alternatives --remove %{username} /%{_sysconfdir}/%{username}/default.conf/
+fi
+exit 0
+
+
+%package tools
+Summary: Extra tools for Cassandra. Cassandra is a highly scalable, eventually consistent, distributed, structured key-value store.
+Group: Development/Libraries
+Requires: cassandra = %{version}-%{revision}
+
+%description tools
+Cassandra is a distributed (peer-to-peer) system for the management and storage of structured data.
+.
+This package contains extra tools for working with Cassandra clusters.
+
+%files tools
+%attr(755,root,root) %{_bindir}/sstabledump
+%attr(755,root,root) %{_bindir}/compaction-stress
+%attr(755,root,root) %{_bindir}/sstableexpiredblockers
+%attr(755,root,root) %{_bindir}/sstablelevelreset
+%attr(755,root,root) %{_bindir}/sstablemetadata
+%attr(755,root,root) %{_bindir}/sstableofflinerelevel
+%attr(755,root,root) %{_bindir}/sstablerepairedset
+%attr(755,root,root) %{_bindir}/sstablesplit
+%attr(755,root,root) %{_bindir}/auditlogviewer
+%attr(755,root,root) %{_bindir}/jmxtool
+%attr(755,root,root) %{_bindir}/fqltool
+%attr(755,root,root) %{_bindir}/generatetokens
+%attr(755,root,root) %{_bindir}/hash_password
+
+
+%changelog
+* Mon Dec 05 2016 Michael Shuler <mshuler@apache.org>
+- 2.1.17, 2.2.9, 3.0.11, 3.10
+- Reintroduce RPM packaging
--- /dev/null
+../default
\ No newline at end of file
K_TIMEUUID: T I M E U U I D;
K_TOKEN: T O K E N;
K_WRITETIME: W R I T E T I M E;
+K_MAXWRITETIME:M A X W R I T E T I M E;
K_DATE: D A T E;
K_TIME: T I M E;
;
selectionFunction returns [Selectable.Raw s]
- : K_COUNT '(' '\*' ')' { $s = Selectable.WithFunction.Raw.newCountRowsFunction(); }
- | K_WRITETIME '(' c=sident ')' { $s = new Selectable.WritetimeOrTTL.Raw(c, true); }
- | K_TTL '(' c=sident ')' { $s = new Selectable.WritetimeOrTTL.Raw(c, false); }
- | K_CAST '(' sn=unaliasedSelector K_AS t=native_type ')' {$s = new Selectable.WithCast.Raw(sn, t);}
- | f=functionName args=selectionFunctionArgs { $s = new Selectable.WithFunction.Raw(f, args); }
+ : K_COUNT '(' '\*' ')' { $s = Selectable.WithFunction.Raw.newCountRowsFunction(); }
+ | K_MAXWRITETIME '(' c=sident m=selectorModifier[c] ')' { $s = new Selectable.WritetimeOrTTL.Raw(c, m, Selectable.WritetimeOrTTL.Kind.MAX_WRITE_TIME); }
+ | K_WRITETIME '(' c=sident m=selectorModifier[c] ')' { $s = new Selectable.WritetimeOrTTL.Raw(c, m, Selectable.WritetimeOrTTL.Kind.WRITE_TIME); }
+ | K_TTL '(' c=sident m=selectorModifier[c] ')' { $s = new Selectable.WritetimeOrTTL.Raw(c, m, Selectable.WritetimeOrTTL.Kind.TTL); }
+ | K_CAST '(' sn=unaliasedSelector K_AS t=native_type ')' { $s = new Selectable.WithCast.Raw(sn, t);}
+ | f=functionName args=selectionFunctionArgs { $s = new Selectable.WithFunction.Raw(f, args); }
;
selectionLiteral returns [Term.Raw value]
unreserved_keyword returns [String str]
: u=unreserved_function_keyword { $str = u; }
- | k=(K_TTL | K_COUNT | K_WRITETIME | K_KEY | K_CAST | K_JSON | K_DISTINCT) { $str = $k.text; }
+ | k=(K_TTL | K_COUNT | K_WRITETIME | K_MAXWRITETIME | K_KEY | K_CAST | K_JSON | K_DISTINCT) { $str = $k.text; }
;
unreserved_function_keyword returns [String str]
package org.apache.cassandra.auth;
import java.net.InetAddress;
+import java.security.cert.Certificate;
import org.apache.cassandra.exceptions.ConfigurationException;
public class AllowAllInternodeAuthenticator implements IInternodeAuthenticator
{
- public boolean authenticate(InetAddress remoteAddress, int remotePort)
+ public boolean authenticate(InetAddress remoteAddress, int remotePort,
+ Certificate[] certificates, InternodeConnectionDirection connectionType)
{
return true;
}
import org.apache.cassandra.db.marshal.UTF8Type;
import org.apache.cassandra.exceptions.*;
import org.apache.cassandra.service.ClientState;
+import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.transport.messages.ResultMessage;
import org.apache.cassandra.utils.ByteBufferUtil;
{
// The delay is to give the node a chance to see its peers before attempting the operation
ScheduledExecutors.optionalTasks.scheduleSelfRecurring(() -> {
+ if (!StorageProxy.isSafeToPerformRead())
+ {
+ logger.trace("Setup task may not run due to it not being safe to perform reads... rescheduling");
+ scheduleSetupTask(setupTask);
+ return;
+ }
try
{
setupTask.call();
package org.apache.cassandra.auth;
import java.net.InetAddress;
+import java.security.cert.Certificate;
import org.apache.cassandra.exceptions.ConfigurationException;
* @param remotePort port of the connecting node.
* @return true if the connection should be accepted, false otherwise.
*/
- boolean authenticate(InetAddress remoteAddress, int remotePort);
+ @Deprecated
+ default boolean authenticate(InetAddress remoteAddress, int remotePort)
+ {
+ return false;
+ }
+
+ /**
+ * Decides whether a peer is allowed to connect to this node.
+ * If this method returns false, the socket will be immediately closed.
+ * <p>
+ * The default implementation delegates to the IP-and-port based authenticate method.
+ * <p>
+ * 1. For IP-based authentication, ignore the certificates and connectionType parameters in the implementation
+ * of this method.
+ * 2. For certificate-based authentication such as mTLS, the server's identity for outbound connections is verified by the
+ * trusted root certificates in the outbound_keystore. In such cases this method may be overridden to return true
+ * when connectionType is OUTBOUND, as the authentication of the server happens during the SSL handshake.
+ *
+ * @param remoteAddress ip address of the connecting node.
+ * @param remotePort port of the connecting node.
+ * @param certificates peer certificates
+ * @param connectionType If the connection is inbound/outbound connection.
+ * @return true if the connection should be accepted, false otherwise.
+ */
+ default boolean authenticate(InetAddress remoteAddress, int remotePort,
+ Certificate[] certificates, InternodeConnectionDirection connectionType)
+ {
+ return authenticate(remoteAddress, remotePort);
+ }
/**
* Validates configuration of IInternodeAuthenticator implementation (if configurable).
* @throws ConfigurationException when there is a configuration error.
*/
void validateConfiguration() throws ConfigurationException;
+
+ /**
+ * Setup is called once upon system startup to initialize the IInternodeAuthenticator.
+ *
+ * For example, use this method to create any required keyspaces/column families.
+ */
+ default void setupInternode()
+ {
+
+ }
+
+ /**
+ * Enum that represents connection type of internode connection.
+ *
+ * INBOUND - called after connection established, with certificate available if present.
+ * OUTBOUND - called after connection established, with certificate available if present.
+ * OUTBOUND_PRECONNECT - called before initiating a connection, without certificate available.
+ * The outbound connection will be authenticated with the certificate once a redirected connection is established.
+ * This is an extra check that can be used to detect misconfiguration before reconnection, or ignored by returning true.
+ */
+ enum InternodeConnectionDirection
+ {
+ INBOUND,
+ OUTBOUND,
+ OUTBOUND_PRECONNECT
+ }
}
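
For illustration only (not part of the patch): a minimal sketch of an IP-based authenticator written against the extended interface above. The class name and the subnet check are hypothetical; the certificate and connection-direction parameters are deliberately ignored, as note 1 in the javadoc allows.

import java.net.InetAddress;
import java.security.cert.Certificate;

import org.apache.cassandra.auth.IInternodeAuthenticator;
import org.apache.cassandra.exceptions.ConfigurationException;

// Hypothetical example, not shipped with Cassandra.
public class SubnetInternodeAuthenticator implements IInternodeAuthenticator
{
    @Override
    public boolean authenticate(InetAddress remoteAddress, int remotePort,
                                Certificate[] certificates, InternodeConnectionDirection connectionType)
    {
        // IP-based check only: accept peers from an assumed internal subnet.
        return remoteAddress.getHostAddress().startsWith("10.0.");
    }

    @Override
    public void validateConfiguration() throws ConfigurationException
    {
        // Nothing to validate in this sketch.
    }
}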
--- /dev/null
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.concurrent;
+
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Recursive.INTERFACES;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+/**
+ * Interface to include on a Runnable or Callable submitted to the {@link SharedExecutorPool} to provide more
+ * detailed diagnostics.
+ */
+@Shared(scope = SIMULATION, inner = INTERFACES)
+public interface DebuggableTask
+{
+ public long creationTimeNanos();
+
+ public long startTimeNanos();
+
+ public String description();
+
+ interface RunnableDebuggableTask extends Runnable, DebuggableTask {}
+
+ /**
+ * Wraps a {@link DebuggableTask} to include the name of the thread running it.
+ */
+ public static class RunningDebuggableTask implements DebuggableTask
+ {
+ private final DebuggableTask task;
+ private final String threadId;
+
+ public RunningDebuggableTask(String threadId, DebuggableTask task)
+ {
+ this.task = task;
+ this.threadId = threadId;
+ }
+
+ public String threadId()
+ {
+ return threadId;
+ }
+
+ public boolean hasTask()
+ {
+ return task != null;
+ }
+
+ @Override
+ public long creationTimeNanos()
+ {
+ assert hasTask();
+ return task.creationTimeNanos();
+ }
+
+ @Override
+ public long startTimeNanos()
+ {
+ assert hasTask();
+ return task.startTimeNanos();
+ }
+
+ @Override
+ public String description()
+ {
+ assert hasTask();
+ return task.description();
+ }
+ }
+}
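
For illustration only (not part of the patch): a minimal sketch of a task implementing RunnableDebuggableTask so that a worker thread can surface its timings. The class name and the sleep are hypothetical stand-ins for real work, and plain System.nanoTime() is used for brevity.

import java.util.concurrent.TimeUnit;

import org.apache.cassandra.concurrent.DebuggableTask.RunnableDebuggableTask;

// Hypothetical example, not shipped with Cassandra.
public class ExampleDebuggableTask implements RunnableDebuggableTask
{
    private final long creationTimeNanos = System.nanoTime();
    private volatile long startTimeNanos;

    @Override
    public void run()
    {
        startTimeNanos = System.nanoTime();
        try
        {
            TimeUnit.MILLISECONDS.sleep(100); // stand-in for real work
        }
        catch (InterruptedException e)
        {
            Thread.currentThread().interrupt();
        }
    }

    @Override
    public long creationTimeNanos() { return creationTimeNanos; }

    @Override
    public long startTimeNanos() { return startTimeNanos; }

    @Override
    public String description() { return "example debuggable task"; }
}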
import java.util.concurrent.Callable;
import java.util.concurrent.Future;
+import org.apache.cassandra.concurrent.DebuggableTask.RunnableDebuggableTask;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
return enforceOptions(withResources, wrap, false);
}
+ /**
+ * @see #suppressing(WithResources, Runnable)
+ */
+ static RunnableDebuggableTask suppressingDebuggable(WithResources withResources, RunnableDebuggableTask debuggable)
+ {
+ return enforceOptionsDebuggable(withResources, debuggable, false);
+ }
+
/**
* Encapsulate the execution, propagating or suppressing any exceptions as requested.
*
@Override
public void run()
{
- try (Closeable close = withResources.get())
+ try (@SuppressWarnings("unused") Closeable close = withResources.get())
{
wrap.run();
}
};
}
+ /**
+ * @see #enforceOptions(WithResources, Runnable, boolean)
+ */
+ private static RunnableDebuggableTask enforceOptionsDebuggable(WithResources withResources, RunnableDebuggableTask debuggable, boolean propagate)
+ {
+ return new RunnableDebuggableTask()
+ {
+ @Override
+ public void run()
+ {
+ try (@SuppressWarnings("unused") Closeable close = withResources.get())
+ {
+ debuggable.run();
+ }
+ catch (Throwable t)
+ {
+ handle(t);
+ if (propagate)
+ throw t;
+ }
+ }
+
+ @Override
+ public String toString()
+ {
+ return debuggable.toString();
+ }
+
+ @Override
+ public long creationTimeNanos()
+ {
+ return debuggable.creationTimeNanos();
+ }
+
+ @Override
+ public long startTimeNanos()
+ {
+ return debuggable.startTimeNanos();
+ }
+
+ @Override
+ public String description()
+ {
+ return debuggable.description();
+ }
+ };
+ }
+
/**
* See {@link #enforceOptions(WithResources, Callable)}
*/
@Override
public V call() throws Exception
{
- try (Closeable close = withResources.get())
+ try (@SuppressWarnings("unused") Closeable close = withResources.get())
{
return wrap.call();
}
// deliberately not volatile to ensure zero overhead outside of testing;
// depend on other memory visibility primitives to ensure visibility
private static ExecutorFactory FACTORY = new ExecutorFactory.Default(Global.class.getClassLoader(), null, JVMStabilityInspector::uncaughtException);
+ private static boolean modified;
+
public static ExecutorFactory executorFactory()
{
return FACTORY;
}
- public static void unsafeSet(ExecutorFactory executorFactory)
+ public static synchronized void unsafeSet(ExecutorFactory executorFactory)
{
FACTORY = executorFactory;
+ modified = true;
+ }
+
+ public static synchronized boolean tryUnsafeSet(ExecutorFactory executorFactory)
+ {
+ if (modified)
+ return false;
+ unsafeSet(executorFactory);
+ return true;
}
}
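
For illustration only (not part of the patch): a hedged sketch of how a simulation or test bootstrap might use the new tryUnsafeSet, assuming the static holder shown above is ExecutorFactory.Global and that the caller supplies its own ExecutorFactory.

import org.apache.cassandra.concurrent.ExecutorFactory;

// Hypothetical helper, not shipped with Cassandra.
final class SimulationBootstrap
{
    static void installFactory(ExecutorFactory interceptingFactory)
    {
        // Install the custom factory only if nothing has replaced the global one yet.
        if (!ExecutorFactory.Global.tryUnsafeSet(interceptingFactory))
            throw new IllegalStateException("executor factory was already replaced");
    }
}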
import java.util.concurrent.Callable;
-import org.apache.cassandra.utils.concurrent.RunnableFuture;
+import javax.annotation.Nullable;
import org.apache.cassandra.utils.concurrent.AsyncFuture;
+import org.apache.cassandra.utils.concurrent.RunnableFuture;
/**
* A FutureTask that utilises Cassandra's {@link AsyncFuture}, making it compatible with {@link ExecutorPlus}.
public class FutureTask<V> extends AsyncFuture<V> implements RunnableFuture<V>
{
private Callable<? extends V> call;
+ private volatile DebuggableTask debuggable;
public FutureTask(Callable<? extends V> call)
{
- this.call = call;
+ this(call, call instanceof DebuggableTask ? (DebuggableTask) call : null);
}
public FutureTask(Runnable run)
{
- this.call = callable(run);
+ this(callable(run), run instanceof DebuggableTask ? (DebuggableTask) run : null);
+ }
+
+ private FutureTask(Callable<? extends V> call, DebuggableTask debuggable)
+ {
+ this.call = call;
+ this.debuggable = debuggable;
+ }
+
+ @Nullable
+ DebuggableTask debuggableTask()
+ {
+ return debuggable;
}
V call() throws Exception
finally
{
call = null;
+ debuggable = null;
}
}
long prevStopCheck = 0;
long soleSpinnerSpinTime = 0;
+ private final AtomicReference<Runnable> currentTask = new AtomicReference<>();
+
SEPWorker(ThreadGroup threadGroup, Long workerId, Work initialState, SharedExecutorPool pool)
{
this.pool = pool;
thread.start();
}
+ /**
+ * @return the current {@link DebuggableTask}, if one exists
+ */
+ public DebuggableTask currentDebuggableTask()
+ {
+ // the reference can change after the null check, so work from the local copy
+ Runnable task = currentTask.get();
+
+ // Local read and mutation Runnables are themselves debuggable
+ if (task instanceof DebuggableTask)
+ return (DebuggableTask) task;
+
+ if (task instanceof FutureTask)
+ return ((FutureTask<?>) task).debuggableTask();
+
+ return null;
+ }
+
public void run()
{
- /**
+ /*
* we maintain two important invariants:
* 1) after exiting spinning phase, we ensure at least one more task on _each_ queue will be processed
* promptly after we begin, assuming any are outstanding on any pools. this is to permit producers to
if (assigned == null)
continue;
if (SET_THREAD_NAME)
- Thread.currentThread().setName(assigned.name + "-" + workerId);
+ Thread.currentThread().setName(assigned.name + '-' + workerId);
+
task = assigned.tasks.poll();
+ currentTask.lazySet(task);
// if we do have tasks assigned, nobody will change our state so we can simply set it to WORKING
// (which is also a state that will never be interrupted externally)
break;
task = assigned.tasks.poll();
+ currentTask.lazySet(task);
}
// return our work permit, and maybe signal shutdown
+ currentTask.lazySet(null);
+
if (status != RETURNED_WORK_PERMIT)
assigned.returnWorkPermit();
logger.error("Unexpected exception killed worker", t);
}
}
+ finally
+ {
+ currentTask.lazySet(null);
+ pool.workerEnded(this);
+ }
}
// try to assign this worker the provided work
return assigned != null;
}
}
+
+ @Override
+ public String toString()
+ {
+ return thread.getName();
+ }
+
+ @Override
+ public int hashCode()
+ {
+ return workerId.intValue();
+ }
+
+ @Override
+ public boolean equals(Object obj)
+ {
+ return obj == this;
+ }
}
*/
package org.apache.cassandra.concurrent;
+import java.util.Collections;
import java.util.List;
import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.LockSupport;
+import java.util.stream.Collectors;
+
+import org.apache.cassandra.concurrent.DebuggableTask.RunningDebuggableTask;
import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
import static org.apache.cassandra.concurrent.SEPWorker.Work;
final ConcurrentSkipListMap<Long, SEPWorker> spinning = new ConcurrentSkipListMap<>();
// the collection of threads that have been asked to stop/deschedule - new workers are scheduled from here last
final ConcurrentSkipListMap<Long, SEPWorker> descheduled = new ConcurrentSkipListMap<>();
+ // All SEPWorkers that are currently running
+ private final Set<SEPWorker> allWorkers = Collections.newSetFromMap(new ConcurrentHashMap<>());
volatile boolean shuttingDown = false;
return;
if (!work.isStop())
- new SEPWorker(threadGroup, workerId.incrementAndGet(), work, this);
+ {
+ SEPWorker worker = new SEPWorker(threadGroup, workerId.incrementAndGet(), work, this);
+ allWorkers.add(worker);
+ }
+ }
+
+ void workerEnded(SEPWorker worker)
+ {
+ allWorkers.remove(worker);
+ }
+
+ public List<RunningDebuggableTask> runningTasks()
+ {
+ return allWorkers.stream()
+ .map(worker -> new RunningDebuggableTask(worker.toString(), worker.currentDebuggableTask()))
+ .filter(RunningDebuggableTask::hasTask)
+ .collect(Collectors.toList());
}
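
A downstream consumer (a virtual table, for instance) could surface this result roughly as follows; how the pool instance is obtained and the printing are illustrative assumptions only:

    // Hypothetical usage sketch, assuming "pool" is a SharedExecutorPool instance
    // and that creation times are comparable to System.nanoTime()
    for (DebuggableTask.RunningDebuggableTask running : pool.runningTasks())
    {
        System.out.printf("%s: %s (created %d ns ago)%n",
                          running.threadId(),
                          running.description(),
                          System.nanoTime() - running.creationTimeNanos());
    }
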
void maybeStartSpinningWorker()
import java.util.concurrent.Callable;
+import org.apache.cassandra.concurrent.DebuggableTask.RunnableDebuggableTask;
import org.apache.cassandra.utils.Shared;
import org.apache.cassandra.utils.WithResources;
import org.apache.cassandra.utils.concurrent.RunnableFuture;
@Override
public Runnable toExecute(Runnable runnable)
{
+ if (runnable instanceof RunnableDebuggableTask)
+ return ExecutionFailure.suppressingDebuggable(ExecutorLocals.propagate(), (RunnableDebuggableTask) runnable);
+
// no reason to propagate exception when it is inaccessible to caller
return ExecutionFailure.suppressing(ExecutorLocals.propagate(), runnable);
}
/** property for the interval on which the repeated client warnings and diagnostic events about disk usage are ignored */
DISK_USAGE_NOTIFY_INTERVAL_MS("cassandra.disk_usage.notify_interval_ms", Long.toString(TimeUnit.MINUTES.toMillis(30))),
+ /** Controls the type of buffer (heap/direct) used for shared scratch buffers */
+ DATA_OUTPUT_BUFFER_ALLOCATE_TYPE("cassandra.dob.allocate_type"),
+
// for specific tests
ORG_APACHE_CASSANDRA_CONF_CASSANDRA_RELEVANT_PROPERTIES_TEST("org.apache.cassandra.conf.CassandraRelevantPropertiesTest"),
ORG_APACHE_CASSANDRA_DB_VIRTUAL_SYSTEM_PROPERTIES_TABLE_TEST("org.apache.cassandra.db.virtual.SystemPropertiesTableTest"),
-
;
System.setProperty(key, Long.toString(value));
}
+ /**
+ * Gets the value of a system property as an enum, calling {@link String#toUpperCase()} first.
+ *
+ * @param defaultValue to return when not defined
+ * @param <T> type
+ * @return enum value
+ */
+ public <T extends Enum<T>> T getEnum(T defaultValue)
+ {
+ return getEnum(true, defaultValue);
+ }
+
+ /**
+ * Gets the value of a system property as an enum, optionally calling {@link String#toUpperCase()} first.
+ *
+ * @param toUppercase whether to upper-case the raw value before converting it to the enum
+ * @param defaultValue to return when not defined
+ * @param <T> type
+ * @return enum value
+ */
+ public <T extends Enum<T>> T getEnum(boolean toUppercase, T defaultValue)
+ {
+ String value = System.getProperty(key);
+ if (value == null)
+ return defaultValue;
+ return Enum.valueOf(defaultValue.getDeclaringClass(), toUppercase ? value.toUpperCase() : value);
+ }
+
+ /**
+ * Sets the given enum's {@link Enum#name()} as the value of this system property.
+ * @param value to set
+ */
+ public void setEnum(Enum<?> value)
+ {
+ System.setProperty(key, value.name());
+ }
+
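
A brief, hedged sketch of how the new enum accessors round-trip a value; the AllocationType enum and the wrapping method are hypothetical stand-ins, only the property constant itself comes from this patch:

    // Hypothetical example: store and read back an enum-valued system property
    enum AllocationType { HEAP, DIRECT }

    void example()
    {
        CassandraRelevantProperties prop = CassandraRelevantProperties.DATA_OUTPUT_BUFFER_ALLOCATE_TYPE;
        prop.setEnum(AllocationType.DIRECT);                      // writes "DIRECT" via System.setProperty
        AllocationType type = prop.getEnum(AllocationType.HEAP);  // reads it back; HEAP if the property is unset
    }
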
public interface PropertyConverter<T>
{
T convert(String value);
import java.util.TreeMap;
import java.util.function.Supplier;
+import javax.annotation.Nullable;
+
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
public Integer unlogged_batch_across_partitions_warn_threshold = 10;
public volatile Integer concurrent_compactors;
@Replaces(oldName = "compaction_throughput_mb_per_sec", converter = Converters.MEBIBYTES_PER_SECOND_DATA_RATE, deprecated = true)
- public volatile DataRateSpec.IntMebibytesPerSecondBound compaction_throughput = new DataRateSpec.IntMebibytesPerSecondBound("16MiB/s");
+ public volatile DataRateSpec.LongBytesPerSecondBound compaction_throughput = new DataRateSpec.LongBytesPerSecondBound("64MiB/s");
@Replaces(oldName = "compaction_large_partition_warning_threshold_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
public volatile DataStorageSpec.IntMebibytesBound compaction_large_partition_warning_threshold = new DataStorageSpec.IntMebibytesBound("100MiB");
@Replaces(oldName = "min_free_space_per_drive_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
public volatile int concurrent_materialized_view_builders = 1;
public volatile int reject_repair_compaction_threshold = Integer.MAX_VALUE;
+ // The number of executors to use for building secondary indexes
+ public int concurrent_index_builders = 2;
+
/**
* @deprecated retry support removed on CASSANDRA-10992
*/
@Deprecated
public int max_streaming_retries = 3;
- @Replaces(oldName = "stream_throughput_outbound_megabits_per_sec", converter = Converters.MEGABITS_TO_MEBIBYTES_PER_SECOND_DATA_RATE, deprecated = true)
- public volatile DataRateSpec.IntMebibytesPerSecondBound stream_throughput_outbound = new DataRateSpec.IntMebibytesPerSecondBound("24MiB/s");
- @Replaces(oldName = "inter_dc_stream_throughput_outbound_megabits_per_sec", converter = Converters.MEGABITS_TO_MEBIBYTES_PER_SECOND_DATA_RATE, deprecated = true)
- public volatile DataRateSpec.IntMebibytesPerSecondBound inter_dc_stream_throughput_outbound = new DataRateSpec.IntMebibytesPerSecondBound("24MiB/s");
+ @Replaces(oldName = "stream_throughput_outbound_megabits_per_sec", converter = Converters.MEGABITS_TO_BYTES_PER_SECOND_DATA_RATE, deprecated = true)
+ public volatile DataRateSpec.LongBytesPerSecondBound stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound("24MiB/s");
+ @Replaces(oldName = "inter_dc_stream_throughput_outbound_megabits_per_sec", converter = Converters.MEGABITS_TO_BYTES_PER_SECOND_DATA_RATE, deprecated = true)
+ public volatile DataRateSpec.LongBytesPerSecondBound inter_dc_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound("24MiB/s");
- public volatile DataRateSpec.IntMebibytesPerSecondBound entire_sstable_stream_throughput_outbound = new DataRateSpec.IntMebibytesPerSecondBound("24MiB/s");
- public volatile DataRateSpec.IntMebibytesPerSecondBound entire_sstable_inter_dc_stream_throughput_outbound = new DataRateSpec.IntMebibytesPerSecondBound("24MiB/s");
+ public volatile DataRateSpec.LongBytesPerSecondBound entire_sstable_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound("24MiB/s");
+ public volatile DataRateSpec.LongBytesPerSecondBound entire_sstable_inter_dc_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound("24MiB/s");
public String[] data_file_directories = new String[0];
// When true, new CDC mutations are rejected/blocked when reaching max CDC storage.
// When false, new CDC mutations can always be added. But it will remove the oldest CDC commit log segment on full.
public volatile boolean cdc_block_writes = true;
+ // When true, CDC data in SSTables goes through the commit log during internode streaming, e.g. repair
+ // When false, it behaves the same as normal streaming.
+ public volatile boolean cdc_on_repair_enabled = true;
public String cdc_raw_directory;
@Replaces(oldName = "cdc_total_space_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
public DataStorageSpec.IntMebibytesBound cdc_total_space = new DataStorageSpec.IntMebibytesBound("0MiB");
@Replaces(oldName = "trickle_fsync_interval_in_kb", converter = Converters.KIBIBYTES_DATASTORAGE, deprecated = true)
public DataStorageSpec.IntKibibytesBound trickle_fsync_interval = new DataStorageSpec.IntKibibytesBound("10240KiB");
- @Replaces(oldName = "sstable_preemptive_open_interval_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
+ @Nullable
+ @Replaces(oldName = "sstable_preemptive_open_interval_in_mb", converter = Converters.NEGATIVE_MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
public volatile DataStorageSpec.IntMebibytesBound sstable_preemptive_open_interval = new DataStorageSpec.IntMebibytesBound("50MiB");
public volatile boolean key_cache_migrate_during_compaction = true;
public DataStorageSpec.LongMebibytesBound paxos_cache_size = null;
- @Replaces(oldName = "cache_load_timeout_seconds", converter = Converters.SECONDS_DURATION, deprecated = true)
+ @Replaces(oldName = "cache_load_timeout_seconds", converter = Converters.NEGATIVE_SECONDS_DURATION, deprecated = true)
public DurationSpec.IntSecondsBound cache_load_timeout = new DurationSpec.IntSecondsBound("30s");
private static boolean isClientMode = false;
@Replaces(oldName = "index_summary_capacity_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_LONG, deprecated = true)
public volatile DataStorageSpec.LongMebibytesBound index_summary_capacity;
- @Replaces(oldName = "index_summary_resize_interval_in_minutes", converter = Converters.MINUTES_DURATION, deprecated = true)
+ @Nullable
+ @Replaces(oldName = "index_summary_resize_interval_in_minutes", converter = Converters.MINUTES_CUSTOM_DURATION, deprecated = true)
public volatile DurationSpec.IntMinutesBound index_summary_resize_interval = new DurationSpec.IntMinutesBound("60m");
@Replaces(oldName = "gc_log_threshold_in_ms", converter = Converters.MILLIS_DURATION_INT, deprecated = true)
- public DurationSpec.IntMillisecondsBound gc_log_threshold = new DurationSpec.IntMillisecondsBound("200ms");
+ public volatile DurationSpec.IntMillisecondsBound gc_log_threshold = new DurationSpec.IntMillisecondsBound("200ms");
@Replaces(oldName = "gc_warn_threshold_in_ms", converter = Converters.MILLIS_DURATION_INT, deprecated = true)
- public DurationSpec.IntMillisecondsBound gc_warn_threshold = new DurationSpec.IntMillisecondsBound("1s");
+ public volatile DurationSpec.IntMillisecondsBound gc_warn_threshold = new DurationSpec.IntMillisecondsBound("1s");
// TTL for different types of trace events.
@Replaces(oldName = "tracetype_query_ttl", converter = Converters.SECONDS_DURATION, deprecated=true)
public volatile boolean auto_optimise_preview_repair_streams = false;
// see CASSANDRA-17048 and the comment in cassandra.yaml
- public boolean enable_uuid_sstable_identifiers = false;
+ public boolean uuid_sstable_identifiers_enabled = false;
/**
* Client mode means that the process is a pure client, that uses C* code base but does
public volatile Set<ConsistencyLevel> write_consistency_levels_warned = Collections.emptySet();
public volatile Set<ConsistencyLevel> write_consistency_levels_disallowed = Collections.emptySet();
public volatile boolean user_timestamps_enabled = true;
+ public volatile boolean alter_table_enabled = true;
public volatile boolean group_by_enabled = true;
public volatile boolean drop_truncate_table_enabled = true;
+ public volatile boolean drop_keyspace_enabled = true;
public volatile boolean secondary_indexes_enabled = true;
public volatile boolean uncompressed_tables_enabled = true;
public volatile boolean compact_tables_enabled = true;
public volatile boolean read_before_write_list_operations_enabled = true;
public volatile boolean allow_filtering_enabled = true;
+ public volatile boolean simplestrategy_enabled = true;
public volatile DataStorageSpec.LongBytesBound collection_size_warn_threshold = null;
public volatile DataStorageSpec.LongBytesBound collection_size_fail_threshold = null;
public volatile int items_per_collection_warn_threshold = -1;
public volatile DataStorageSpec.LongBytesBound data_disk_usage_max_disk_size = null;
public volatile int minimum_replication_factor_warn_threshold = -1;
public volatile int minimum_replication_factor_fail_threshold = -1;
+ public volatile int maximum_replication_factor_warn_threshold = -1;
+ public volatile int maximum_replication_factor_fail_threshold = -1;
public volatile DurationSpec.LongNanosecondsBound streaming_state_expires = new DurationSpec.LongNanosecondsBound("3d");
public volatile DataStorageSpec.LongBytesBound streaming_state_size = new DataStorageSpec.LongBytesBound("40MiB");
public volatile int max_top_size_partition_count = 10;
public volatile int max_top_tombstone_partition_count = 10;
- public volatile DataStorageSpec.LongBytesBound min_tracked_partition_size_bytes = new DataStorageSpec.LongBytesBound("1MiB");
+ public volatile DataStorageSpec.LongBytesBound min_tracked_partition_size = new DataStorageSpec.LongBytesBound("1MiB");
public volatile long min_tracked_partition_tombstone_count = 5000;
public volatile boolean top_partitions_enabled = true;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
+import static org.apache.cassandra.config.DataRateSpec.DataRateUnit.MEBIBYTES_PER_SECOND;
+
/**
* Converters for backward compatibility with the old cassandra.yaml where duration, data rate and
* data storage configuration parameters were provided only by value and the expected unit was part of the configuration
IDENTITY(null, null, o -> o, o -> o),
MILLIS_DURATION_LONG(Long.class, DurationSpec.LongMillisecondsBound.class,
DurationSpec.LongMillisecondsBound::new,
- o -> o.toMilliseconds()),
+ o -> o == null ? null : o.toMilliseconds()),
MILLIS_DURATION_INT(Integer.class, DurationSpec.IntMillisecondsBound.class,
DurationSpec.IntMillisecondsBound::new,
- DurationSpec.IntMillisecondsBound::toMilliseconds),
+ o -> o == null ? null : o.toMilliseconds()),
MILLIS_DURATION_DOUBLE(Double.class, DurationSpec.IntMillisecondsBound.class,
o -> Double.isNaN(o) ? new DurationSpec.IntMillisecondsBound(0) :
new DurationSpec.IntMillisecondsBound(o, TimeUnit.MILLISECONDS),
o -> o == null ? -1 : o.toMilliseconds()),
SECONDS_DURATION(Integer.class, DurationSpec.IntSecondsBound.class,
DurationSpec.IntSecondsBound::new,
- DurationSpec.IntSecondsBound::toSeconds),
+ o -> o == null ? null : o.toSeconds()),
NEGATIVE_SECONDS_DURATION(Integer.class, DurationSpec.IntSecondsBound.class,
o -> o < 0 ? new DurationSpec.IntSecondsBound(0) : new DurationSpec.IntSecondsBound(o),
- DurationSpec.IntSecondsBound::toSeconds),
+ o -> o == null ? null : o.toSeconds()),
/**
* This converter is used to support backward compatibility for Duration parameters where we added the opportunity
* for the users to add a unit in the parameters' values but we didn't change the names. (key_cache_save_period,
*/
SECONDS_CUSTOM_DURATION(String.class, DurationSpec.IntSecondsBound.class,
DurationSpec.IntSecondsBound::inSecondsString,
- o -> Long.toString(o.toSeconds())),
- MINUTES_DURATION(Integer.class, DurationSpec.IntMinutesBound.class,
- DurationSpec.IntMinutesBound::new,
- DurationSpec.IntMinutesBound::toMinutes),
+ o -> o == null ? null : Long.toString(o.toSeconds())),
+ /**
+ * This converter is used to support backward compatibility for parameters where -1 was historically used as the value meaning "disabled".
+ * Example: index_summary_resize_interval_in_minutes = -1 and index_summary_resize_interval = null are equivalent.
+ */
+ MINUTES_CUSTOM_DURATION(Integer.class, DurationSpec.IntMinutesBound.class,
+ o -> o == -1 ? null : new DurationSpec.IntMinutesBound(o),
+ o -> o == null ? -1 : o.toMinutes()),
MEBIBYTES_DATA_STORAGE_LONG(Long.class, DataStorageSpec.LongMebibytesBound.class,
DataStorageSpec.LongMebibytesBound::new,
- DataStorageSpec.LongMebibytesBound::toMebibytes),
+ o -> o == null ? null : o.toMebibytes()),
MEBIBYTES_DATA_STORAGE_INT(Integer.class, DataStorageSpec.IntMebibytesBound.class,
DataStorageSpec.IntMebibytesBound::new,
- DataStorageSpec.IntMebibytesBound::toMebibytes),
+ o -> o == null ? null : o.toMebibytes()),
+ NEGATIVE_MEBIBYTES_DATA_STORAGE_INT(Integer.class, DataStorageSpec.IntMebibytesBound.class,
+ o -> o < 0 ? null : new DataStorageSpec.IntMebibytesBound(o),
+ o -> o == null ? -1 : o.toMebibytes()),
KIBIBYTES_DATASTORAGE(Integer.class, DataStorageSpec.IntKibibytesBound.class,
DataStorageSpec.IntKibibytesBound::new,
- DataStorageSpec.IntKibibytesBound::toKibibytes),
+ o -> o == null ? null : o.toKibibytes()),
BYTES_DATASTORAGE(Integer.class, DataStorageSpec.IntBytesBound.class,
DataStorageSpec.IntBytesBound::new,
- DataStorageSpec.IntBytesBound::toBytes),
+ o -> o == null ? null : o.toBytes()),
/**
* This converter is used to support backward compatibility for parameters where in the past negative number was used as a value
* Example: native_transport_max_concurrent_requests_in_bytes_per_ip = -1 and native_transport_max_request_data_in_flight_per_ip = null
*/
BYTES_CUSTOM_DATASTORAGE(Long.class, DataStorageSpec.LongBytesBound.class,
o -> o == -1 ? null : new DataStorageSpec.LongBytesBound(o),
- DataStorageSpec.LongBytesBound::toBytes),
- MEBIBYTES_PER_SECOND_DATA_RATE(Integer.class, DataRateSpec.IntMebibytesPerSecondBound.class,
- DataRateSpec.IntMebibytesPerSecondBound::new,
- DataRateSpec.IntMebibytesPerSecondBound::toMebibytesPerSecondAsInt),
+ o -> o == null ? null : o.toBytes()),
+ MEBIBYTES_PER_SECOND_DATA_RATE(Integer.class, DataRateSpec.LongBytesPerSecondBound.class,
+ i -> new DataRateSpec.LongBytesPerSecondBound(i, MEBIBYTES_PER_SECOND),
+ o -> o == null ? null : o.toMebibytesPerSecondAsInt()),
/**
* This converter is a custom one to support backward compatibility for stream_throughput_outbound and
- * inter_dc_stream_throughput_outbound which were provided in megatibs per second prior CASSANDRA-15234.
+ * inter_dc_stream_throughput_outbound which were provided in megabits per second prior to CASSANDRA-15234.
*/
- MEGABITS_TO_MEBIBYTES_PER_SECOND_DATA_RATE(Integer.class, DataRateSpec.IntMebibytesPerSecondBound.class,
- i -> DataRateSpec.IntMebibytesPerSecondBound.megabitsPerSecondInMebibytesPerSecond(i),
- DataRateSpec.IntMebibytesPerSecondBound::toMegabitsPerSecondAsInt);
+ MEGABITS_TO_BYTES_PER_SECOND_DATA_RATE(Integer.class, DataRateSpec.LongBytesPerSecondBound.class,
+ i -> DataRateSpec.LongBytesPerSecondBound.megabitsPerSecondInBytesPerSecond(i),
+ o -> o == null ? null : o.toMegabitsPerSecondAsInt());
private final Class<?> oldType;
private final Class<?> newType;
private final Function<Object, Object> convert;
*/
public Object unconvert(Object value)
{
- if (value == null) return null;
return reverseConvert.apply(value);
}
}
import java.util.regex.Pattern;
import java.util.stream.Collectors;
+import com.google.common.math.DoubleMath;
import com.google.common.primitives.Ints;
import static org.apache.cassandra.config.DataRateSpec.DataRateUnit.BYTES_PER_SECOND;
-import static org.apache.cassandra.config.DataRateSpec.DataRateUnit.MEBIBYTES_PER_SECOND;
/**
* Represents a data rate type used for cassandra configuration. It supports the opportunity for the users to be able to
*/
private static final Pattern UNITS_PATTERN = Pattern.compile("^(\\d+)(MiB/s|KiB/s|B/s)$");
- private final double quantity;
+ private final long quantity;
private final DataRateUnit unit;
throw new IllegalArgumentException("Invalid data rate: " + value + " Accepted units: MiB/s, KiB/s, B/s where " +
"case matters and " + "only non-negative values are valid");
- quantity = (double) Long.parseLong(matcher.group(1));
+ quantity = Long.parseLong(matcher.group(1));
unit = DataRateUnit.fromSymbol(matcher.group(2));
}
validateQuantity(value, quantity(), unit(), minUnit, max);
}
- private DataRateSpec(double quantity, DataRateUnit unit, DataRateUnit minUnit, long max)
+ private DataRateSpec(long quantity, DataRateUnit unit, DataRateUnit minUnit, long max)
{
this.quantity = quantity;
this.unit = unit;
@Override
public String toString()
{
- return Math.round(quantity) + unit.symbol;
+ return (DoubleMath.isMathematicalInteger(quantity) ? (long) quantity : quantity) + unit.symbol;
}
/**
* @param quantity where quantity shouldn't be bigger than Long.MAX_VALUE - 1 in bytes per second
* @param unit in which the provided quantity is
*/
- public LongBytesPerSecondBound(double quantity, DataRateUnit unit)
+ public LongBytesPerSecondBound(long quantity, DataRateUnit unit)
{
super(quantity, unit, BYTES_PER_SECOND, Long.MAX_VALUE);
}
{
this(bytesPerSecond, BYTES_PER_SECOND);
}
- }
-
- /**
- * Represents a data rate used for Cassandra configuration. The bound is [0, Integer.MAX_VALUE) in mebibytes per second.
- * If the user sets a different unit - we still validate that converted to mebibytes per second the quantity will not exceed
- * that upper bound. (CASSANDRA-17571)
- */
- public final static class IntMebibytesPerSecondBound extends DataRateSpec
- {
- /**
- * Creates a {@code DataRateSpec.IntMebibytesPerSecondBound} of the specified amount with bound [0, Integer.MAX_VALUE) mebibytes per second.
- *
- * @param value the data rate
- */
- public IntMebibytesPerSecondBound(String value)
- {
- super(value, MEBIBYTES_PER_SECOND, Integer.MAX_VALUE);
- }
-
- /**
- * Creates a {@code DataRateSpec.IntMebibytesPerSecondBound} of the specified amount in the specified unit.
- *
- * @param quantity where quantity shouldn't be bigger than Integer.MAX_VALUE - 1 in mebibytes per second
- * @param unit in which the provided quantity is
- */
- public IntMebibytesPerSecondBound(double quantity, DataRateUnit unit)
- {
- super(quantity, unit, MEBIBYTES_PER_SECOND, Integer.MAX_VALUE);
- }
-
- /**
- * Creates a {@code DataRateSpec.IntMebibytesPerSecondBound} of the specified amount in mebibytes per second.
- *
- * @param mebibytesPerSecond where mebibytesPerSecond shouldn't be bigger than Long.MAX_VALUE-1
- */
- public IntMebibytesPerSecondBound(long mebibytesPerSecond)
- {
- this (mebibytesPerSecond, MEBIBYTES_PER_SECOND);
- }
// this one should be used only for backward compatibility for stream_throughput_outbound and inter_dc_stream_throughput_outbound
// which were in megabits per second in 4.0. Do not start using it for any new properties
- public static IntMebibytesPerSecondBound megabitsPerSecondInMebibytesPerSecond(long megabitsPerSecond)
+ @Deprecated
+ public static LongBytesPerSecondBound megabitsPerSecondInBytesPerSecond(long megabitsPerSecond)
{
- final double MEBIBYTES_PER_MEGABIT = 0.119209289550781;
- double mebibytesPerSecond = (double) megabitsPerSecond * MEBIBYTES_PER_MEGABIT;
+ final long BYTES_PER_MEGABIT = 125_000;
+ long bytesPerSecond = megabitsPerSecond * BYTES_PER_MEGABIT;
if (megabitsPerSecond >= Integer.MAX_VALUE)
throw new IllegalArgumentException("Invalid data rate: " + megabitsPerSecond + " megabits per second; " +
- "stream_throughput_outbound and inter_dc_stream_throughput_outbound" +
- " should be between 0 and " + Integer.MAX_VALUE + " in megabits per second");
+ "stream_throughput_outbound and inter_dc_stream_throughput_outbound" +
+ " should be between 0 and " + (Integer.MAX_VALUE - 1) + " in megabits per second");
- return new IntMebibytesPerSecondBound(mebibytesPerSecond, MEBIBYTES_PER_SECOND);
+ return new LongBytesPerSecondBound(bytesPerSecond, BYTES_PER_SECOND);
}
}
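
As a quick sanity check of the new constant: the legacy settings were in decimal megabits, and 1 Mb/s = 1,000,000 bits/s = 125,000 B/s, so a legacy value of 200 Mb/s now maps to 200 × 125,000 = 25,000,000 B/s, roughly 23.84 MiB/s, which agrees with the 0.1192... MiB-per-megabit factor the old code used.
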
{
if (d > MAX / (MEGABITS_PER_MEBIBYTE))
return MAX;
- return Math.round(d * MEGABITS_PER_MEBIBYTE);
+ return d * MEGABITS_PER_MEBIBYTE;
}
public double convert(double source, DataRateUnit sourceUnit)
{
return Ints.saturatedCast(unit().toKibibytes(quantity()));
}
+
+ /**
+ * @return the amount of data storage in bytes.
+ */
+ public long toBytesInLong()
+ {
+ return unit().toBytes(quantity());
+ }
}
/**
{
return Ints.saturatedCast(unit().toMebibytes(quantity()));
}
+
+ /**
+ * Returns the amount of data storage in bytes as {@code long}
+ *
+ * @return the amount of data storage in bytes.
+ */
+ public long toBytesInLong()
+ {
+ return unit().toBytes(quantity());
+ }
}
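
The toBytesInLong() additions appear to exist so that callers needing an exact byte count are no longer routed through an int-returning conversion that saturates at Integer.MAX_VALUE; a rough illustration (the 3 GiB figure is an arbitrary assumption):

    // Illustrative only: a 3 GiB threshold exceeds Integer.MAX_VALUE when expressed in bytes
    DataStorageSpec.IntMebibytesBound threshold = new DataStorageSpec.IntMebibytesBound("3072MiB");
    long exactBytes = threshold.toBytesInLong(); // 3_221_225_472, preserved exactly as a long
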
public enum DataStorageUnit
import static org.apache.cassandra.config.CassandraRelevantProperties.OS_ARCH;
import static org.apache.cassandra.config.CassandraRelevantProperties.SUN_ARCH_DATA_MODEL;
import static org.apache.cassandra.config.CassandraRelevantProperties.TEST_JVM_DTEST_DISABLE_SSL;
+import static org.apache.cassandra.config.DataRateSpec.DataRateUnit.BYTES_PER_SECOND;
+import static org.apache.cassandra.config.DataRateSpec.DataRateUnit.MEBIBYTES_PER_SECOND;
import static org.apache.cassandra.config.DataStorageSpec.DataStorageUnit.MEBIBYTES;
import static org.apache.cassandra.io.util.FileUtils.ONE_GIB;
import static org.apache.cassandra.io.util.FileUtils.ONE_MIB;
if (clientInitialized)
return;
clientInitialized = true;
-
+ setDefaultFailureDetector();
Config.setClientMode(true);
conf = new Config();
diskOptimizationStrategy = new SpinningDiskOptimizationStrategy();
//InetAddressAndPort and get the right defaults
InetAddressAndPort.initializeDefaultPort(getStoragePort());
- // below 2 checks are needed in order to match the pre-CASSANDRA-15234 upper bound for those parameters which were still in megabits per second
- if (conf.stream_throughput_outbound.toMegabitsPerSecond() >= Integer.MAX_VALUE)
- {
- throw new ConfigurationException("Invalid value of stream_throughput_outbound: " + conf.stream_throughput_outbound.toString(), false);
- }
-
- if (conf.inter_dc_stream_throughput_outbound.toMegabitsPerSecond() >= Integer.MAX_VALUE)
- {
- throw new ConfigurationException("Invalid value of inter_dc_stream_throughput_outbound: " + conf.inter_dc_stream_throughput_outbound.toString(), false);
- }
+ validateUpperBoundStreamingConfig();
if (conf.auto_snapshot_ttl != null)
{
if (preparedStatementsCacheSizeInMiB == 0)
throw new NumberFormatException(); // to escape duplicating error message
+
+ // we need this assignment for the Settings virtual table - CASSANDRA-17734
+ conf.prepared_statements_cache_size = new DataStorageSpec.LongMebibytesBound(preparedStatementsCacheSizeInMiB);
}
catch (NumberFormatException e)
{
if (keyCacheSizeInMiB < 0)
throw new NumberFormatException(); // to escape duplicating error message
+
+ // we need this assignment for the Settings Virtual Table - CASSANDRA-17734
+ conf.key_cache_size = new DataStorageSpec.LongMebibytesBound(keyCacheSizeInMiB);
}
catch (NumberFormatException e)
{
+ conf.paxos_cache_size + "', supported values are <integer> >= 0.", false);
}
+ // we need this assignment for the Settings virtual table - CASSANDRA-17735
+ conf.counter_cache_size = new DataStorageSpec.LongMebibytesBound(counterCacheSizeInMiB);
+
// if set to empty/"auto" then use 5% of Heap size
indexSummaryCapacityInMiB = (conf.index_summary_capacity == null)
? Math.max(1, (int) (Runtime.getRuntime().totalMemory() * 0.05 / 1024 / 1024))
throw new ConfigurationException("index_summary_capacity option was set incorrectly to '"
+ conf.index_summary_capacity.toString() + "', it should be a non-negative integer.", false);
+ // we need this assignment for the Settings virtual table - CASSANDRA-17735
+ conf.index_summary_capacity = new DataStorageSpec.LongMebibytesBound(indexSummaryCapacityInMiB);
+
if (conf.user_defined_functions_fail_timeout.toMilliseconds() < conf.user_defined_functions_warn_timeout.toMilliseconds())
throw new ConfigurationException("user_defined_functions_warn_timeout must less than user_defined_function_fail_timeout", false);
logInitializationOutcome(logger);
}
+ @VisibleForTesting
+ static void validateUpperBoundStreamingConfig() throws ConfigurationException
+ {
+ // the first two checks are needed in order to match the pre-CASSANDRA-15234 upper bound for those parameters which were still in megabits per second
+ if (conf.stream_throughput_outbound.toMegabitsPerSecond() >= Integer.MAX_VALUE)
+ {
+ throw new ConfigurationException("Invalid value of stream_throughput_outbound: " + conf.stream_throughput_outbound.toString(), false);
+ }
+
+ if (conf.inter_dc_stream_throughput_outbound.toMegabitsPerSecond() >= Integer.MAX_VALUE)
+ {
+ throw new ConfigurationException("Invalid value of inter_dc_stream_throughput_outbound: " + conf.inter_dc_stream_throughput_outbound.toString(), false);
+ }
+
+ if (conf.entire_sstable_stream_throughput_outbound.toMebibytesPerSecond() >= Integer.MAX_VALUE)
+ {
+ throw new ConfigurationException("Invalid value of entire_sstable_stream_throughput_outbound: " + conf.entire_sstable_stream_throughput_outbound.toString(), false);
+ }
+
+ if (conf.entire_sstable_inter_dc_stream_throughput_outbound.toMebibytesPerSecond() >= Integer.MAX_VALUE)
+ {
+ throw new ConfigurationException("Invalid value of entire_sstable_inter_dc_stream_throughput_outbound: " + conf.entire_sstable_inter_dc_stream_throughput_outbound.toString(), false);
+ }
+
+ if (conf.compaction_throughput.toMebibytesPerSecond() >= Integer.MAX_VALUE)
+ {
+ throw new ConfigurationException("Invalid value of compaction_throughput: " + conf.compaction_throughput.toString(), false);
+ }
+ }
+
@VisibleForTesting
static void applyConcurrentValidations(Config config)
{
public static void setColumnIndexSize(int val)
{
- DataStorageSpec.IntKibibytesBound memory = new DataStorageSpec.IntKibibytesBound(val);
- checkValidForByteConversion(memory, "column_index_size");
- conf.column_index_size = new DataStorageSpec.IntKibibytesBound(val);
+ conf.column_index_size = createIntKibibyteBoundAndEnsureItIsValidForByteConversion(val, "column_index_size");
}
public static int getColumnIndexCacheSize()
public static void setColumnIndexCacheSize(int val)
{
- DataStorageSpec.IntKibibytesBound memory = new DataStorageSpec.IntKibibytesBound(val);
- checkValidForByteConversion(memory, "column_index_cache_size");
- conf.column_index_cache_size = new DataStorageSpec.IntKibibytesBound(val);
+ conf.column_index_cache_size = createIntKibibyteBoundAndEnsureItIsValidForByteConversion(val, "column_index_cache_size");
}
public static int getBatchSizeWarnThreshold()
public static long getBatchSizeFailThreshold()
{
- return conf.batch_size_fail_threshold.toBytes();
+ return conf.batch_size_fail_threshold.toBytesInLong();
}
public static int getBatchSizeFailThresholdInKiB()
public static void setBatchSizeWarnThresholdInKiB(int threshold)
{
- DataStorageSpec.IntKibibytesBound storage = new DataStorageSpec.IntKibibytesBound(threshold);
- checkValidForByteConversion(storage, "batch_size_warn_threshold");
- conf.batch_size_warn_threshold = new DataStorageSpec.IntKibibytesBound(threshold);
+ conf.batch_size_warn_threshold = createIntKibibyteBoundAndEnsureItIsValidForByteConversion(threshold, "batch_size_warn_threshold");
}
public static void setBatchSizeFailThresholdInKiB(int threshold)
return conf.compaction_throughput.toMebibytesPerSecondAsInt();
}
+ public static double getCompactionThroughputBytesPerSec()
+ {
+ return conf.compaction_throughput.toBytesPerSecond();
+ }
+
public static double getCompactionThroughputMebibytesPerSec()
{
return conf.compaction_throughput.toMebibytesPerSecond();
}
+ @VisibleForTesting // only for testing!
+ public static void setCompactionThroughputBytesPerSec(int value)
+ {
+ if (BYTES_PER_SECOND.toMebibytesPerSecond(value) >= Integer.MAX_VALUE)
+ throw new IllegalArgumentException("compaction_throughput: " + value +
+ " is too large; it should be less than " +
+ Integer.MAX_VALUE + " in MiB/s");
+
+ conf.compaction_throughput = new DataRateSpec.LongBytesPerSecondBound(value);
+ }
+
public static void setCompactionThroughputMebibytesPerSec(int value)
{
- conf.compaction_throughput = new DataRateSpec.IntMebibytesPerSecondBound(value);
+ if (value == Integer.MAX_VALUE)
+ throw new IllegalArgumentException("compaction_throughput: " + value +
+ " is too large; it should be less than " +
+ Integer.MAX_VALUE + " in MiB/s");
+
+ conf.compaction_throughput = new DataRateSpec.LongBytesPerSecondBound(value, MEBIBYTES_PER_SECOND);
}
- public static long getCompactionLargePartitionWarningThreshold() { return conf.compaction_large_partition_warning_threshold.toBytes(); }
+ public static long getCompactionLargePartitionWarningThreshold() { return conf.compaction_large_partition_warning_threshold.toBytesInLong(); }
public static int getCompactionTombstoneWarningThreshold()
{
return conf.concurrent_validations;
}
+ public static int getConcurrentIndexBuilders()
+ {
+ return conf.concurrent_index_builders;
+ }
+
public static void setConcurrentValidations(int value)
{
value = value > 0 ? value : Integer.MAX_VALUE;
public static long getMinFreeSpacePerDriveInBytes()
{
- return conf.min_free_space_per_drive.toBytes();
+ return conf.min_free_space_per_drive.toBytesInLong();
}
public static boolean getDisableSTCSInL0()
return conf.stream_throughput_outbound.toMegabitsPerSecondAsInt();
}
+ public static double getStreamThroughputOutboundMegabitsPerSecAsDouble()
+ {
+ return conf.stream_throughput_outbound.toMegabitsPerSecond();
+ }
+
public static double getStreamThroughputOutboundMebibytesPerSec()
{
return conf.stream_throughput_outbound.toMebibytesPerSecond();
}
- public static void setStreamThroughputOutboundMegabitsPerSec(int value)
+ public static double getStreamThroughputOutboundBytesPerSec()
{
- conf.stream_throughput_outbound = DataRateSpec.IntMebibytesPerSecondBound.megabitsPerSecondInMebibytesPerSecond(value);
+ return conf.stream_throughput_outbound.toBytesPerSecond();
}
- public static int getEntireSSTableStreamThroughputOutboundMebibytesPerSecAsInt()
+ public static int getStreamThroughputOutboundMebibytesPerSecAsInt()
{
- return conf.entire_sstable_stream_throughput_outbound.toMebibytesPerSecondAsInt();
+ return conf.stream_throughput_outbound.toMebibytesPerSecondAsInt();
+ }
+
+ public static void setStreamThroughputOutboundMebibytesPerSecAsInt(int value)
+ {
+ if (MEBIBYTES_PER_SECOND.toMegabitsPerSecond(value) >= Integer.MAX_VALUE)
+ throw new IllegalArgumentException("stream_throughput_outbound: " + value +
+ " is too large; it should be less than " +
+ Integer.MAX_VALUE + " in megabits/s");
+
+ conf.stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound(value, MEBIBYTES_PER_SECOND);
+ }
+
+ public static void setStreamThroughputOutboundMegabitsPerSec(int value)
+ {
+ conf.stream_throughput_outbound = DataRateSpec.LongBytesPerSecondBound.megabitsPerSecondInBytesPerSecond(value);
}
public static double getEntireSSTableStreamThroughputOutboundMebibytesPerSec()
return conf.entire_sstable_stream_throughput_outbound.toMebibytesPerSecond();
}
+ public static double getEntireSSTableStreamThroughputOutboundBytesPerSec()
+ {
+ return conf.entire_sstable_stream_throughput_outbound.toBytesPerSecond();
+ }
+
public static void setEntireSSTableStreamThroughputOutboundMebibytesPerSec(int value)
{
- conf.entire_sstable_stream_throughput_outbound = new DataRateSpec.IntMebibytesPerSecondBound(value);
+ if (value == Integer.MAX_VALUE)
+ throw new IllegalArgumentException("entire_sstable_stream_throughput_outbound: " + value +
+ " is too large; it should be less than " +
+ Integer.MAX_VALUE + " in MiB/s");
+
+ conf.entire_sstable_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound(value, MEBIBYTES_PER_SECOND);
}
public static int getInterDCStreamThroughputOutboundMegabitsPerSec()
return conf.inter_dc_stream_throughput_outbound.toMegabitsPerSecondAsInt();
}
+ public static double getInterDCStreamThroughputOutboundMegabitsPerSecAsDouble()
+ {
+ return conf.inter_dc_stream_throughput_outbound.toMegabitsPerSecond();
+ }
+
public static double getInterDCStreamThroughputOutboundMebibytesPerSec()
{
return conf.inter_dc_stream_throughput_outbound.toMebibytesPerSecond();
}
+ public static double getInterDCStreamThroughputOutboundBytesPerSec()
+ {
+ return conf.inter_dc_stream_throughput_outbound.toBytesPerSecond();
+ }
+
+ public static int getInterDCStreamThroughputOutboundMebibytesPerSecAsInt()
+ {
+ return conf.inter_dc_stream_throughput_outbound.toMebibytesPerSecondAsInt();
+ }
+
+ public static void setInterDCStreamThroughputOutboundMebibytesPerSecAsInt(int value)
+ {
+ if (MEBIBYTES_PER_SECOND.toMegabitsPerSecond(value) >= Integer.MAX_VALUE)
+ throw new IllegalArgumentException("inter_dc_stream_throughput_outbound: " + value +
+ " is too large; it should be less than " +
+ Integer.MAX_VALUE + " in megabits/s");
+
+ conf.inter_dc_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound(value, MEBIBYTES_PER_SECOND);
+ }
+
public static void setInterDCStreamThroughputOutboundMegabitsPerSec(int value)
{
- conf.inter_dc_stream_throughput_outbound = DataRateSpec.IntMebibytesPerSecondBound.megabitsPerSecondInMebibytesPerSecond(value);
+ conf.inter_dc_stream_throughput_outbound = DataRateSpec.LongBytesPerSecondBound.megabitsPerSecondInBytesPerSecond(value);
}
- public static double getEntireSSTableInterDCStreamThroughputOutboundMebibytesPerSec()
+ public static double getEntireSSTableInterDCStreamThroughputOutboundBytesPerSec()
{
- return conf.entire_sstable_inter_dc_stream_throughput_outbound.toMebibytesPerSecond();
+ return conf.entire_sstable_inter_dc_stream_throughput_outbound.toBytesPerSecond();
}
- public static int getEntireSSTableInterDCStreamThroughputOutboundMebibytesPerSecAsInt()
+ public static double getEntireSSTableInterDCStreamThroughputOutboundMebibytesPerSec()
{
- return conf.entire_sstable_inter_dc_stream_throughput_outbound.toMebibytesPerSecondAsInt();
+ return conf.entire_sstable_inter_dc_stream_throughput_outbound.toMebibytesPerSecond();
}
public static void setEntireSSTableInterDCStreamThroughputOutboundMebibytesPerSec(int value)
{
- conf.entire_sstable_inter_dc_stream_throughput_outbound = new DataRateSpec.IntMebibytesPerSecondBound(value);
+ if (value == Integer.MAX_VALUE)
+ throw new IllegalArgumentException("entire_sstable_inter_dc_stream_throughput_outbound: " + value +
+ " is too large; it should be less than " +
+ Integer.MAX_VALUE + " in MiB/s");
+
+ conf.entire_sstable_inter_dc_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound(value, MEBIBYTES_PER_SECOND);
}
/**
public static long getMaxHintsFileSize()
{
- return conf.max_hints_file_size.toBytes();
+ return conf.max_hints_file_size.toBytesInLong();
}
public static ParameterizedClass getHintsCompression()
conf.key_cache_migrate_during_compaction = migrateCacheEntry;
}
+ /** This method can return a negative number, which means preemptive opening is disabled */
public static int getSSTablePreemptiveOpenIntervalInMiB()
{
+ if (conf.sstable_preemptive_open_interval == null)
+ return -1;
return conf.sstable_preemptive_open_interval.toMebibytes();
}
+ /** A negative number disables preemptive opening */
public static void setSSTablePreemptiveOpenIntervalInMiB(int mib)
{
- conf.sstable_preemptive_open_interval = new DataStorageSpec.IntMebibytesBound(mib);
+ if (mib < 0)
+ conf.sstable_preemptive_open_interval = null;
+ else
+ conf.sstable_preemptive_open_interval = new DataStorageSpec.IntMebibytesBound(mib);
}
public static boolean getTrickleFsync()
public static int getIndexSummaryResizeIntervalInMinutes()
{
+ if (conf.index_summary_resize_interval == null)
+ return -1;
+
return conf.index_summary_resize_interval.toMinutes();
}
+ public static void setIndexSummaryResizeIntervalInMinutes(int value)
+ {
+ if (value == -1)
+ conf.index_summary_resize_interval = null;
+ else
+ conf.index_summary_resize_interval = new DurationSpec.IntMinutesBound(value);
+ }
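
Taken together with the MINUTES_CUSTOM_DURATION converter earlier in the patch, the legacy and new representations round-trip as sketched below; this assumes these setters live on DatabaseDescriptor, as the surrounding methods suggest, and the values are arbitrary:

    // -1 through the legacy minutes-based setter means "disabled" and is stored as a null duration
    DatabaseDescriptor.setIndexSummaryResizeIntervalInMinutes(-1);
    assert DatabaseDescriptor.getIndexSummaryResizeIntervalInMinutes() == -1;

    // any non-negative value is stored as a DurationSpec.IntMinutesBound
    DatabaseDescriptor.setIndexSummaryResizeIntervalInMinutes(60);
    assert DatabaseDescriptor.getIndexSummaryResizeIntervalInMinutes() == 60;
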
+
public static boolean hasLargeAddressSpace()
{
// currently we just check if it's a 64bit arch, but any we only really care if the address space is large
return conf.gc_log_threshold.toMilliseconds();
}
+ public static void setGCLogThreshold(int gcLogThreshold)
+ {
+ conf.gc_log_threshold = new DurationSpec.IntMillisecondsBound(gcLogThreshold);
+ }
+
public static EncryptionContext getEncryptionContext()
{
return encryptionContext;
return conf.gc_warn_threshold.toMilliseconds();
}
+ public static void setGCWarnThreshold(int threshold)
+ {
+ conf.gc_warn_threshold = new DurationSpec.IntMillisecondsBound(threshold);
+ }
+
public static boolean isCDCEnabled()
{
return conf.cdc_enabled;
conf.cdc_block_writes = val;
}
+ public static boolean isCDCOnRepairEnabled()
+ {
+ return conf.cdc_on_repair_enabled;
+ }
+
+ public static void setCDCOnRepairEnabled(boolean val)
+ {
+ conf.cdc_on_repair_enabled = val;
+ }
+
public static String getCDCLogLocation()
{
return conf.cdc_raw_directory;
commitLogSegmentMgrProvider = provider;
}
+ private static DataStorageSpec.IntKibibytesBound createIntKibibyteBoundAndEnsureItIsValidForByteConversion(int kibibytes, String propertyName)
+ {
+ DataStorageSpec.IntKibibytesBound intKibibytesBound = new DataStorageSpec.IntKibibytesBound(kibibytes);
+ checkValidForByteConversion(intKibibytesBound, propertyName);
+ return intKibibytesBound;
+ }
+
/**
* Ensures passed in configuration value is positive and will not overflow when converted to Bytes
*/
private static void checkValidForByteConversion(final DataStorageSpec.IntKibibytesBound value, String name)
{
- long valueInBytes = value.toBytes();
+ long valueInBytes = value.toBytesInLong();
if (valueInBytes < 0 || valueInBytes > Integer.MAX_VALUE - 1)
{
throw new ConfigurationException(String.format("%s must be positive value <= %dB, but was %dB",
throw new IllegalArgumentException(String.format("default_keyspace_rf to be set (%d) cannot be less than minimum_replication_factor_fail_threshold (%d)", value, guardrails.getMinimumReplicationFactorFailThreshold()));
}
+ if (guardrails.getMaximumReplicationFactorFailThreshold() != -1 && value > guardrails.getMaximumReplicationFactorFailThreshold())
+ {
+ throw new IllegalArgumentException(String.format("default_keyspace_rf to be set (%d) cannot be greater than maximum_replication_factor_fail_threshold (%d)", value, guardrails.getMaximumReplicationFactorFailThreshold()));
+ }
+
conf.default_keyspace_rf = value;
}
public static boolean isUUIDSSTableIdentifiersEnabled()
{
- return conf.enable_uuid_sstable_identifiers;
+ return conf.uuid_sstable_identifiers_enabled;
}
public static DurationSpec.LongNanosecondsBound getRepairStateExpires()
conf.max_top_tombstone_partition_count = value;
}
- public static DataStorageSpec.LongBytesBound getMinTrackedPartitionSize()
+ public static DataStorageSpec.LongBytesBound getMinTrackedPartitionSizeInBytes()
{
- return conf.min_tracked_partition_size_bytes;
+ return conf.min_tracked_partition_size;
}
- public static void setMinTrackedPartitionSize(DataStorageSpec.LongBytesBound spec)
+ public static void setMinTrackedPartitionSizeInBytes(DataStorageSpec.LongBytesBound spec)
{
- conf.min_tracked_partition_size_bytes = spec;
+ conf.min_tracked_partition_size = spec;
}
public static long getMinTrackedPartitionTombstoneCount()
import java.util.Objects;
import java.util.Set;
-import javax.net.ssl.KeyManagerFactory;
-import javax.net.ssl.SSLException;
-import javax.net.ssl.TrustManagerFactory;
-
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.fasterxml.jackson.annotation.JsonIgnore;
import org.apache.cassandra.locator.IEndpointSnitch;
import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.security.AbstractSslContextFactory;
import org.apache.cassandra.security.DisableSslContextFactory;
import org.apache.cassandra.security.ISslContextFactory;
import org.apache.cassandra.utils.FBUtilities;
{
KEYSTORE("keystore"),
KEYSTORE_PASSWORD("keystore_password"),
+ OUTBOUND_KEYSTORE("outbound_keystore"),
+ OUTBOUND_KEYSTORE_PASSWORD("outbound_keystore_password"),
TRUSTSTORE("truststore"),
TRUSTSTORE_PASSWORD("truststore_password"),
CIPHER_SUITES("cipher_suites"),
}
}
- private void initializeSslContextFactory()
+ protected void fillSslContextParams(Map<String, Object> sslContextFactoryParameters)
{
- Map<String,Object> sslContextFactoryParameters = new HashMap<>();
- prepareSslContextFactoryParameterizedKeys(sslContextFactoryParameters);
-
/*
* Copy all configs to the Map to pass it on to the ISslContextFactory's implementation
*/
putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.REQUIRE_ENDPOINT_VERIFICATION, this.require_endpoint_verification);
putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.ENABLED, this.enabled);
putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.OPTIONAL, this.optional);
+ }
+
+ private void initializeSslContextFactory()
+ {
+ Map<String, Object> sslContextFactoryParameters = new HashMap<>();
+ prepareSslContextFactoryParameterizedKeys(sslContextFactoryParameters);
+ fillSslContextParams(sslContextFactoryParameters);
if (CassandraRelevantProperties.TEST_JVM_DTEST_DISABLE_SSL.getBoolean())
{
}
}
- private void putSslContextFactoryParameter(Map<String,Object> existingParameters, ConfigKey configKey,
- Object value)
+ protected static void putSslContextFactoryParameter(Map<String, Object> existingParameters, ConfigKey configKey, Object value)
{
if (value != null) {
existingParameters.put(configKey.getKeyName(), value);
public final InternodeEncryption internode_encryption;
@Replaces(oldName = "enable_legacy_ssl_storage_port", deprecated = true)
public final boolean legacy_ssl_storage_port_enabled;
+ public final String outbound_keystore;
+ public final String outbound_keystore_password;
public ServerEncryptionOptions()
{
this.internode_encryption = InternodeEncryption.none;
this.legacy_ssl_storage_port_enabled = false;
+ this.outbound_keystore = null;
+ this.outbound_keystore_password = null;
}
public ServerEncryptionOptions(ParameterizedClass sslContextFactoryClass, String keystore,
- String keystore_password, String truststore, String truststore_password,
+ String keystore_password,String outbound_keystore,
+ String outbound_keystore_password, String truststore, String truststore_password,
List<String> cipher_suites, String protocol, List<String> accepted_protocols,
String algorithm, String store_type, boolean require_client_auth,
boolean require_endpoint_verification, Boolean optional,
null, optional);
this.internode_encryption = internode_encryption;
this.legacy_ssl_storage_port_enabled = legacy_ssl_storage_port_enabled;
+ this.outbound_keystore = outbound_keystore;
+ this.outbound_keystore_password = outbound_keystore_password;
}
public ServerEncryptionOptions(ServerEncryptionOptions options)
super(options);
this.internode_encryption = options.internode_encryption;
this.legacy_ssl_storage_port_enabled = options.legacy_ssl_storage_port_enabled;
+ this.outbound_keystore = options.outbound_keystore;
+ this.outbound_keystore_password = options.outbound_keystore_password;
+ }
+
+ @Override
+ protected void fillSslContextParams(Map<String, Object> sslContextFactoryParameters)
+ {
+ super.fillSslContextParams(sslContextFactoryParameters);
+ putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.OUTBOUND_KEYSTORE, this.outbound_keystore);
+ putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.OUTBOUND_KEYSTORE_PASSWORD, this.outbound_keystore_password);
}
@Override
* values of "dc" and "all". This method returns the explicit, raw value of {@link #optional}
* as set by the user (if set at all).
*/
- @JsonIgnore
public boolean isExplicitlyOptional()
{
return optional != null && optional;
public ServerEncryptionOptions withSslContextFactory(ParameterizedClass sslContextFactoryClass)
{
- return new ServerEncryptionOptions(sslContextFactoryClass, keystore, keystore_password, truststore,
+ return new ServerEncryptionOptions(sslContextFactoryClass, keystore, keystore_password,
+ outbound_keystore, outbound_keystore_password, truststore,
truststore_password, cipher_suites, protocol, accepted_protocols,
algorithm, store_type, require_client_auth,
require_endpoint_verification, optional, internode_encryption,
public ServerEncryptionOptions withKeyStore(String keystore)
{
- return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+ return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+ outbound_keystore, outbound_keystore_password, truststore,
truststore_password, cipher_suites, protocol, accepted_protocols,
algorithm, store_type, require_client_auth,
require_endpoint_verification, optional, internode_encryption,
public ServerEncryptionOptions withKeyStorePassword(String keystore_password)
{
- return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+ return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+ outbound_keystore, outbound_keystore_password, truststore,
truststore_password, cipher_suites, protocol, accepted_protocols,
algorithm, store_type, require_client_auth,
require_endpoint_verification, optional, internode_encryption,
public ServerEncryptionOptions withTrustStore(String truststore)
{
- return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+ return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+ outbound_keystore, outbound_keystore_password, truststore,
truststore_password, cipher_suites, protocol, accepted_protocols,
algorithm, store_type, require_client_auth,
require_endpoint_verification, optional, internode_encryption,
public ServerEncryptionOptions withTrustStorePassword(String truststore_password)
{
- return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+ return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+ outbound_keystore, outbound_keystore_password, truststore,
truststore_password, cipher_suites, protocol, accepted_protocols,
algorithm, store_type, require_client_auth,
require_endpoint_verification, optional, internode_encryption,
public ServerEncryptionOptions withCipherSuites(List<String> cipher_suites)
{
- return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+ return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+ outbound_keystore, outbound_keystore_password, truststore,
truststore_password, cipher_suites, protocol, accepted_protocols,
algorithm, store_type, require_client_auth,
require_endpoint_verification, optional, internode_encryption,
legacy_ssl_storage_port_enabled).applyConfigInternal();
}
- public ServerEncryptionOptions withCipherSuites(String ... cipher_suites)
+ public ServerEncryptionOptions withCipherSuites(String... cipher_suites)
{
- return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+ return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+ outbound_keystore, outbound_keystore_password, truststore,
truststore_password, Arrays.asList(cipher_suites), protocol,
accepted_protocols, algorithm, store_type, require_client_auth,
require_endpoint_verification, optional, internode_encryption,
public ServerEncryptionOptions withProtocol(String protocol)
{
- return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+ return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+ outbound_keystore, outbound_keystore_password, truststore,
truststore_password, cipher_suites, protocol, accepted_protocols,
algorithm, store_type, require_client_auth,
require_endpoint_verification, optional, internode_encryption,
public ServerEncryptionOptions withAcceptedProtocols(List<String> accepted_protocols)
{
- return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+ return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+ outbound_keystore, outbound_keystore_password, truststore,
truststore_password, cipher_suites, protocol, accepted_protocols,
algorithm, store_type, require_client_auth,
require_endpoint_verification, optional, internode_encryption,
public ServerEncryptionOptions withAlgorithm(String algorithm)
{
- return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+ return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+ outbound_keystore, outbound_keystore_password, truststore,
truststore_password, cipher_suites, protocol, accepted_protocols,
algorithm, store_type, require_client_auth,
require_endpoint_verification, optional, internode_encryption,
public ServerEncryptionOptions withStoreType(String store_type)
{
- return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+ return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+ outbound_keystore, outbound_keystore_password, truststore,
truststore_password, cipher_suites, protocol, accepted_protocols,
algorithm, store_type, require_client_auth,
require_endpoint_verification, optional, internode_encryption,
public ServerEncryptionOptions withRequireClientAuth(boolean require_client_auth)
{
- return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+ return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+ outbound_keystore, outbound_keystore_password, truststore,
truststore_password, cipher_suites, protocol, accepted_protocols,
algorithm, store_type, require_client_auth,
require_endpoint_verification, optional, internode_encryption,
public ServerEncryptionOptions withRequireEndpointVerification(boolean require_endpoint_verification)
{
- return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+ return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+ outbound_keystore, outbound_keystore_password, truststore,
truststore_password, cipher_suites, protocol, accepted_protocols,
algorithm, store_type, require_client_auth,
require_endpoint_verification, optional, internode_encryption,
public ServerEncryptionOptions withOptional(boolean optional)
{
- return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+ return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+ outbound_keystore, outbound_keystore_password, truststore,
truststore_password, cipher_suites, protocol, accepted_protocols,
algorithm, store_type, require_client_auth,
require_endpoint_verification, optional, internode_encryption,
public ServerEncryptionOptions withInternodeEncryption(InternodeEncryption internode_encryption)
{
- return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+ return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+ outbound_keystore, outbound_keystore_password, truststore,
truststore_password, cipher_suites, protocol, accepted_protocols,
algorithm, store_type, require_client_auth,
require_endpoint_verification, optional, internode_encryption,
public ServerEncryptionOptions withLegacySslStoragePort(boolean enable_legacy_ssl_storage_port)
{
- return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+ return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+ outbound_keystore, outbound_keystore_password, truststore,
truststore_password, cipher_suites, protocol, accepted_protocols,
algorithm, store_type, require_client_auth,
require_endpoint_verification, optional, internode_encryption,
enable_legacy_ssl_storage_port).applyConfigInternal();
}
+ public ServerEncryptionOptions withOutboundKeystore(String outboundKeystore)
+ {
+ return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+ outboundKeystore, outbound_keystore_password, truststore,
+ truststore_password, cipher_suites, protocol, accepted_protocols,
+ algorithm, store_type, require_client_auth,
+ require_endpoint_verification, optional, internode_encryption,
+ legacy_ssl_storage_port_enabled).applyConfigInternal();
+ }
+
+ public ServerEncryptionOptions withOutboundKeystorePassword(String outboundKeystorePassword)
+ {
+ return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+ outbound_keystore, outboundKeystorePassword, truststore,
+ truststore_password, cipher_suites, protocol, accepted_protocols,
+ algorithm, store_type, require_client_auth,
+ require_endpoint_verification, optional, internode_encryption,
+ legacy_ssl_storage_port_enabled).applyConfigInternal();
+ }
}
}
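
For orientation, a minimal sketch of how the new outbound-keystore builders added above could be chained with the existing ones. This is an illustration only: the starting baseOptions instance, the keystore path, and the password are placeholders, and the "all" internode-encryption mode is assumed rather than taken from this patch.

    // Sketch only: "baseOptions" is assumed to be an existing ServerEncryptionOptions.
    ServerEncryptionOptions options = baseOptions
        .withOutboundKeystore("conf/outbound-keystore.jks")         // placeholder path
        .withOutboundKeystorePassword("outbound-secret")            // placeholder password
        .withInternodeEncryption(ServerEncryptionOptions.InternodeEncryption.all);

Each with* call returns a fresh ServerEncryptionOptions carrying the outbound keystore fields forward, which is why every builder in the diff now threads outbound_keystore and outbound_keystore_password through the constructor.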
validateMaxIntThreshold(config.fields_per_udt_warn_threshold, config.fields_per_udt_fail_threshold, "fields_per_udt");
validatePercentageThreshold(config.data_disk_usage_percentage_warn_threshold, config.data_disk_usage_percentage_fail_threshold, "data_disk_usage_percentage");
validateDataDiskUsageMaxDiskSize(config.data_disk_usage_max_disk_size);
- validateMinRFThreshold(config.minimum_replication_factor_warn_threshold, config.minimum_replication_factor_fail_threshold, "minimum_replication_factor");
+ validateMinRFThreshold(config.minimum_replication_factor_warn_threshold, config.minimum_replication_factor_fail_threshold);
+ validateMaxRFThreshold(config.maximum_replication_factor_warn_threshold, config.maximum_replication_factor_fail_threshold);
}
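
The change above splits replication-factor validation into separate minimum and maximum checks. As a rough sketch of the ordering these validators are expected to enforce (not the project's actual code; the use of -1 as the "disabled" sentinel and the exact comparisons are assumptions):

    // Illustrative sketch only: warn should trigger before fail, so for a minimum-RF
    // guardrail the warn threshold must not be below the fail threshold, and for a
    // maximum-RF guardrail it must not be above it.
    static void checkRfThresholdOrdering(int minWarn, int minFail, int maxWarn, int maxFail)
    {
        if (minWarn != -1 && minFail != -1 && minWarn < minFail)
            throw new IllegalArgumentException("minimum_replication_factor: warn threshold must be >= fail threshold");
        if (maxWarn != -1 && maxFail != -1 && maxWarn > maxFail)
            throw new IllegalArgumentException("maximum_replication_factor: warn threshold must be <= fail threshold");
    }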
@Override
x -> config.drop_truncate_table_enabled = x);
}
+ @Override
+ public boolean getDropKeyspaceEnabled()
+ {
+ return config.drop_keyspace_enabled;
+ }
+
+ public void setDropKeyspaceEnabled(boolean enabled)
+ {
+ updatePropertyWithLogging("drop_keyspace_enabled",
+ enabled,
+ () -> config.drop_keyspace_enabled,
+ x -> config.drop_keyspace_enabled = x);
+ }
+
@Override
public boolean getSecondaryIndexesEnabled()
{
x -> config.compact_tables_enabled = x);
}
+ @Override
+ public boolean getAlterTableEnabled()
+ {
+ return config.alter_table_enabled;
+ }
+
+ public void setAlterTableEnabled(boolean enabled)
+ {
+ updatePropertyWithLogging("alter_table_enabled",
+ enabled,
+ () -> config.alter_table_enabled,
+ x -> config.alter_table_enabled = x);
+ }
+
@Override
public boolean getReadBeforeWriteListOperationsEnabled()
{
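
The two new guardrail flags above (drop_keyspace_enabled and alter_table_enabled) follow the same getter/setter-with-logging pattern as the surrounding drop_truncate_table and compact_tables flags. A rough usage sketch, assuming a guardrails-options instance (here called "guardrails") exposing the accessors added above is already in hand; how that instance is obtained is outside this patch:

    // Sketch only: "guardrails" is assumed to expose the new getters/setters.
    if (guardrails.getDropKeyspaceEnabled())
        guardrails.setDropKeyspaceEnabled(false);   // block DROP KEYSPACE at runtime
    if (!guardrails.getAlterTableEnabled())
        guardrails.setAlterTableEnabled(true);      // re-allow ALTER TABLE

The setters route through updatePropertyWithLogging, so each runtime change is logged alongside the previous and new values.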