DataOutputBuffer#scratchBuffer can use off-heap or on-heap memory as a means to control memory allocations (branch: trunk)
author: David Capwell <dcapwell@apache.org>
Fri, 12 Aug 2022 22:20:37 +0000 (15:20 -0700)
committer: David Capwell <dcapwell@apache.org>
Sat, 13 Aug 2022 03:04:14 +0000 (20:04 -0700)
patch by David Capwell; reviewed by Caleb Rackliffe for CASSANDRA-16471

474 files changed:
.build/build-rat.xml
.circleci/config-2_1.yml
.circleci/config-2_1.yml.high_res.patch
.circleci/config-2_1.yml.mid_res.patch
.circleci/config.yml
.circleci/config.yml.HIGHRES
.circleci/config.yml.LOWRES
.circleci/config.yml.MIDRES
CHANGES.txt
NEWS.txt
README.asc
bin/cqlsh.py
build.xml
checkstyle.xml
conf/cassandra.yaml
debian/cassandra.postinst
debian/changelog
doc/cql3/CQL.textile
doc/modules/cassandra/pages/cql/appendices.adoc
doc/modules/cassandra/pages/cql/changes.adoc
doc/modules/cassandra/pages/cql/cql_singlefile.adoc
doc/modules/cassandra/pages/cql/dml.adoc
doc/modules/cassandra/pages/faq/index.adoc
doc/modules/cassandra/pages/new/virtualtables.adoc
doc/modules/cassandra/pages/operating/compaction/index.adoc
doc/modules/cassandra/pages/tools/cqlsh.adoc
ide/nbproject/project.xml
pylib/cqlshlib/cql3handling.py
pylib/cqlshlib/cqlshhandling.py
pylib/cqlshlib/formatting.py
pylib/cqlshlib/pylexotron.py
redhat/cassandra.spec
redhat/noboolean/README [new file with mode: 0644]
redhat/noboolean/cassandra [new symlink]
redhat/noboolean/cassandra.conf [new symlink]
redhat/noboolean/cassandra.in.sh [new symlink]
redhat/noboolean/cassandra.spec [new file with mode: 0644]
redhat/noboolean/default [new symlink]
src/antlr/Lexer.g
src/antlr/Parser.g
src/java/org/apache/cassandra/auth/AllowAllInternodeAuthenticator.java
src/java/org/apache/cassandra/auth/CassandraRoleManager.java
src/java/org/apache/cassandra/auth/IInternodeAuthenticator.java
src/java/org/apache/cassandra/concurrent/DebuggableTask.java [new file with mode: 0644]
src/java/org/apache/cassandra/concurrent/ExecutionFailure.java
src/java/org/apache/cassandra/concurrent/ExecutorFactory.java
src/java/org/apache/cassandra/concurrent/FutureTask.java
src/java/org/apache/cassandra/concurrent/SEPWorker.java
src/java/org/apache/cassandra/concurrent/SharedExecutorPool.java
src/java/org/apache/cassandra/concurrent/TaskFactory.java
src/java/org/apache/cassandra/config/CassandraRelevantProperties.java
src/java/org/apache/cassandra/config/Config.java
src/java/org/apache/cassandra/config/Converters.java
src/java/org/apache/cassandra/config/DataRateSpec.java
src/java/org/apache/cassandra/config/DataStorageSpec.java
src/java/org/apache/cassandra/config/DatabaseDescriptor.java
src/java/org/apache/cassandra/config/EncryptionOptions.java
src/java/org/apache/cassandra/config/GuardrailsOptions.java
src/java/org/apache/cassandra/config/YamlConfigurationLoader.java
src/java/org/apache/cassandra/cql3/CQL3Type.java
src/java/org/apache/cassandra/cql3/ColumnIdentifier.java
src/java/org/apache/cassandra/cql3/QueryProcessor.java
src/java/org/apache/cassandra/cql3/Tuples.java
src/java/org/apache/cassandra/cql3/UserTypes.java
src/java/org/apache/cassandra/cql3/conditions/ColumnCondition.java
src/java/org/apache/cassandra/cql3/selection/AggregateFunctionSelector.java
src/java/org/apache/cassandra/cql3/selection/ColumnTimestamps.java [new file with mode: 0644]
src/java/org/apache/cassandra/cql3/selection/ElementsSelector.java
src/java/org/apache/cassandra/cql3/selection/FieldSelector.java
src/java/org/apache/cassandra/cql3/selection/ListSelector.java
src/java/org/apache/cassandra/cql3/selection/MapSelector.java
src/java/org/apache/cassandra/cql3/selection/ResultSetBuilder.java
src/java/org/apache/cassandra/cql3/selection/RowTimestamps.java [new file with mode: 0644]
src/java/org/apache/cassandra/cql3/selection/ScalarFunctionSelector.java
src/java/org/apache/cassandra/cql3/selection/Selectable.java
src/java/org/apache/cassandra/cql3/selection/Selection.java
src/java/org/apache/cassandra/cql3/selection/Selector.java
src/java/org/apache/cassandra/cql3/selection/SelectorFactories.java
src/java/org/apache/cassandra/cql3/selection/SetSelector.java
src/java/org/apache/cassandra/cql3/selection/SimpleSelector.java
src/java/org/apache/cassandra/cql3/selection/TermSelector.java
src/java/org/apache/cassandra/cql3/selection/TupleSelector.java
src/java/org/apache/cassandra/cql3/selection/UserTypeSelector.java
src/java/org/apache/cassandra/cql3/selection/WritetimeOrTTLSelector.java
src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
src/java/org/apache/cassandra/cql3/statements/schema/AlterKeyspaceStatement.java
src/java/org/apache/cassandra/cql3/statements/schema/AlterTableStatement.java
src/java/org/apache/cassandra/cql3/statements/schema/CreateKeyspaceStatement.java
src/java/org/apache/cassandra/cql3/statements/schema/DropKeyspaceStatement.java
src/java/org/apache/cassandra/cql3/statements/schema/KeyspaceAttributes.java
src/java/org/apache/cassandra/db/ArrayClusteringBound.java
src/java/org/apache/cassandra/db/BufferClusteringBound.java
src/java/org/apache/cassandra/db/BufferClusteringBoundary.java
src/java/org/apache/cassandra/db/BufferDecoratedKey.java
src/java/org/apache/cassandra/db/Clustering.java
src/java/org/apache/cassandra/db/ClusteringBound.java
src/java/org/apache/cassandra/db/ClusteringBoundOrBoundary.java
src/java/org/apache/cassandra/db/ClusteringComparator.java
src/java/org/apache/cassandra/db/ClusteringPrefix.java
src/java/org/apache/cassandra/db/ColumnFamilyStore.java
src/java/org/apache/cassandra/db/ColumnFamilyStoreMBean.java
src/java/org/apache/cassandra/db/Columns.java
src/java/org/apache/cassandra/db/DataRange.java
src/java/org/apache/cassandra/db/DecoratedKey.java
src/java/org/apache/cassandra/db/DeletionInfo.java
src/java/org/apache/cassandra/db/Directories.java
src/java/org/apache/cassandra/db/DisallowedDirectories.java
src/java/org/apache/cassandra/db/EmptyIterators.java
src/java/org/apache/cassandra/db/Keyspace.java
src/java/org/apache/cassandra/db/MutableDeletionInfo.java
src/java/org/apache/cassandra/db/NativeDecoratedKey.java
src/java/org/apache/cassandra/db/PartitionPosition.java
src/java/org/apache/cassandra/db/RangeTombstoneList.java
src/java/org/apache/cassandra/db/RegularAndStaticColumns.java
src/java/org/apache/cassandra/db/SSTableImporter.java
src/java/org/apache/cassandra/db/SnapshotDetailsTabularData.java
src/java/org/apache/cassandra/db/SystemKeyspace.java
src/java/org/apache/cassandra/db/aggregation/AggregationSpecification.java
src/java/org/apache/cassandra/db/aggregation/GroupMaker.java
src/java/org/apache/cassandra/db/columniterator/SSTableIterator.java
src/java/org/apache/cassandra/db/columniterator/SSTableReversedIterator.java
src/java/org/apache/cassandra/db/commitlog/AbstractCommitLogSegmentManager.java
src/java/org/apache/cassandra/db/commitlog/CommitLog.java
src/java/org/apache/cassandra/db/commitlog/CommitLogMBean.java
src/java/org/apache/cassandra/db/compaction/CompactionManager.java
src/java/org/apache/cassandra/db/compaction/CompactionStrategyManager.java
src/java/org/apache/cassandra/db/compaction/Scrubber.java
src/java/org/apache/cassandra/db/compaction/TimeWindowCompactionStrategy.java
src/java/org/apache/cassandra/db/compaction/Verifier.java
src/java/org/apache/cassandra/db/filter/ClusteringIndexNamesFilter.java
src/java/org/apache/cassandra/db/guardrails/DisableFlag.java [deleted file]
src/java/org/apache/cassandra/db/guardrails/EnableFlag.java [new file with mode: 0644]
src/java/org/apache/cassandra/db/guardrails/Guardrails.java
src/java/org/apache/cassandra/db/guardrails/GuardrailsConfig.java
src/java/org/apache/cassandra/db/guardrails/GuardrailsMBean.java
src/java/org/apache/cassandra/db/lifecycle/LogReplica.java
src/java/org/apache/cassandra/db/marshal/AbstractTimeUUIDType.java
src/java/org/apache/cassandra/db/marshal/AbstractType.java
src/java/org/apache/cassandra/db/marshal/BooleanType.java
src/java/org/apache/cassandra/db/marshal/ByteArrayAccessor.java
src/java/org/apache/cassandra/db/marshal/ByteArrayObjectFactory.java
src/java/org/apache/cassandra/db/marshal/ByteBufferAccessor.java
src/java/org/apache/cassandra/db/marshal/ByteBufferObjectFactory.java
src/java/org/apache/cassandra/db/marshal/ByteType.java
src/java/org/apache/cassandra/db/marshal/CollectionType.java
src/java/org/apache/cassandra/db/marshal/CompositeType.java
src/java/org/apache/cassandra/db/marshal/DateType.java
src/java/org/apache/cassandra/db/marshal/DecimalType.java
src/java/org/apache/cassandra/db/marshal/DoubleType.java
src/java/org/apache/cassandra/db/marshal/DynamicCompositeType.java
src/java/org/apache/cassandra/db/marshal/EmptyType.java
src/java/org/apache/cassandra/db/marshal/FloatType.java
src/java/org/apache/cassandra/db/marshal/Int32Type.java
src/java/org/apache/cassandra/db/marshal/IntegerType.java
src/java/org/apache/cassandra/db/marshal/LexicalUUIDType.java
src/java/org/apache/cassandra/db/marshal/ListType.java
src/java/org/apache/cassandra/db/marshal/LongType.java
src/java/org/apache/cassandra/db/marshal/MapType.java
src/java/org/apache/cassandra/db/marshal/PartitionerDefinedOrder.java
src/java/org/apache/cassandra/db/marshal/ReversedType.java
src/java/org/apache/cassandra/db/marshal/SetType.java
src/java/org/apache/cassandra/db/marshal/ShortType.java
src/java/org/apache/cassandra/db/marshal/SimpleDateType.java
src/java/org/apache/cassandra/db/marshal/TimeType.java
src/java/org/apache/cassandra/db/marshal/TimestampType.java
src/java/org/apache/cassandra/db/marshal/TupleType.java
src/java/org/apache/cassandra/db/marshal/UUIDType.java
src/java/org/apache/cassandra/db/marshal/UserType.java
src/java/org/apache/cassandra/db/marshal/ValueAccessor.java
src/java/org/apache/cassandra/db/memtable/AbstractAllocatorMemtable.java
src/java/org/apache/cassandra/db/memtable/AbstractMemtable.java
src/java/org/apache/cassandra/db/memtable/AbstractMemtableWithCommitlog.java
src/java/org/apache/cassandra/db/memtable/Flushing.java
src/java/org/apache/cassandra/db/memtable/Memtable.java
src/java/org/apache/cassandra/db/memtable/ShardedSkipListMemtable.java
src/java/org/apache/cassandra/db/memtable/SkipListMemtable.java
src/java/org/apache/cassandra/db/partitions/AbstractBTreePartition.java
src/java/org/apache/cassandra/db/partitions/AtomicBTreePartition.java
src/java/org/apache/cassandra/db/partitions/FilteredPartition.java
src/java/org/apache/cassandra/db/partitions/PartitionUpdate.java
src/java/org/apache/cassandra/db/rows/AbstractCell.java
src/java/org/apache/cassandra/db/rows/ArrayCell.java
src/java/org/apache/cassandra/db/rows/BTreeRow.java
src/java/org/apache/cassandra/db/rows/BufferCell.java
src/java/org/apache/cassandra/db/rows/Cell.java
src/java/org/apache/cassandra/db/rows/CellPath.java
src/java/org/apache/cassandra/db/rows/Cells.java
src/java/org/apache/cassandra/db/rows/ColumnData.java
src/java/org/apache/cassandra/db/rows/ComplexColumnData.java
src/java/org/apache/cassandra/db/rows/EncodingStats.java
src/java/org/apache/cassandra/db/rows/RangeTombstoneBoundMarker.java
src/java/org/apache/cassandra/db/rows/RangeTombstoneBoundaryMarker.java
src/java/org/apache/cassandra/db/rows/RangeTombstoneMarker.java
src/java/org/apache/cassandra/db/rows/Row.java
src/java/org/apache/cassandra/db/rows/Rows.java
src/java/org/apache/cassandra/db/rows/UnfilteredSerializer.java
src/java/org/apache/cassandra/db/rows/WrappingUnfilteredRowIterator.java
src/java/org/apache/cassandra/db/streaming/CassandraStreamManager.java
src/java/org/apache/cassandra/db/streaming/CassandraStreamReceiver.java
src/java/org/apache/cassandra/db/view/TableViews.java
src/java/org/apache/cassandra/db/virtual/QueriesTable.java [new file with mode: 0644]
src/java/org/apache/cassandra/db/virtual/SystemViewsKeyspace.java
src/java/org/apache/cassandra/dht/ByteOrderedPartitioner.java
src/java/org/apache/cassandra/dht/LocalPartitioner.java
src/java/org/apache/cassandra/dht/Murmur3Partitioner.java
src/java/org/apache/cassandra/dht/OrderPreservingPartitioner.java
src/java/org/apache/cassandra/dht/RandomPartitioner.java
src/java/org/apache/cassandra/dht/Token.java
src/java/org/apache/cassandra/gms/Gossiper.java
src/java/org/apache/cassandra/gms/VersionedValue.java
src/java/org/apache/cassandra/hints/HintsCatalog.java
src/java/org/apache/cassandra/hints/HintsWriter.java
src/java/org/apache/cassandra/index/internal/CassandraIndexSearcher.java
src/java/org/apache/cassandra/io/sstable/Descriptor.java
src/java/org/apache/cassandra/io/sstable/IndexSummaryManager.java
src/java/org/apache/cassandra/io/sstable/IndexSummaryManagerMBean.java
src/java/org/apache/cassandra/io/sstable/SSTable.java
src/java/org/apache/cassandra/io/sstable/SSTableHeaderFix.java
src/java/org/apache/cassandra/io/sstable/SSTableLoader.java
src/java/org/apache/cassandra/io/util/BufferedDataOutputStreamPlus.java
src/java/org/apache/cassandra/io/util/ChecksumWriter.java
src/java/org/apache/cassandra/io/util/DataOutputBuffer.java
src/java/org/apache/cassandra/io/util/File.java
src/java/org/apache/cassandra/locator/NetworkTopologyStrategy.java
src/java/org/apache/cassandra/locator/ReconnectableSnitchHelper.java
src/java/org/apache/cassandra/locator/SimpleStrategy.java
src/java/org/apache/cassandra/locator/TokenMetadata.java
src/java/org/apache/cassandra/metrics/TopPartitionTracker.java
src/java/org/apache/cassandra/net/Crc.java
src/java/org/apache/cassandra/net/InboundConnectionInitiator.java
src/java/org/apache/cassandra/net/InboundConnectionSettings.java
src/java/org/apache/cassandra/net/InternodeConnectionUtils.java [new file with mode: 0644]
src/java/org/apache/cassandra/net/MessagingService.java
src/java/org/apache/cassandra/net/OutboundConnectionInitiator.java
src/java/org/apache/cassandra/net/OutboundConnectionSettings.java
src/java/org/apache/cassandra/net/StartupClusterConnectivityChecker.java
src/java/org/apache/cassandra/schema/DefaultSchemaUpdateHandler.java
src/java/org/apache/cassandra/schema/Schema.java
src/java/org/apache/cassandra/security/AbstractSslContextFactory.java
src/java/org/apache/cassandra/security/DisableSslContextFactory.java
src/java/org/apache/cassandra/security/FileBasedSslContextFactory.java
src/java/org/apache/cassandra/security/ISslContextFactory.java
src/java/org/apache/cassandra/security/PEMBasedSslContextFactory.java
src/java/org/apache/cassandra/serializers/AbstractMapSerializer.java [new file with mode: 0644]
src/java/org/apache/cassandra/serializers/BooleanSerializer.java
src/java/org/apache/cassandra/serializers/CollectionSerializer.java
src/java/org/apache/cassandra/serializers/ListSerializer.java
src/java/org/apache/cassandra/serializers/MapSerializer.java
src/java/org/apache/cassandra/serializers/SetSerializer.java
src/java/org/apache/cassandra/service/CassandraDaemon.java
src/java/org/apache/cassandra/service/GCInspector.java
src/java/org/apache/cassandra/service/StartupCheck.java
src/java/org/apache/cassandra/service/StartupChecks.java
src/java/org/apache/cassandra/service/StorageProxy.java
src/java/org/apache/cassandra/service/StorageService.java
src/java/org/apache/cassandra/service/StorageServiceMBean.java
src/java/org/apache/cassandra/service/paxos/Paxos.java
src/java/org/apache/cassandra/service/paxos/PaxosPrepare.java
src/java/org/apache/cassandra/service/paxos/PaxosRepairHistory.java
src/java/org/apache/cassandra/service/paxos/uncommitted/PaxosBallotTracker.java
src/java/org/apache/cassandra/service/paxos/uncommitted/PaxosStateTracker.java
src/java/org/apache/cassandra/service/paxos/uncommitted/PaxosUncommittedIndex.java
src/java/org/apache/cassandra/service/paxos/uncommitted/PaxosUncommittedTracker.java
src/java/org/apache/cassandra/service/snapshot/SnapshotLoader.java
src/java/org/apache/cassandra/service/snapshot/SnapshotManifest.java
src/java/org/apache/cassandra/service/snapshot/TableSnapshot.java
src/java/org/apache/cassandra/streaming/StreamManager.java
src/java/org/apache/cassandra/streaming/StreamSession.java
src/java/org/apache/cassandra/streaming/StreamTransferTask.java
src/java/org/apache/cassandra/streaming/StreamingChannel.java
src/java/org/apache/cassandra/streaming/async/StreamingMultiplexedChannel.java
src/java/org/apache/cassandra/tools/BulkLoadConnectionFactory.java
src/java/org/apache/cassandra/tools/BulkLoader.java
src/java/org/apache/cassandra/tools/LoaderOptions.java
src/java/org/apache/cassandra/tools/NodeProbe.java
src/java/org/apache/cassandra/tools/nodetool/Compact.java
src/java/org/apache/cassandra/tools/nodetool/CompactionStats.java
src/java/org/apache/cassandra/tools/nodetool/GetCompactionThroughput.java
src/java/org/apache/cassandra/tools/nodetool/GetInterDCStreamThroughput.java
src/java/org/apache/cassandra/tools/nodetool/GetStreamThroughput.java
src/java/org/apache/cassandra/tools/nodetool/ListSnapshots.java
src/java/org/apache/cassandra/tools/nodetool/SetInterDCStreamThroughput.java
src/java/org/apache/cassandra/tools/nodetool/SetStreamThroughput.java
src/java/org/apache/cassandra/tools/nodetool/stats/StatsTable.java
src/java/org/apache/cassandra/tools/nodetool/stats/TableStatsHolder.java
src/java/org/apache/cassandra/transport/Dispatcher.java
src/java/org/apache/cassandra/transport/InitialConnectionHandler.java
src/java/org/apache/cassandra/transport/Message.java
src/java/org/apache/cassandra/transport/messages/QueryMessage.java
src/java/org/apache/cassandra/utils/ExpiringMemoizingSupplier.java
src/java/org/apache/cassandra/utils/binlog/BinLog.java
src/java/org/apache/cassandra/utils/binlog/ExternalArchiver.java
src/java/org/apache/cassandra/utils/btree/BTree.java
src/java/org/apache/cassandra/utils/btree/UpdateFunction.java
src/java/org/apache/cassandra/utils/bytecomparable/ByteComparable.java [new file with mode: 0644]
src/java/org/apache/cassandra/utils/bytecomparable/ByteComparable.md [new file with mode: 0644]
src/java/org/apache/cassandra/utils/bytecomparable/ByteSource.java [new file with mode: 0644]
src/java/org/apache/cassandra/utils/bytecomparable/ByteSourceInverse.java [new file with mode: 0644]
src/java/org/apache/cassandra/utils/memory/ByteBufferCloner.java [moved from src/java/org/apache/cassandra/utils/memory/AbstractAllocator.java with 50% similarity]
src/java/org/apache/cassandra/utils/memory/Cloner.java [new file with mode: 0644]
src/java/org/apache/cassandra/utils/memory/ContextAllocator.java [deleted file]
src/java/org/apache/cassandra/utils/memory/EnsureOnHeap.java
src/java/org/apache/cassandra/utils/memory/HeapCloner.java [new file with mode: 0644]
src/java/org/apache/cassandra/utils/memory/HeapPool.java
src/java/org/apache/cassandra/utils/memory/MemtableAllocator.java
src/java/org/apache/cassandra/utils/memory/MemtableBufferAllocator.java
src/java/org/apache/cassandra/utils/memory/NativeAllocator.java
src/java/org/apache/cassandra/utils/memory/SlabAllocator.java
test/burn/org/apache/cassandra/utils/LongBTreeTest.java
test/conf/cassandra-converters-special-cases-old-names.yaml [new file with mode: 0644]
test/conf/cassandra-converters-special-cases.yaml [new file with mode: 0644]
test/conf/cassandra_ssl_test_outbound.keystore [new file with mode: 0644]
test/distributed/org/apache/cassandra/distributed/action/GossipHelper.java
test/distributed/org/apache/cassandra/distributed/fuzz/InJvmSutBase.java
test/distributed/org/apache/cassandra/distributed/impl/AbstractCluster.java
test/distributed/org/apache/cassandra/distributed/impl/Instance.java
test/distributed/org/apache/cassandra/distributed/impl/RowUtil.java
test/distributed/org/apache/cassandra/distributed/shared/ClusterUtils.java
test/distributed/org/apache/cassandra/distributed/test/AlterTest.java
test/distributed/org/apache/cassandra/distributed/test/EphemeralSnapshotTest.java [new file with mode: 0644]
test/distributed/org/apache/cassandra/distributed/test/GossipTest.java
test/distributed/org/apache/cassandra/distributed/test/InternodeEncryptionEnforcementTest.java
test/distributed/org/apache/cassandra/distributed/test/NativeProtocolTest.java
test/distributed/org/apache/cassandra/distributed/test/PaxosRepairTest2.java
test/distributed/org/apache/cassandra/distributed/test/QueriesTableTest.java [new file with mode: 0644]
test/distributed/org/apache/cassandra/distributed/test/RepairErrorsTest.java
test/distributed/org/apache/cassandra/distributed/test/SSTableIdGenerationTest.java
test/distributed/org/apache/cassandra/distributed/test/SchemaTest.java
test/distributed/org/apache/cassandra/distributed/test/TopPartitionsTest.java
test/distributed/org/apache/cassandra/distributed/test/cdc/ToggleCDCOnRepairEnabledTest.java [new file with mode: 0644]
test/distributed/org/apache/cassandra/distributed/test/hostreplacement/FailedBootstrapTest.java [new file with mode: 0644]
test/distributed/org/apache/cassandra/distributed/test/hostreplacement/HostReplacementTest.java
test/distributed/org/apache/cassandra/distributed/upgrade/BatchUpgradeTest.java
test/distributed/org/apache/cassandra/distributed/upgrade/CompactStorageColumnDeleteTest.java
test/distributed/org/apache/cassandra/distributed/upgrade/CompactStorageHiddenColumnTest.java
test/distributed/org/apache/cassandra/distributed/upgrade/CompactStorageImplicitNullInClusteringTest.java
test/distributed/org/apache/cassandra/distributed/upgrade/CompactStoragePagingTest.java
test/distributed/org/apache/cassandra/distributed/upgrade/DropCompactStorageNullClusteringValuesTest.java
test/distributed/org/apache/cassandra/distributed/upgrade/DropCompactStorageTest.java
test/distributed/org/apache/cassandra/distributed/upgrade/GroupByTest.java
test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityTestBase.java
test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV30AllOneTest.java [moved from test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV30Test.java with 71% similarity]
test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV30OneAllTest.java [moved from test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV3XTest.java with 71% similarity]
test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV30QuorumQuorumTest.java [moved from src/java/org/apache/cassandra/utils/memory/HeapAllocator.java with 57% similarity]
test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV3XAllOneTest.java [new file with mode: 0644]
test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV3XOneAllTest.java [new file with mode: 0644]
test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV3XQuorumQuorumTest.java [new file with mode: 0644]
test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeConsistencyTestBase.java
test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeFrom3ReplicationTest.java
test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeGossipTest.java
test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeMessageForwardTest.java
test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadRepairDeleteTest.java
test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadRepairWriteTest.java
test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadTest.java
test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeRepairTest.java
test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReplicationTestBase.java [deleted file]
test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeWritetimeOrTTLTest.java [new file with mode: 0644]
test/distributed/org/apache/cassandra/distributed/upgrade/Pre40MessageFilterTest.java
test/distributed/org/apache/cassandra/distributed/upgrade/UpgradeTest.java
test/distributed/org/apache/cassandra/distributed/upgrade/UpgradeTestBase.java
test/long/org/apache/cassandra/cql3/CorruptionTest.java
test/microbench/org/apache/cassandra/test/microbench/AbstractTypeByteSourceDecodingBench.java [new file with mode: 0644]
test/microbench/org/apache/cassandra/test/microbench/btree/AtomicBTreePartitionUpdateBench.java [new file with mode: 0644]
test/microbench/org/apache/cassandra/test/microbench/btree/Megamorphism.java
test/simulator/asm/org/apache/cassandra/simulator/asm/InterceptClasses.java
test/simulator/main/org/apache/cassandra/simulator/cluster/ClusterActionListener.java
test/simulator/main/org/apache/cassandra/simulator/cluster/ClusterActions.java
test/simulator/main/org/apache/cassandra/simulator/package-info.java
test/simulator/main/org/apache/cassandra/simulator/paxos/Ballots.java
test/simulator/main/org/apache/cassandra/simulator/paxos/PaxosRepairValidator.java
test/simulator/main/org/apache/cassandra/simulator/paxos/PaxosTopologyChangeVerifier.java
test/simulator/main/org/apache/cassandra/simulator/utils/KindOfSequence.java
test/simulator/test/org/apache/cassandra/simulator/test/ClassWithSynchronizedMethods.java
test/simulator/test/org/apache/cassandra/simulator/test/ShortPaxosSimulationTest.java
test/simulator/test/org/apache/cassandra/simulator/test/SimulationTestBase.java
test/simulator/test/org/apache/cassandra/simulator/test/TrivialSimulationTest.java
test/unit/org/apache/cassandra/SchemaLoader.java
test/unit/org/apache/cassandra/ServerTestUtils.java
test/unit/org/apache/cassandra/Util.java
test/unit/org/apache/cassandra/audit/AuditLoggerAuthTest.java
test/unit/org/apache/cassandra/concurrent/DebuggableScheduledThreadPoolExecutorTest.java
test/unit/org/apache/cassandra/config/DataRateSpecTest.java
test/unit/org/apache/cassandra/config/DatabaseDescriptorRefTest.java
test/unit/org/apache/cassandra/config/DatabaseDescriptorTest.java
test/unit/org/apache/cassandra/config/EncryptionOptionsTest.java
test/unit/org/apache/cassandra/config/LoadOldYAMLBackwardCompatibilityTest.java
test/unit/org/apache/cassandra/config/ParseAndConvertUnitsTest.java
test/unit/org/apache/cassandra/config/YamlConfigurationLoaderTest.java
test/unit/org/apache/cassandra/cql3/BatchTests.java
test/unit/org/apache/cassandra/cql3/CQLTester.java
test/unit/org/apache/cassandra/cql3/PagingTest.java
test/unit/org/apache/cassandra/cql3/selection/SelectorSerializationTest.java
test/unit/org/apache/cassandra/cql3/validation/entities/CollectionsTest.java
test/unit/org/apache/cassandra/cql3/validation/entities/TimestampTest.java
test/unit/org/apache/cassandra/cql3/validation/entities/TupleTypeTest.java
test/unit/org/apache/cassandra/cql3/validation/entities/UserTypesTest.java
test/unit/org/apache/cassandra/cql3/validation/entities/WritetimeOrTTLTest.java
test/unit/org/apache/cassandra/cql3/validation/operations/DeleteTest.java
test/unit/org/apache/cassandra/cql3/validation/operations/InsertUpdateIfConditionTest.java
test/unit/org/apache/cassandra/db/CellTest.java
test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
test/unit/org/apache/cassandra/db/DirectoriesTest.java
test/unit/org/apache/cassandra/db/KeyCacheTest.java
test/unit/org/apache/cassandra/db/NativeCellTest.java
test/unit/org/apache/cassandra/db/TopPartitionTrackerTest.java
test/unit/org/apache/cassandra/db/aggregation/GroupMakerTest.java
test/unit/org/apache/cassandra/db/commitlog/CommitLogCQLTest.java
test/unit/org/apache/cassandra/db/compaction/CompactionStrategyManagerTest.java
test/unit/org/apache/cassandra/db/compaction/LeveledCompactionStrategyTest.java
test/unit/org/apache/cassandra/db/guardrails/GuardrailAlterTableTest.java [new file with mode: 0644]
test/unit/org/apache/cassandra/db/guardrails/GuardrailDropKeyspaceTest.java [new file with mode: 0644]
test/unit/org/apache/cassandra/db/guardrails/GuardrailMaximumReplicationFactorTest.java [new file with mode: 0644]
test/unit/org/apache/cassandra/db/guardrails/GuardrailMinimumReplicationFactorTest.java
test/unit/org/apache/cassandra/db/guardrails/GuardrailSimpleStrategyTest.java [new file with mode: 0644]
test/unit/org/apache/cassandra/db/guardrails/GuardrailTester.java
test/unit/org/apache/cassandra/db/guardrails/GuardrailsTest.java
test/unit/org/apache/cassandra/db/marshal/DynamicCompositeTypeTest.java
test/unit/org/apache/cassandra/db/marshal/TypeValidationTest.java
test/unit/org/apache/cassandra/db/rows/RowBuilder.java [deleted file]
test/unit/org/apache/cassandra/db/rows/RowsMergingTest.java [new file with mode: 0644]
test/unit/org/apache/cassandra/db/rows/RowsTest.java
test/unit/org/apache/cassandra/db/virtual/SettingsTableTest.java
test/unit/org/apache/cassandra/dht/KeyCollisionTest.java
test/unit/org/apache/cassandra/dht/LengthPartitioner.java
test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterClientTest.java
test/unit/org/apache/cassandra/io/sstable/IndexSummaryManagerTest.java
test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java
test/unit/org/apache/cassandra/locator/NetworkTopologyStrategyTest.java
test/unit/org/apache/cassandra/metrics/BatchMetricsTest.java
test/unit/org/apache/cassandra/metrics/CQLMetricsTest.java
test/unit/org/apache/cassandra/metrics/ClientRequestMetricsTest.java
test/unit/org/apache/cassandra/metrics/KeyspaceMetricsTest.java
test/unit/org/apache/cassandra/metrics/TableMetricsTest.java
test/unit/org/apache/cassandra/net/MessagingServiceTest.java
test/unit/org/apache/cassandra/schema/SchemaTest.java
test/unit/org/apache/cassandra/security/DefaultSslContextFactoryTest.java
test/unit/org/apache/cassandra/security/PEMBasedSslContextFactoryTest.java
test/unit/org/apache/cassandra/security/SSLFactoryTest.java
test/unit/org/apache/cassandra/serializers/MapSerializerTest.java [new file with mode: 0644]
test/unit/org/apache/cassandra/serializers/SetSerializerTest.java [new file with mode: 0644]
test/unit/org/apache/cassandra/service/GCInspectorTest.java
test/unit/org/apache/cassandra/service/StartupChecksTest.java
test/unit/org/apache/cassandra/service/StorageProxyTest.java
test/unit/org/apache/cassandra/service/StorageServiceTest.java
test/unit/org/apache/cassandra/service/paxos/uncommitted/PaxosUncommittedTrackerTest.java
test/unit/org/apache/cassandra/service/snapshot/SnapshotLoaderTest.java
test/unit/org/apache/cassandra/service/snapshot/SnapshotManagerTest.java
test/unit/org/apache/cassandra/service/snapshot/SnapshotManifestTest.java
test/unit/org/apache/cassandra/service/snapshot/TableSnapshotTest.java
test/unit/org/apache/cassandra/streaming/StreamManagerTest.java
test/unit/org/apache/cassandra/tools/ToolRunner.java
test/unit/org/apache/cassandra/tools/nodetool/ClientStatsTest.java
test/unit/org/apache/cassandra/tools/nodetool/CompactionStatsTest.java
test/unit/org/apache/cassandra/tools/nodetool/SetGetColumnIndexSizeTest.java
test/unit/org/apache/cassandra/tools/nodetool/SetGetCompactionThroughputTest.java
test/unit/org/apache/cassandra/tools/nodetool/SetGetEntireSSTableInterDCStreamThroughputTest.java
test/unit/org/apache/cassandra/tools/nodetool/SetGetEntireSSTableStreamThroughputTest.java
test/unit/org/apache/cassandra/tools/nodetool/SetGetInterDCStreamThroughputTest.java
test/unit/org/apache/cassandra/tools/nodetool/SetGetStreamThroughputTest.java
test/unit/org/apache/cassandra/tools/nodetool/TpStatsTest.java
test/unit/org/apache/cassandra/transport/CQLUserAuditTest.java
test/unit/org/apache/cassandra/transport/SerDeserTest.java
test/unit/org/apache/cassandra/utils/SimpleGraph.java [new file with mode: 0644]
test/unit/org/apache/cassandra/utils/SimpleGraphTest.java [new file with mode: 0644]
test/unit/org/apache/cassandra/utils/btree/BTreeTest.java
test/unit/org/apache/cassandra/utils/bytecomparable/AbstractTypeByteSourceTest.java [new file with mode: 0644]
test/unit/org/apache/cassandra/utils/bytecomparable/ByteSourceComparisonTest.java [new file with mode: 0644]
test/unit/org/apache/cassandra/utils/bytecomparable/ByteSourceConversionTest.java [new file with mode: 0644]
test/unit/org/apache/cassandra/utils/bytecomparable/ByteSourceInverseTest.java [new file with mode: 0644]
test/unit/org/apache/cassandra/utils/bytecomparable/ByteSourceSequenceTest.java [new file with mode: 0644]
test/unit/org/apache/cassandra/utils/bytecomparable/ByteSourceTestBase.java [new file with mode: 0644]
test/unit/org/apache/cassandra/utils/bytecomparable/DecoratedKeyByteSourceTest.java [new file with mode: 0644]
tools/stress/src/org/apache/cassandra/io/sstable/StressCQLSSTableWriter.java
tools/stress/src/org/apache/cassandra/stress/CompactionStress.java

index 5632664486d43ee9d13be776bb587b91aa7b82b0..6a3d72e1ca45bd2a5266b2da653dcea8a51e0352 100644 (file)
@@ -58,6 +58,8 @@
                  <exclude NAME="**/doc/antora.yml"/>
                  <exclude name="**/test/conf/cassandra.yaml"/>
                  <exclude name="**/test/conf/cassandra-old.yaml"/>
+                 <exclude name="**/test/conf/cassandra-converters-special-cases-old-names.yaml"/>
+                 <exclude name="**/test/conf/cassandra-converters-special-cases.yaml"/>
                  <exclude name="**/test/conf/cassandra_encryption.yaml"/>
                  <exclude name="**/test/conf/cdc.yaml"/>
                  <exclude name="**/test/conf/commitlog_compression_LZ4.yaml"/>
index 0d113a08ca5a18748830fad2e14ac8370571a5c5..5aa00b16f6b27ebf3dc31f3c47cc4168ee40ed48 100644 (file)
@@ -200,6 +200,10 @@ j8_with_dtests_jobs: &j8_with_dtests_jobs
         requires:
           - start_j8_jvm_dtests
           - j8_build
+    - j8_simulator_dtests:
+        requires:
+          - start_j8_jvm_dtests
+          - j8_build
     - j8_jvm_dtests_vnode:
         requires:
           - start_j8_jvm_dtests
@@ -381,6 +385,9 @@ j8_pre-commit_jobs: &j8_pre-commit_jobs
     - j8_unit_tests:
         requires:
           - j8_build
+    - j8_simulator_dtests:
+        requires:
+          - j8_build
     - j8_jvm_dtests:
         requires:
           - j8_build
@@ -738,6 +745,15 @@ jobs:
       - log_environment
       - run_parallel_junit_tests
 
+  j8_simulator_dtests:
+    <<: *j8_small_executor
+    steps:
+      - attach_workspace:
+          at: /home/cassandra
+      - create_junit_containers
+      - log_environment
+      - run_simulator_tests
+
   j8_jvm_dtests:
     <<: *j8_small_par_executor
     steps:
@@ -1312,6 +1328,33 @@ commands:
 
         no_output_timeout: 15m
 
+  run_simulator_tests:
+    parameters:
+      no_output_timeout:
+        type: string
+        default: 30m
+    steps:
+    - run:
+        name: Run Simulator Tests 
+        command: |
+          set -x
+          export PATH=$JAVA_HOME/bin:$PATH
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra
+          if [ -d ~/dtest_jars ]; then
+            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+          fi
+          ant test-simulator-dtest
+        no_output_timeout: <<parameters.no_output_timeout>>
+    - store_test_results:
+        path: /tmp/cassandra/build/test/output/
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/logs
+        destination: logs
+
   run_junit_tests:
     parameters:
       target:
index 56e1ca2dbcc19cd04d12f5930d678022ecdbf31d..36fabbdf0894a46638e1ad3e4054679c78f05e95 100644 (file)
@@ -1,5 +1,5 @@
---- config-2_1.yml     2022-05-30 12:06:34.000000000 -0400
-+++ config-2_1.yml.HIGHRES     2022-05-30 12:06:59.000000000 -0400
+--- config-2_1.yml     2022-05-30 12:09:35.000000000 -0400
++++ config-2_1.yml.HIGHRES     2022-05-30 12:10:16.000000000 -0400
 @@ -105,14 +105,14 @@
  j8_par_executor: &j8_par_executor
    executor:
index 5a14e3454bb0718035ec896d32b9cf0df4ed5693..90995a744a2ef2591db1ab46fcc91efc979b6289 100644 (file)
@@ -1,5 +1,5 @@
---- config-2_1.yml     2022-05-30 12:06:34.000000000 -0400
-+++ config-2_1.yml.MIDRES      2022-05-30 12:06:52.000000000 -0400
+--- config-2_1.yml     2022-05-30 12:09:35.000000000 -0400
++++ config-2_1.yml.MIDRES      2022-05-30 12:10:10.000000000 -0400
 @@ -105,14 +105,14 @@
  j8_par_executor: &j8_par_executor
    executor:
index a82e5d1915fffb416b5a0acb03f937315309b483..07b8fb3437ab787b7c6184f4a0aec44be833dfc0 100644 (file)
@@ -1152,6 +1152,112 @@ jobs:
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - CASSANDRA_USE_JDK11: true
+  j8_simulator_dtests:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 1
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Determine unit Tests to Run
+        command: |
+          # reminder: this code (along with all the steps) is independently executed on every circle container
+          # so the goal here is to get the circleci script to return the tests *this* container will run
+          # which we do via the `circleci` cli tool.
+
+          rm -fr ~/cassandra-dtest/upgrade_tests
+          echo "***java tests***"
+
+          # get all of our unit test filenames
+          set -eo pipefail && circleci tests glob "$HOME/cassandra/test/unit/**/*.java" > /tmp/all_java_unit_tests.txt
+
+          # split up the unit tests into groups based on the number of containers we have
+          set -eo pipefail && circleci tests split --split-by=timings --timings-type=filename --index=${CIRCLE_NODE_INDEX} --total=${CIRCLE_NODE_TOTAL} /tmp/all_java_unit_tests.txt > /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt
+          set -eo pipefail && cat /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt | sed "s;^/home/cassandra/cassandra/test/unit/;;g" | grep "Test\.java$"  > /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+          echo "** /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt"
+          cat /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+        no_output_timeout: 15m
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Run Simulator Tests
+        command: |
+          set -x
+          export PATH=$JAVA_HOME/bin:$PATH
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra
+          if [ -d ~/dtest_jars ]; then
+            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+          fi
+          ant test-simulator-dtest
+        no_output_timeout: 30m
+    - store_test_results:
+        path: /tmp/cassandra/build/test/output/
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_UTEST_TARGET: testsome
+    - REPEATED_UTEST_CLASS: null
+    - REPEATED_UTEST_METHODS: null
+    - REPEATED_UTEST_VNODES: false
+    - REPEATED_UTEST_COUNT: 100
+    - REPEATED_UTEST_STOP_ON_FAILURE: false
+    - REPEATED_DTEST_NAME: null
+    - REPEATED_DTEST_VNODES: false
+    - REPEATED_DTEST_COUNT: 100
+    - REPEATED_DTEST_STOP_ON_FAILURE: false
+    - REPEATED_UPGRADE_DTEST_NAME: null
+    - REPEATED_UPGRADE_DTEST_COUNT: 100
+    - REPEATED_UPGRADE_DTEST_STOP_ON_FAILURE: false
+    - REPEATED_JVM_UPGRADE_DTEST_CLASS: null
+    - REPEATED_JVM_UPGRADE_DTEST_METHODS: null
+    - REPEATED_JVM_UPGRADE_DTEST_COUNT: 100
+    - REPEATED_JVM_UPGRADE_DTEST_STOP_ON_FAILURE: false
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
   j8_cqlsh-dtests-py3-with-vnodes:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
@@ -3772,6 +3878,10 @@ workflows:
         requires:
         - start_j8_jvm_dtests
         - j8_build
+    - j8_simulator_dtests:
+        requires:
+        - start_j8_jvm_dtests
+        - j8_build
     - j8_jvm_dtests_vnode:
         requires:
         - start_j8_jvm_dtests
@@ -3938,6 +4048,9 @@ workflows:
     - j8_unit_tests:
         requires:
         - j8_build
+    - j8_simulator_dtests:
+        requires:
+        - j8_build
     - j8_jvm_dtests:
         requires:
         - j8_build
index 36d51c922c451ef5f0f635bfeda928fc84bc2689..bdb5a82d52b7018d1a2eea4b8278af879a52b6d7 100644 (file)
@@ -1152,6 +1152,112 @@ jobs:
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - CASSANDRA_USE_JDK11: true
+  j8_simulator_dtests:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 1
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Determine unit Tests to Run
+        command: |
+          # reminder: this code (along with all the steps) is independently executed on every circle container
+          # so the goal here is to get the circleci script to return the tests *this* container will run
+          # which we do via the `circleci` cli tool.
+
+          rm -fr ~/cassandra-dtest/upgrade_tests
+          echo "***java tests***"
+
+          # get all of our unit test filenames
+          set -eo pipefail && circleci tests glob "$HOME/cassandra/test/unit/**/*.java" > /tmp/all_java_unit_tests.txt
+
+          # split up the unit tests into groups based on the number of containers we have
+          set -eo pipefail && circleci tests split --split-by=timings --timings-type=filename --index=${CIRCLE_NODE_INDEX} --total=${CIRCLE_NODE_TOTAL} /tmp/all_java_unit_tests.txt > /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt
+          set -eo pipefail && cat /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt | sed "s;^/home/cassandra/cassandra/test/unit/;;g" | grep "Test\.java$"  > /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+          echo "** /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt"
+          cat /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+        no_output_timeout: 15m
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Run Simulator Tests
+        command: |
+          set -x
+          export PATH=$JAVA_HOME/bin:$PATH
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra
+          if [ -d ~/dtest_jars ]; then
+            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+          fi
+          ant test-simulator-dtest
+        no_output_timeout: 30m
+    - store_test_results:
+        path: /tmp/cassandra/build/test/output/
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_UTEST_TARGET: testsome
+    - REPEATED_UTEST_CLASS: null
+    - REPEATED_UTEST_METHODS: null
+    - REPEATED_UTEST_VNODES: false
+    - REPEATED_UTEST_COUNT: 100
+    - REPEATED_UTEST_STOP_ON_FAILURE: false
+    - REPEATED_DTEST_NAME: null
+    - REPEATED_DTEST_VNODES: false
+    - REPEATED_DTEST_COUNT: 100
+    - REPEATED_DTEST_STOP_ON_FAILURE: false
+    - REPEATED_UPGRADE_DTEST_NAME: null
+    - REPEATED_UPGRADE_DTEST_COUNT: 100
+    - REPEATED_UPGRADE_DTEST_STOP_ON_FAILURE: false
+    - REPEATED_JVM_UPGRADE_DTEST_CLASS: null
+    - REPEATED_JVM_UPGRADE_DTEST_METHODS: null
+    - REPEATED_JVM_UPGRADE_DTEST_COUNT: 100
+    - REPEATED_JVM_UPGRADE_DTEST_STOP_ON_FAILURE: false
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
   j8_cqlsh-dtests-py3-with-vnodes:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
@@ -3772,6 +3878,10 @@ workflows:
         requires:
         - start_j8_jvm_dtests
         - j8_build
+    - j8_simulator_dtests:
+        requires:
+        - start_j8_jvm_dtests
+        - j8_build
     - j8_jvm_dtests_vnode:
         requires:
         - start_j8_jvm_dtests
@@ -3938,6 +4048,9 @@ workflows:
     - j8_unit_tests:
         requires:
         - j8_build
+    - j8_simulator_dtests:
+        requires:
+        - j8_build
     - j8_jvm_dtests:
         requires:
         - j8_build
index a82e5d1915fffb416b5a0acb03f937315309b483..07b8fb3437ab787b7c6184f4a0aec44be833dfc0 100644 (file)
@@ -1152,6 +1152,112 @@ jobs:
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - CASSANDRA_USE_JDK11: true
+  j8_simulator_dtests:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 1
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Determine unit Tests to Run
+        command: |
+          # reminder: this code (along with all the steps) is independently executed on every circle container
+          # so the goal here is to get the circleci script to return the tests *this* container will run
+          # which we do via the `circleci` cli tool.
+
+          rm -fr ~/cassandra-dtest/upgrade_tests
+          echo "***java tests***"
+
+          # get all of our unit test filenames
+          set -eo pipefail && circleci tests glob "$HOME/cassandra/test/unit/**/*.java" > /tmp/all_java_unit_tests.txt
+
+          # split up the unit tests into groups based on the number of containers we have
+          set -eo pipefail && circleci tests split --split-by=timings --timings-type=filename --index=${CIRCLE_NODE_INDEX} --total=${CIRCLE_NODE_TOTAL} /tmp/all_java_unit_tests.txt > /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt
+          set -eo pipefail && cat /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt | sed "s;^/home/cassandra/cassandra/test/unit/;;g" | grep "Test\.java$"  > /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+          echo "** /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt"
+          cat /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+        no_output_timeout: 15m
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Run Simulator Tests
+        command: |
+          set -x
+          export PATH=$JAVA_HOME/bin:$PATH
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra
+          if [ -d ~/dtest_jars ]; then
+            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+          fi
+          ant test-simulator-dtest
+        no_output_timeout: 30m
+    - store_test_results:
+        path: /tmp/cassandra/build/test/output/
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_UTEST_TARGET: testsome
+    - REPEATED_UTEST_CLASS: null
+    - REPEATED_UTEST_METHODS: null
+    - REPEATED_UTEST_VNODES: false
+    - REPEATED_UTEST_COUNT: 100
+    - REPEATED_UTEST_STOP_ON_FAILURE: false
+    - REPEATED_DTEST_NAME: null
+    - REPEATED_DTEST_VNODES: false
+    - REPEATED_DTEST_COUNT: 100
+    - REPEATED_DTEST_STOP_ON_FAILURE: false
+    - REPEATED_UPGRADE_DTEST_NAME: null
+    - REPEATED_UPGRADE_DTEST_COUNT: 100
+    - REPEATED_UPGRADE_DTEST_STOP_ON_FAILURE: false
+    - REPEATED_JVM_UPGRADE_DTEST_CLASS: null
+    - REPEATED_JVM_UPGRADE_DTEST_METHODS: null
+    - REPEATED_JVM_UPGRADE_DTEST_COUNT: 100
+    - REPEATED_JVM_UPGRADE_DTEST_STOP_ON_FAILURE: false
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
   j8_cqlsh-dtests-py3-with-vnodes:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
@@ -3772,6 +3878,10 @@ workflows:
         requires:
         - start_j8_jvm_dtests
         - j8_build
+    - j8_simulator_dtests:
+        requires:
+        - start_j8_jvm_dtests
+        - j8_build
     - j8_jvm_dtests_vnode:
         requires:
         - start_j8_jvm_dtests
@@ -3938,6 +4048,9 @@ workflows:
     - j8_unit_tests:
         requires:
         - j8_build
+    - j8_simulator_dtests:
+        requires:
+        - j8_build
     - j8_jvm_dtests:
         requires:
         - j8_build
index 273c109921879166b8161b7227c06a8f21c8948d..dac7e9e351a5911dbfb374ab6103fbd6a57f47ff 100644 (file)
@@ -1152,6 +1152,112 @@ jobs:
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - CASSANDRA_USE_JDK11: true
+  j8_simulator_dtests:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 1
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Determine unit Tests to Run
+        command: |
+          # reminder: this code (along with all the steps) is independently executed on every circle container
+          # so the goal here is to get the circleci script to return the tests *this* container will run
+          # which we do via the `circleci` cli tool.
+
+          rm -fr ~/cassandra-dtest/upgrade_tests
+          echo "***java tests***"
+
+          # get all of our unit test filenames
+          set -eo pipefail && circleci tests glob "$HOME/cassandra/test/unit/**/*.java" > /tmp/all_java_unit_tests.txt
+
+          # split up the unit tests into groups based on the number of containers we have
+          set -eo pipefail && circleci tests split --split-by=timings --timings-type=filename --index=${CIRCLE_NODE_INDEX} --total=${CIRCLE_NODE_TOTAL} /tmp/all_java_unit_tests.txt > /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt
+          set -eo pipefail && cat /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt | sed "s;^/home/cassandra/cassandra/test/unit/;;g" | grep "Test\.java$"  > /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+          echo "** /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt"
+          cat /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+        no_output_timeout: 15m
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Run Simulator Tests
+        command: |
+          set -x
+          export PATH=$JAVA_HOME/bin:$PATH
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra
+          if [ -d ~/dtest_jars ]; then
+            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+          fi
+          ant test-simulator-dtest
+        no_output_timeout: 30m
+    - store_test_results:
+        path: /tmp/cassandra/build/test/output/
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_UTEST_TARGET: testsome
+    - REPEATED_UTEST_CLASS: null
+    - REPEATED_UTEST_METHODS: null
+    - REPEATED_UTEST_VNODES: false
+    - REPEATED_UTEST_COUNT: 100
+    - REPEATED_UTEST_STOP_ON_FAILURE: false
+    - REPEATED_DTEST_NAME: null
+    - REPEATED_DTEST_VNODES: false
+    - REPEATED_DTEST_COUNT: 100
+    - REPEATED_DTEST_STOP_ON_FAILURE: false
+    - REPEATED_UPGRADE_DTEST_NAME: null
+    - REPEATED_UPGRADE_DTEST_COUNT: 100
+    - REPEATED_UPGRADE_DTEST_STOP_ON_FAILURE: false
+    - REPEATED_JVM_UPGRADE_DTEST_CLASS: null
+    - REPEATED_JVM_UPGRADE_DTEST_METHODS: null
+    - REPEATED_JVM_UPGRADE_DTEST_COUNT: 100
+    - REPEATED_JVM_UPGRADE_DTEST_STOP_ON_FAILURE: false
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
   j8_cqlsh-dtests-py3-with-vnodes:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
@@ -3772,6 +3878,10 @@ workflows:
         requires:
         - start_j8_jvm_dtests
         - j8_build
+    - j8_simulator_dtests:
+        requires:
+        - start_j8_jvm_dtests
+        - j8_build
     - j8_jvm_dtests_vnode:
         requires:
         - start_j8_jvm_dtests
@@ -3938,6 +4048,9 @@ workflows:
     - j8_unit_tests:
         requires:
         - j8_build
+    - j8_simulator_dtests:
+        requires:
+        - j8_build
     - j8_jvm_dtests:
         requires:
         - j8_build
index fd3b0c5da3f286a2cd73f9276160e983f249a38c..6097abe8977fe817a6f622056c1dc77be82f4ef2 100644 (file)
@@ -1,9 +1,91 @@
-4.1-alpha2
+4.2
+ * DataOutputBuffer#scratchBuffer can use off-heap or on-heap memory as a means to control memory allocations (CASSANDRA-16471)
+ * Add ability to read the TTLs and write times of the elements of a collection and/or UDT (CASSANDRA-8877)
+ * Removed Python < 2.7 support from formatting.py (CASSANDRA-17694)
+ * Cleanup pylint issues with pylexotron.py (CASSANDRA-17779)
+ * NPE bug in streaming checking if SSTable is being repaired (CASSANDRA-17801)
+ * Users of NativeLibrary should handle lack of JNA appropriately when running in client mode (CASSANDRA-17794)
+ * Warn on unknown directories found in system keyspace directory rather than kill node during startup checks (CASSANDRA-17777)
+ * Log duplicate rows sharing a partition key found in verify and scrub (CASSANDRA-17789)
+ * Add separate thread pool for Secondary Index building so it doesn't block compactions (CASSANDRA-17781)
+ * Added JMX call to getSSTableCountPerTWCSBucket for TWCS (CASSANDRA-17774)
+ * When doing a host replacement, -Dcassandra.broadcast_interval_ms is used to know when to check the ring but checks that the ring wasn't changed in -Dcassandra.ring_delay_ms, changes to ring delay should not depend on when we publish load stats (CASSANDRA-17776)
+ * When bootstrap fails, CassandraRoleManager may attempt to do read queries that fail with "Cannot read from a bootstrapping node", and increments unavailables counters (CASSANDRA-17754)
+ * Add guardrail to disallow DROP KEYSPACE commands (CASSANDRA-17767)
+ * Remove ephemeral snapshot marker file and introduce a flag to SnapshotManifest (CASSANDRA-16911)
+ * Add a virtual table that exposes currently running queries (CASSANDRA-15241)
+ * Allow sstableloader to specify table without relying on path (CASSANDRA-16584)
+ * Fix TestGossipingPropertyFileSnitch.test_prefer_local_reconnect_on_listen_address (CASSANDRA-17700)
+ * Add ByteComparable API (CASSANDRA-6936)
+ * Add guardrail for maximum replication factor (CASSANDRA-17500)
+ * Increment CQLSH to version 6.2.0 for release 4.2 (CASSANDRA-17646)
+ * Adding support to perform certificate based internode authentication (CASSANDRA-17661)
+ * Option to disable CDC writes of repaired data (CASSANDRA-17666)
+ * When a node is bootstrapping it gets the whole gossip state but applies in random order causing some cases where StorageService will fail causing an instance to not show up in TokenMetadata (CASSANDRA-17676)
+ * Add CQLSH command SHOW REPLICAS (CASSANDRA-17577)
+ * Add guardrail to allow disabling of SimpleStrategy (CASSANDRA-17647)
+ * Change default directory permission to 750 in packaging (CASSANDRA-17470)
+ * Adding support for TLS client authentication for internode communication (CASSANDRA-17513)
+ * Add new CQL function maxWritetime (CASSANDRA-17425)
+ * Add guardrail for ALTER TABLE ADD / DROP / REMOVE column operations (CASSANDRA-17495)
+ * Rename DisableFlag class to EnableFlag on guardrails (CASSANDRA-17544)
+Merged from 4.1:
+ * Fix a race condition where a keyspace can be oopened while it is being removed (CASSANDRA-17658)
+ * DatabaseDescriptor will set the default failure detector during client initialization (CASSANDRA-17782)
+ * Avoid initializing schema via SystemKeyspace.getPreferredIP() with the BulkLoader tool (CASSANDRA-17740)
+ * Uncomment prepared_statements_cache_size, key_cache_size, counter_cache_size, index_summary_capacity which were
+   commented out by mistake in a previous patch
+   Fix breaking change with cache_load_timeout; cache_load_timeout_seconds <=0 and cache_load_timeout=0 are equivalent
+   and they both mean disabled
+   Deprecate public method setRate(final double throughputMbPerSec) in Compaction Manager in favor of
+   setRateInBytes(final double throughputBytesPerSec)
+   Revert breaking change removal of StressCQLSSTableWriter.Builder.withBufferSizeInMB(int size). Deprecate it in favor
+   of StressCQLSSTableWriter.Builder.withBufferSizeInMiB(int size)
+   Fix precision issues, add new -m flag (for nodetool/setstreamthroughput, nodetool/setinterdcstreamthroughput,
+   nodetool/getstreamthroughput and nodetoo/getinterdcstreamthroughput), add new -d flags (nodetool/getstreamthroughput, nodetool/getinterdcstreamthroughput, nodetool/getcompactionthroughput)
+   Fix a bug with precision in nodetool/compactionstats
+   Deprecate StorageService methods and add new ones for stream_throughput_outbound, inter_dc_stream_throughput_outbound,
+   compaction_throughput_outbound in the JMX MBean `org.apache.cassandra.db:type=StorageService`
+   Removed getEntireSSTableStreamThroughputMebibytesPerSec in favor of new getEntireSSTableStreamThroughputMebibytesPerSecAsDouble
+   in the JMX MBean `org.apache.cassandra.db:type=StorageService`
+   Removed getEntireSSTableInterDCStreamThroughputMebibytesPerSec in favor of getEntireSSTableInterDCStreamThroughputMebibytesPerSecAsDouble
+   in the JMX MBean `org.apache.cassandra.db:type=StorageService` (CASSANDRA-17725)
+ * Fix sstable_preemptive_open_interval disabled value. sstable_preemptive_open_interval = null backward compatible with
+   sstable_preemptive_open_interval_in_mb = -1 (CASSANDRA-17737)
+ * Remove usages of Path#toFile() in the snapshot apparatus (CASSANDRA-17769)
+ * Fix Settings Virtual Table to update paxos_variant after startup and rename enable_uuid_sstable_identifiers to
+   uuid_sstable_identifiers_enabled as per our config naming conventions (CASSANDRA-17738)
+ * index_summary_resize_interval_in_minutes = -1 is equivalent to index_summary_resize_interval being set to null or
+   disabled. JMX MBean IndexSummaryManager, setResizeIntervalInMinutes method still takes resizeIntervalInMinutes = -1 for disabled (CASSANDRA-17735)
+ * min_tracked_partition_size_bytes parameter from 4.1 alpha1 was renamed to min_tracked_partition_size (CASSANDRA-17733)
+ * Remove commons-lang dependency during build runtime (CASSANDRA-17724)
+ * Relax synchronization on StreamSession#onError() to avoid deadlock (CASSANDRA-17706)
+ * Fix AbstractCell#toString throws MarshalException for cell in collection (CASSANDRA-17695)
+ * Add new vtable output option to compactionstats (CASSANDRA-17683)
+ * Fix commitLogUpperBound initialization in AbstractMemtableWithCommitlog (CASSANDRA-17587)
+ * Fix widening to long in getBatchSizeFailThreshold (CASSANDRA-17650)
+ * Fix widening from mebibytes to bytes in IntMebibytesBound (CASSANDRA-17716)
  * Revert breaking change in nodetool clientstats and expose cient options through nodetool clientstats --client-options. (CASSANDRA-17715)
  * Fix missed nowInSec values in QueryProcessor (CASSANDRA-17458)
  * Revert removal of withBufferSizeInMB(int size) in CQLSSTableWriter.Builder class and deprecate it in favor of withBufferSizeInMiB(int size) (CASSANDRA-17675)
  * Remove expired snapshots of dropped tables after restart (CASSANDRA-17619)
 Merged from 4.0:
+ * Add 'noboolean' rpm build for older distros like CentOS7 (CASSANDRA-17765)
+ * Fix default value for compaction_throughput_mb_per_sec in Config class to match  the one in cassandra.yaml (CASSANDRA-17790)
+ * Fix Setting Virtual Table - update after startup config properties gc_log_threshold_in_ms, gc_warn_threshold_in_ms,
+   conf.index_summary_capacity_in_mb, prepared_statements_cache_size_mb, key_cache_size_in_mb, counter_cache_size_in_mb
+   (CASSANDRA-17737)
+ * Clean up ScheduledExecutors, CommitLog, and MessagingService shutdown for in-JVM dtests (CASSANDRA-17731)
+ * Remove extra write to system table for prepared statements (CASSANDRA-17764)
+Merged from 3.11:
+ * Document usage of closed token intervals in manual compaction (CASSANDRA-17575)
+ * Creating of a keyspace on insufficient number of replicas should filter out gosspping-only members (CASSANDRA-17759)
+Merged from 3.0:
+ * Fix restarting of services on gossipping-only member (CASSANDRA-17752)
+
+
+4.0.5
+ * Utilise BTree improvements to reduce garbage and improve throughput (CASSANDRA-15511)
  * SSL storage port in sstableloader is deprecated (CASSANDRA-17602)
  * Fix counter write timeouts at ONE (CASSANDRA-17411)
  * Fix NPE in getLocalPrimaryRangeForEndpoint (CASSANDRA-17680)
@@ -23,6 +105,13 @@ Merged from 3.0:
  * Fix repair_request_timeout_in_ms and remove paxos_auto_repair_threshold_mb (CASSANDRA-17557)
  * Incremental repair leaks SomeRepairFailedException after switch away from flatMap (CASSANDRA-17620)
  * StorageService read threshold get methods throw NullPointerException due to not handling null configs (CASSANDRA-17593)
+Merged from 4.0:
+ * Ensure FileStreamTask cannot compromise shared channel proxy for system table when interrupted (CASSANDRA-17663)
+Merged from 3.11:
+Merged from 3.0:
+
+
+4.1
  * Rename truncate_drop guardrail to drop_truncate_table (CASSANDRA-17592)
  * nodetool enablefullquerylog can NPE when directory has no files (CASSANDRA-17595)
  * Add auto_snapshot_ttl configuration (CASSANDRA-16790)
@@ -186,6 +275,7 @@ Merged from 3.0:
  * GossiperTest.testHasVersion3Nodes didn't take into account trunk version changes, fixed to rely on latest version (CASSANDRA-16651)
  * Update JNA library to 5.9.0 and snappy-java to version 1.1.8.4 (CASSANDRA-17040)
 Merged from 4.0:
+ * silence benign SslClosedEngineException (CASSANDRA-17565)
 Merged from 3.11:
 Merged from 3.0:
  * Fix issue where frozen maps may not be serialized in the correct order (CASSANDRA-17623)
index 9f0332c9e1af13beade99e7868a5b7aef04c0472..96ad4b9ac0c783ac30163450195e769f2a23bea5 100644 (file)
--- a/NEWS.txt
+++ b/NEWS.txt
@@ -51,6 +51,41 @@ restore snapshots created with the previous major version using the
 'sstableloader' tool. You can upgrade the file format of your snapshots
 using the provided 'sstableupgrade' tool.
 
+
+4.2
+===
+
+New features
+------------
+    - Added a new configuration cdc_on_repair_enabled to toggle whether CDC mutations are replayed through the
+      write path on streaming, e.g. repair. When enabled, CDC data streamed to the destination node will be written into
+      commit log first. When disabled, the streamed CDC data is written into SSTables just the same as normal streaming.
+      If this is set to false, streaming will be considerably faster; however, it's possible that, in extreme situations
+      (losing > quorum # nodes in a replica set), you may have data in your SSTables that never makes it to the CDC log.
+      The default is true/enabled. The configuration can be altered via JMX.
+    - Added support for reading the write times and TTLs of the elements of collections and UDTs, regardless of being
+      frozen or not. The CQL functions writetime, maxwritetime and ttl can now be applied to entire collections/UDTs,
+      single collection/UDT elements and slices of collection/UDT elements.
+    - Added a new CQL function, maxwritetime. It shows the largest unix timestamp that the data was written, similar to
+      its sibling CQL function, writetime.
+    - New Guardrails added:
+      - Whether ALTER TABLE commands are allowed to mutate columns
+      - Whether SimpleStrategy is allowed on keyspace creation or alteration
+      - Maximum replication factor
+      - Whether DROP KEYSPACE commands are allowed.
+    - It is possible to list ephemeral snapshots with the nodetool listsnapshots command when the flag "-e" is specified.
+
+Upgrading
+---------
+    - Ephemeral marker files for snapshots done by repairs are not created anymore;
+      there is a dedicated flag in the snapshot manifest instead. On upgrade of a node to version 4.2, on the node's start, in case there
+      are such ephemeral snapshots on disk, they will be deleted (same behaviour as before) and any new ephemeral snapshots
+      will no longer create ephemeral marker files, as a flag in the snapshot manifest was introduced instead.
+
+Deprecation
+-----------
+
+
 4.1
 ===
 
@@ -95,6 +130,7 @@ New features
       native_transport_max_requests_per_second in cassandra.yaml.
     - Support for pre hashing passwords on CQL DCL commands
     - Expose all client options via system_views.clients and nodetool clientstats --client-options.
+    - Add new nodetool compactionstats --vtable option to match the sstable_tasks vtable.
     - Support for String concatenation has been added through the + operator.
     - New configuration max_hints_size_per_host to limit the size of local hints files per host in mebibytes. Setting to
       non-positive value disables the limit, which is the default behavior. Setting to a positive value to ensure
@@ -164,6 +200,15 @@ New features
 
 Upgrading
 ---------
+    - `cache_load_timeout_seconds` being negative for disabled is equivalent to `cache_load_timeout` = 0 for disabled.
+    - `sstable_preemptive_open_interval_in_mb` being negative for disabled is equivalent to `sstable_preemptive_open_interval`
+      being null again. In the JMX MBean `org.apache.cassandra.db:type=StorageService`, the setter method
+      `setSSTablePreemptiveOpenIntervalInMB` still takes negative `intervalInMB` numbers for disabled.
+    - `enable_uuid_sstable_identifiers` parameter from 4.1 alpha1 was renamed to `uuid_sstable_identifiers_enabled`.
+    - `index_summary_resize_interval_in_minutes = -1` is equivalent to index_summary_resize_interval being set to `null` or
+      disabled. In the JMX MBean `org.apache.cassandra.db:type=IndexSummaryManager`, the setter method `setResizeIntervalInMinutes` still takes
+      `resizeIntervalInMinutes = -1` for disabled.
+    - min_tracked_partition_size_bytes parameter from 4.1 alpha1 was renamed to min_tracked_partition_size.
     - Parameters of type data storage, duration and data rate cannot be set to Long.MAX_VALUE (former parameters of long type)
       and Integer.MAX_VALUE (former parameters of int type). Those numbers are used during conversion between units to prevent
       an overflow from happening. (CASSANDRA-17571)
@@ -224,7 +269,21 @@ Upgrading
 
 Deprecation
 -----------
-    - `withBufferSizeInMB(int size)` in CQLSSTableWriter.Builder class is deprecated in favor of withBufferSizeInMiB(int size)
+    - In the JMX MBean `org.apache.cassandra.db:type=StorageService`: deprecate getter method `getStreamThroughputMbitPerSec`
+      in favor of getter method `getStreamThroughputMbitPerSecAsDouble`; deprecate getter method `getStreamThroughputMbPerSec`
+      in favor of getter methods `getStreamThroughputMebibytesPerSec` and `getStreamThroughputMebibytesPerSecAsDouble`;
+      deprecate getter method `getInterDCStreamThroughputMbitPerSec` in favor of getter method `getInterDCStreamThroughputMbitPerSecAsDouble`;
+      deprecate getter method `getInterDCStreamThroughputMbPerSec` in favor of getter method `getInterDCStreamThroughputMebibytesPerSecAsDouble`;
+      deprecate getter method `getCompactionThroughputMbPerSec` in favor of getter methods `getCompactionThroughtputMibPerSecAsDouble`
+      and `getCompactionThroughtputBytesPerSec`; deprecate setter methods `setStreamThroughputMbPerSec` and `setStreamThroughputMbitPerSec`
+      in favor of `setStreamThroughputMebibytesPerSec`; deprecate setter methods `setInterDCStreamThroughputMbitPerSec` and
+      `setInterDCStreamThroughputMbPerSec` in favor of `setInterDCStreamThroughputMebibytesPerSec`. See CASSANDRA-17725 for further details.
+    - Deprecate public method `setRate(final double throughputMbPerSec)` in `CompactionManager` in favor of
+      `setRateInBytes(final double throughputBytesPerSec)`
+    - `withBufferSizeInMB(int size)` in `StressCQLSSTableWriter.Builder` class is deprecated in favor of `withBufferSizeInMiB(int size)`
+      No change of functionality in the new one, only a name change for clarity with regard to units and to follow naming
+      standardization.
+    - `withBufferSizeInMB(int size)` in `CQLSSTableWriter.Builder` class is deprecated in favor of `withBufferSizeInMiB(int size)`
       No change of functionality in the new one, only name change for clarity in regards to units and to follow naming
       standardization.
     - The properties `keyspace_count_warn_threshold` and `table_count_warn_threshold` in cassandra.yaml have been
@@ -331,6 +390,10 @@ New features
 
 Upgrading
 ---------
+    - If you were on 4.0.1 - 4.0.5 and if you haven't set compaction_throughput_mb_per_sec in your 4.0 cassandra.yaml
+      file but you relied on the internal default value, then compaction_throughput_mb_per_sec was equal to an old default
+      value of 16MiB/s in Cassandra 4.0. After CASSANDRA-17790 this is changed to 64MiB/s to match the default value in
+      cassandra.yaml. If you prefer the old value of 16MiB/s, you need to set it explicitly in your cassandra.yaml file.
     - otc_coalescing_strategy, otc_coalescing_window_us, otc_coalescing_enough_coalesced_messages,
       otc_backlog_expiration_interval_ms are deprecated and will be removed at earliest with next major release.
       otc_coalescing_strategy is disabled since 3.11.
index 3c40bf49700269fe0a85503ce41bb83560858e54..cba3a2b42450ebbda70786dc7dd581ef3c45316b 100644 (file)
@@ -39,7 +39,7 @@ be sitting in front of a prompt:
 
 ----
 Connected to Test Cluster at localhost:9160.
-[cqlsh 6.0.0 | Cassandra 4.0.2 | CQL spec 3.4.5 | Native protocol v5]
+[cqlsh 6.2.0 | Cassandra 4.2-SNAPSHOT | CQL spec 3.4.6 | Native protocol v5]
 Use HELP for help.
 cqlsh>
 ----
index 637c95e70aa2542b977cbea8c3295cc5779c9197..e47bc5951852a17ee76957f18d146e2788d00541 100755 (executable)
@@ -47,7 +47,7 @@ if platform.python_implementation().startswith('Jython'):
 UTF8 = 'utf-8'
 
 description = "CQL Shell for Apache Cassandra"
-version = "6.1.0"
+version = "6.2.0"
 
 readline = None
 try:
@@ -600,6 +600,13 @@ class Shell(cmd.Cmd):
     def show_session(self, sessionid, partial_session=False):
         print_trace_session(self, self.session, sessionid, partial_session)
 
+    def show_replicas(self, token_value, keyspace=None):
+        ks = self.current_keyspace if keyspace is None else keyspace
+        token_map = self.conn.metadata.token_map
+        nodes = token_map.get_replicas(ks, token_map.token_class(token_value))
+        addresses = [x.address for x in nodes]
+        print(f"{addresses}")
+
     def get_connection_versions(self):
         result, = self.session.execute("select * from system.local where key = 'local'")
         vers = {
@@ -979,7 +986,7 @@ class Shell(cmd.Cmd):
         if parsed:
             self.printerr('Improper %s command (problem at %r).' % (cmdword, parsed.remainder[0]))
         else:
-            self.printerr('Improper %s command.' % cmdword)
+            self.printerr(f'Improper {cmdword} command.')
 
     def do_use(self, parsed):
         ksname = parsed.get_binding('ksname')
@@ -1578,6 +1585,11 @@ class Shell(cmd.Cmd):
         SHOW SESSION <sessionid>
 
           Pretty-prints the requested tracing session.
+
+        SHOW REPLICAS <token> (<keyspace>)
+
+          Lists the replica nodes by IP address for the given token. The current
+          keyspace is used if one is not specified.
         """
         showwhat = parsed.get_binding('what').lower()
         if showwhat == 'version':
@@ -1588,6 +1600,10 @@ class Shell(cmd.Cmd):
         elif showwhat.startswith('session'):
             session_id = parsed.get_binding('sessionid').lower()
             self.show_session(UUID(session_id))
+        elif showwhat.startswith('replicas'):
+            token_id = parsed.get_binding('token')
+            keyspace = parsed.get_binding('keyspace')
+            self.show_replicas(token_id, keyspace)
         else:
             self.printerr('Wait, how do I show %r?' % (showwhat,))
 
index a4030a6a9c00ba49ae2b5fed984dcf2bebf11d49..ca346c9f28c9f8d0f67592f385a7240ebe4c6f47 100644 (file)
--- a/build.xml
+++ b/build.xml
@@ -33,7 +33,7 @@
     <property name="debuglevel" value="source,lines,vars"/>
 
     <!-- default version and SCM information -->
-    <property name="base.version" value="4.1-alpha2"/>
+    <property name="base.version" value="4.2"/>
     <property name="scm.connection" value="scm:https://gitbox.apache.org/repos/asf/cassandra.git"/>
     <property name="scm.developerConnection" value="scm:https://gitbox.apache.org/repos/asf/cassandra.git"/>
     <property name="scm.url" value="https://gitbox.apache.org/repos/asf?p=cassandra.git;a=tree"/>
@@ -76,8 +76,8 @@
     <property name="test.simulator-asm.src" value="${test.dir}/simulator/asm"/>
     <property name="test.simulator-bootstrap.src" value="${test.dir}/simulator/bootstrap"/>
     <property name="test.simulator-test.src" value="${test.dir}/simulator/test"/>
-    <property name="test.driver.connection_timeout_ms" value="5000"/>
-    <property name="test.driver.read_timeout_ms" value="12000"/>
+    <property name="test.driver.connection_timeout_ms" value="10000"/>
+    <property name="test.driver.read_timeout_ms" value="24000"/>
     <property name="test.jvm.args" value="" />
     <property name="dist.dir" value="${build.dir}/dist"/>
     <property name="tmp.dir" value="${java.io.tmpdir}"/>
     <property name="maven-repository-url" value="https://repository.apache.org/content/repositories/snapshots"/>
     <property name="maven-repository-id" value="apache.snapshots.https"/>
 
-    <property name="test.timeout" value="240000" />
+    <property name="test.timeout" value="480000" />
     <property name="test.memory.timeout" value="480000" />
     <property name="test.long.timeout" value="600000" />
     <property name="test.burn.timeout" value="60000000" />
     <property name="test.distributed.timeout" value="900000" />
+    <property name="test.simulation.timeout" value="1800000" />
 
     <!-- default for cql tests. Can be override by -Dcassandra.test.use_prepared=false -->
     <property name="cassandra.test.use_prepared" value="true" />
           <dependency groupId="org.apache.hadoop" artifactId="hadoop-core" version="1.0.3" scope="provided">
             <exclusion groupId="org.mortbay.jetty" artifactId="servlet-api"/>
             <exclusion groupId="commons-logging" artifactId="commons-logging"/>
+            <exclusion groupId="commons-lang" artifactId="commons-lang"/>
             <exclusion groupId="org.eclipse.jdt" artifactId="core"/>
             <exclusion groupId="ant" artifactId="ant"/>
             <exclusion groupId="junit" artifactId="junit"/>
             <exclusion groupId="net.java.dev.jna" artifactId="jna" />
             <exclusion groupId="net.java.dev.jna" artifactId="jna-platform" />
           </dependency>
-          <dependency groupId="com.google.code.findbugs" artifactId="jsr305" version="2.0.2" scope="provided"/>
+          <dependency groupId="com.google.code.findbugs" artifactId="jsr305" version="2.0.2"/>
           <dependency groupId="com.clearspring.analytics" artifactId="stream" version="2.5.2">
             <exclusion groupId="it.unimi.dsi" artifactId="fastutil" />
           </dependency>
     ant testsome -Dtest.name=org.apache.cassandra.service.StorageServiceServerTest -Dtest.methods=testRegularMode,testGetAllRangesEmpty
   -->
   <target name="testsome" depends="build-test" description="Execute specific unit tests" >
+    <condition property="withoutMethods">
+      <and>
+        <equals arg1="${test.methods}" arg2=""/>
+        <not>
+          <contains string="${test.name}" substring="*"/>
+        </not>
+      </and>
+    </condition>
+    <condition property="withMethods">
+      <and>
+        <not>
+         <equals arg1="${test.methods}" arg2=""/>
+        </not>
+        <not>
+          <contains string="${test.name}" substring="*"/>
+        </not>
+      </and>
+    </condition>
     <testmacro inputdir="${test.unit.src}" timeout="${test.timeout}">
-      <test unless:blank="${test.methods}" name="${test.name}" methods="${test.methods}" outfile="build/test/output/TEST-${test.name}-${test.methods}"/>
-      <test if:blank="${test.methods}" name="${test.name}" outfile="build/test/output/TEST-${test.name}"/>
+      <test if="withMethods" name="${test.name}" methods="${test.methods}" outfile="build/test/output/TEST-${test.name}-${test.methods}"/>
+      <test if="withoutMethods" name="${test.name}" outfile="build/test/output/TEST-${test.name}"/>
       <jvmarg value="-Dlegacy-sstable-root=${test.data}/legacy-sstables"/>
       <jvmarg value="-Dinvalid-legacy-sstable-root=${test.data}/invalid-legacy-sstables"/>
       <jvmarg value="-Dcassandra.ring_delay_ms=1000"/>
       <jvmarg value="-Dcassandra.tolerate_sstable_size=true"/>
       <jvmarg value="-Dcassandra.skip_sync=true" />
     </testmacro>
-    <testmacro inputdir="${test.simulator-test.src}" timeout="${test.distributed.timeout}" forkmode="perTest" showoutput="true" filter="**/test/${test.name}.java">
+  </target>
+
+  <target name="test-simulator-dtest" depends="build-test" description="Execute simulator dtests">
+    <testmacro inputdir="${test.simulator-test.src}" timeout="${test.simulation.timeout}" forkmode="perTest" showoutput="true" filter="**/test/${test.name}.java">
       <jvmarg value="-Dlogback.configurationFile=test/conf/logback-simulator.xml"/>
       <jvmarg value="-Dcassandra.ring_delay_ms=10000"/>
       <jvmarg value="-Dcassandra.tolerate_sstable_size=true"/>
       <jvmarg value="-Dcassandra.skip_sync=true" />
+      <jvmarg value="-Dcassandra.debugrefcount=false"/>
+      <jvmarg value="-Dcassandra.test.simulator.determinismcheck=strict"/>
       <!-- Support Simulator Tests -->
       <jvmarg line="-javaagent:${test.lib}/jars/simulator-asm.jar"/>
       <jvmarg line="-Xbootclasspath/a:${test.lib}/jars/simulator-bootstrap.jar"/>
       <jvmarg line="-XX:ActiveProcessorCount=4"/>
       <jvmarg line="-XX:-TieredCompilation"/>
+      <jvmarg line="-XX:-BackgroundCompilation"/>
+      <jvmarg line="-XX:CICompilerCount=1"/>
       <jvmarg line="-XX:Tier4CompileThreshold=1000"/>
       <jvmarg line="-XX:ReservedCodeCacheSize=256M"/>
+      <jvmarg line="-Xmx8G"/>
     </testmacro>
   </target>
 
index 9a71312e63a869c18f194072f289097a2f4f1404..8e2f90ee22cd1e6733ce4469c1dd5e5d0236cf3c 100644 (file)
        <property name="idFormat" value="blockSystemClock"/>
        <property name="influenceFormat" value="0"/>
     </module>
+
+    <module name="SuppressWithNearbyCommentFilter">
+      <property name="commentFormat" value="checkstyle: permit this invocation"/>
+      <property name="idFormat" value="blockPathToFile"/>
+      <property name="influenceFormat" value="0"/>
+    </module>
  
     <module name="RegexpSinglelineJava">
       <!-- block system time -->
     <module name="IllegalInstantiation">
       <property name="classes" value="java.io.File,java.lang.Thread,java.util.concurrent.FutureTask,java.util.concurrent.Semaphore,java.util.concurrent.CountDownLatch,java.util.concurrent.ScheduledThreadPoolExecutor,java.util.concurrent.ThreadPoolExecutor,java.util.concurrent.ForkJoinPool,java.lang.OutOfMemoryError"/>
     </module>
+
+    <module name="RegexpSinglelineJava">
+      <!-- block Path#toFile() -->
+      <property name="id" value="blockPathToFile"/>
+      <property name="format" value="toFile\(\)"/>
+      <property name="message" value="Avoid Path#toFile(), as some implementations may not support it." />
+    </module>
   </module>
 
 </module>
index 0b918b885253998ff35315f1a5140184dc38bca4..98d70a035fe2715122cadca4dfd13d74b0f2a1bd 100644 (file)
@@ -64,7 +64,7 @@ hinted_handoff_enabled: true
 # Min unit: ms
 max_hint_window: 3h
 
-# Maximum throttle in KBs per second, per delivery thread.  This will be
+# Maximum throttle in KiBs per second, per delivery thread.  This will be
 # reduced proportionally to the number of nodes in the cluster.  (If there
 # are two nodes in the cluster, each delivery thread will use the maximum
 # rate; if there are three, each will throttle to half of the maximum,
@@ -86,7 +86,7 @@ max_hints_delivery_threads: 2
 # Min unit: ms
 hints_flush_period: 10000ms
 
-# Maximum size for a single hints file, in megabytes.
+# Maximum size for a single hints file, in mebibytes.
 # Min unit: MiB
 max_hints_file_size: 128MiB
 
@@ -121,7 +121,7 @@ auto_hints_cleanup_enabled: false
 #
 # hint_window_persistent_enabled: true
 
-# Maximum throttle in KBs per second, total. This will be
+# Maximum throttle in KiBs per second, total. This will be
 # reduced proportionally to the number of nodes in the cluster.
 # Min unit: KiB
 batchlog_replay_throttle: 1024KiB
@@ -298,6 +298,18 @@ partitioner: org.apache.cassandra.dht.Murmur3Partitioner
 # containing a CDC-enabled table if at space limit in cdc_raw_directory).
 cdc_enabled: false
 
+# Specify whether writes to the CDC-enabled tables should be blocked when CDC data on disk has reached the limit.
+# When setting to false, the writes will not be blocked and the oldest CDC data on disk will be deleted to
+# ensure the size constraint. The default is true.
+# cdc_block_writes: true
+
+# Specify whether CDC mutations are replayed through the write path on streaming, e.g. repair.
+# When enabled, CDC data streamed to the destination node will be written into commit log first. When setting to false,
+# the streamed CDC data is written into SSTables just the same as normal streaming. The default is true.
+# If this is set to false, streaming will be considerably faster; however, it's possible that, in extreme situations
+# (losing > quorum # nodes in a replica set), you may have data in your SSTables that never makes it to the CDC log.
+# cdc_on_repair_enabled: true
+
 # CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the
 # segment contains mutations for a CDC-enabled table. This should be placed on a
 # separate spindle than the data directories. If not set, the default directory is
@@ -362,7 +374,7 @@ commit_failure_policy: stop
 #
 # Default value ("auto") is 1/256th of the heap or 10MiB, whichever is greater
 # Min unit: MiB
-prepared_statements_cache_size:
+prepared_statements_cache_size:
 
 # Maximum size of the key cache in memory.
 #
@@ -377,7 +389,7 @@ commit_failure_policy: stop
 #
 # Default value is empty to make it "auto" (min(5% of Heap (in MiB), 100MiB)). Set to 0 to disable key cache.
 # Min unit: MiB
-key_cache_size:
+key_cache_size:
 
 # Duration in seconds after which Cassandra should
 # save the key cache. Caches are saved to saved_caches_directory as
@@ -445,7 +457,7 @@ row_cache_save_period: 0s
 # Default value is empty to make it "auto" (min(2.5% of Heap (in MiB), 50MiB)). Set to 0 to disable counter cache.
 # NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.
 # Min unit: MiB
-counter_cache_size:
+counter_cache_size:
 
 # Duration in seconds after which Cassandra should
 # save the counter cache (keys only). Caches are saved to saved_caches_directory as
@@ -647,7 +659,7 @@ memtable_allocation_type: heap_buffers
 # is 1/16th of the available heap. The main tradeoff is that smaller trees
 # have less resolution, which can lead to over-streaming data. If you see heap
 # pressure during repairs, consider lowering this, but you cannot go below
-# one megabyte. If you see lots of over-streaming, consider raising
+# one mebibyte. If you see lots of over-streaming, consider raising
 # this or using subrange repair.
 #
 # For more details see https://issues.apache.org/jira/browse/CASSANDRA-14096.
@@ -719,11 +731,11 @@ memtable_allocation_type: heap_buffers
 # is a best-effort process. In extreme conditions Cassandra may need to use
 # more than this amount of memory.
 # Min unit: KiB
-index_summary_capacity:
+index_summary_capacity:
 
 # How frequently index summaries should be resampled.  This is done
 # periodically to redistribute memory from the fixed-size pool to sstables
-# proportional their recent read rates.  Setting to -1 will disable this
+# proportional to their recent read rates.  Setting to a null value will disable this
 # process, leaving existing index summaries at their current sampling level.
 # Min unit: m
 index_summary_resize_interval: 60m
@@ -992,6 +1004,8 @@ compaction_throughput: 64MiB/s
 # are completely written, and used in place of the prior sstables for
 # any range that has been written. This helps to smoothly transfer reads 
 # between the sstables, reducing page cache churn and keeping hot rows hot
+# Set sstable_preemptive_open_interval to null to disable it, which is equivalent to
+# sstable_preemptive_open_interval_in_mb being negative
 # Min unit: MiB
 sstable_preemptive_open_interval: 50MiB
 
@@ -1000,7 +1014,7 @@ sstable_preemptive_open_interval: 50MiB
 # set to true, each newly created sstable will have a UUID based generation identifier and such files are
 # not readable by previous Cassandra versions. At some point, this option will become true by default
 # and eventually get removed from the configuration.
-enable_uuid_sstable_identifiers: false
+uuid_sstable_identifiers_enabled: false
 
 # When enabled, permits Cassandra to zero-copy stream entire eligible
 # SSTables between nodes, including every component.
@@ -1316,6 +1330,12 @@ server_encryption_options:
   # Set to a valid keystore if internode_encryption is dc, rack or all
   keystore: conf/.keystore
   keystore_password: cassandra
+  # During internode mTLS authentication, inbound connections (acting as servers) use keystore, keystore_password
+  # containing server certificate to create SSLContext and
+  # outbound connections (acting as clients) use outbound_keystore & outbound_keystore_password with client certificates
+  # to create SSLContext. By default, outbound_keystore is the same as keystore indicating mTLS is not enabled.
+#  outbound_keystore: conf/.keystore
+#  outbound_keystore_password: cassandra
   # Verify peer server certificates
   require_client_auth: false
   # Set to a valid truststore if require_client_auth is true
@@ -1665,58 +1685,78 @@ drop_compact_storage_enabled: false
 # The two thresholds default to -1 to disable.
 # keyspaces_warn_threshold: -1
 # keyspaces_fail_threshold: -1
+#
 # Guardrail to warn or fail when creating more user tables than threshold.
 # The two thresholds default to -1 to disable.
 # tables_warn_threshold: -1
 # tables_fail_threshold: -1
+#
 # Guardrail to enable or disable the ability to create uncompressed tables
 # uncompressed_tables_enabled: true
+#
 # Guardrail to warn or fail when creating/altering a table with more columns per table than threshold.
 # The two thresholds default to -1 to disable.
 # columns_per_table_warn_threshold: -1
 # columns_per_table_fail_threshold: -1
+#
 # Guardrail to warn or fail when creating more secondary indexes per table than threshold.
 # The two thresholds default to -1 to disable.
 # secondary_indexes_per_table_warn_threshold: -1
 # secondary_indexes_per_table_fail_threshold: -1
+#
 # Guardrail to enable or disable the creation of secondary indexes
 # secondary_indexes_enabled: true
+#
 # Guardrail to warn or fail when creating more materialized views per table than threshold.
 # The two thresholds default to -1 to disable.
 # materialized_views_per_table_warn_threshold: -1
 # materialized_views_per_table_fail_threshold: -1
+#
 # Guardrail to warn about, ignore or reject properties when creating tables. By default all properties are allowed.
 # table_properties_warned: []
 # table_properties_ignored: []
 # table_properties_disallowed: []
+#
 # Guardrail to allow/disallow user-provided timestamps. Defaults to true.
 # user_timestamps_enabled: true
+#
 # Guardrail to allow/disallow GROUP BY functionality.
 # group_by_enabled: true
+#
 # Guardrail to allow/disallow TRUNCATE and DROP TABLE statements
 # drop_truncate_table_enabled: true
+#
+# Guardrail to allow/disallow DROP KEYSPACE statements
+# drop_keyspace_enabled: true
+#
 # Guardrail to warn or fail when using a page size greater than threshold.
 # The two thresholds default to -1 to disable.
 # page_size_warn_threshold: -1
 # page_size_fail_threshold: -1
+#
 # Guardrail to allow/disallow list operations that require read before write, i.e. setting list element by index and
 # removing list elements by either index or value. Defaults to true.
 # read_before_write_list_operations_enabled: true
+#
 # Guardrail to warn or fail when querying with an IN restriction selecting more partition keys than threshold.
 # The two thresholds default to -1 to disable.
 # partition_keys_in_select_warn_threshold: -1
 # partition_keys_in_select_fail_threshold: -1
+#
 # Guardrail to warn or fail when an IN query creates a cartesian product with a size exceeding threshold,
 # eg. "a in (1,2,...10) and b in (1,2...10)" results in cartesian product of 100.
 # The two thresholds default to -1 to disable.
 # in_select_cartesian_product_warn_threshold: -1
 # in_select_cartesian_product_fail_threshold: -1
+#
 # Guardrail to warn about or reject read consistency levels. By default, all consistency levels are allowed.
 # read_consistency_levels_warned: []
 # read_consistency_levels_disallowed: []
+#
 # Guardrail to warn about or reject write consistency levels. By default, all consistency levels are allowed.
 # write_consistency_levels_warned: []
 # write_consistency_levels_disallowed: []
+#
 # Guardrail to warn or fail when encountering larger size of collection data than threshold.
 # At query time this guardrail is applied only to the collection fragment that is being written, even though in the case
 # of non-frozen collections there could be unaccounted parts of the collection on the sstables. This is done this way to
@@ -1727,6 +1767,7 @@ drop_compact_storage_enabled: false
 # collection_size_warn_threshold:
 # Min unit: B
 # collection_size_fail_threshold:
+#
 # Guardrail to warn or fail when encountering more elements in collection than threshold.
 # At query time this guardrail is applied only to the collection fragment that is being written, even though in the case
 # of non-frozen collections there could be unaccounted parts of the collection on the sstables. This is done this way to
@@ -1735,12 +1776,21 @@ drop_compact_storage_enabled: false
 # The two thresholds default to -1 to disable.
 # items_per_collection_warn_threshold: -1
 # items_per_collection_fail_threshold: -1
+#
 # Guardrail to allow/disallow querying with ALLOW FILTERING. Defaults to true.
 # allow_filtering_enabled: true
+#
+# Guardrail to allow/disallow setting SimpleStrategy via keyspace creation or alteration. Defaults to true.
+# simplestrategy_enabled: true
+#
 # Guardrail to warn or fail when creating a user-defined-type with more fields in than threshold.
 # Default -1 to disable.
 # fields_per_udt_warn_threshold: -1
 # fields_per_udt_fail_threshold: -1
+#
+# Guardrail to indicate whether or not users are allowed to use ALTER TABLE commands to make column changes to tables
+# alter_table_enabled: true
+#
 # Guardrail to warn or fail when local data disk usage percentage exceeds threshold. Valid values are in [1, 100].
 # This is only used for the disks storing data directories, so it won't count any separate disks used for storing
 # the commitlog, hints nor saved caches. The disk usage is the ratio between the amount of space used by the data
@@ -1752,7 +1802,8 @@ drop_compact_storage_enabled: false
 # The two thresholds default to -1 to disable.
 # data_disk_usage_percentage_warn_threshold: -1
 # data_disk_usage_percentage_fail_threshold: -1
-# Allows defining the max disk size of the data directories when calculating thresholds for
+#
+# Guardrail that allows users to define the max disk size of the data directories when calculating thresholds for
 # disk_usage_percentage_warn_threshold and disk_usage_percentage_fail_threshold, so if this is greater than zero they
 # become percentages of a fixed size on disk instead of percentages of the physically available disk size. This should
 # be useful when we have a large disk and we only want to use a part of it for Cassandra's data directories.
@@ -1760,11 +1811,17 @@ drop_compact_storage_enabled: false
 # Defaults to null to disable and use the physically available disk size of data directories during calculations.
 # Min unit: B
 # data_disk_usage_max_disk_size:
+#
 # Guardrail to warn or fail when the minimum replication factor is lesser than threshold.
 # This would also apply to system keyspaces.
 # Suggested value for use in production: 2 or higher
 # minimum_replication_factor_warn_threshold: -1
 # minimum_replication_factor_fail_threshold: -1
+#
+# Guardrail to warn or fail when the maximum replication factor is greater than threshold.
+# This would also apply to system keyspaces.
+# maximum_replication_factor_warn_threshold: -1
+# maximum_replication_factor_fail_threshold: -1
 
 # Startup Checks are executed as part of Cassandra startup process, not all of them
 # are configurable (so you can disable them) but these which are enumerated below.
index 752ff1f6bbcd134633b1f6a073b30e4214d24858..95882e3ae2255c0da16615524fd6b03800af2d93 100644 (file)
@@ -37,6 +37,8 @@ case "$1" in
         if [ -z "$2" ]; then
             chown -R cassandra: /var/lib/cassandra
             chown -R cassandra: /var/log/cassandra
+            chmod 750 /var/lib/cassandra/
+            chmod 750 /var/log/cassandra/
         fi
         if ! sysctl -p /etc/sysctl.d/cassandra.conf; then
             echo >&2
index 5c193b9550c11d38aa2ef61a1cfa04851ba047fc..b2397e52fb168a3b01ffdb5cfe93fb3ae88a2c24 100644 (file)
@@ -1,14 +1,8 @@
-cassandra (4.1~alpha2) UNRELEASED; urgency=medium
+cassandra (4.2) UNRELEASED; urgency=medium
 
   * New release
 
- -- Mick Semb Wever <mck@apache.org>  Fri, 20 May 2022 22:02:50 +0200
-
-cassandra (4.1~alpha1) unstable; urgency=medium
-
-  * New release
-
- -- Mick Semb Wever <mck@apache.org>  Fri, 20 May 2022 22:02:50 +0200
+ -- Mick Semb Wever <mck@apache.org>  Wed, 21 Apr 2021 19:24:28 +0200
 
 cassandra (4.0~rc1) unstable; urgency=medium
 
index 8bedf19a7801681cdb44094596d405b4f4c7796d..fde597052606fc86230cc140dda1902b6b397a8b 100644 (file)
@@ -1082,9 +1082,10 @@ bc(syntax)..
 
 <selector> ::= <identifier>
              | <term>
-             | WRITETIME '(' <identifier> ')'
+             | WRITETIME '(' <selector> ')'
+             | MAXWRITETIME '(' <selector> ')'
              | COUNT '(' '*' ')'
-             | TTL '(' <identifier> ')'
+             | TTL '(' <selector> ')'
              | CAST '(' <selector> AS <type> ')'
              | <function> '(' (<selector> (',' <selector>)*)? ')'
              | <selector> '.' <identifier>
@@ -1131,7 +1132,7 @@ h4(#selectSelection). @<select-clause>@
 
 The @<select-clause>@ determines which columns needs to be queried and returned in the result-set. It consists of either the comma-separated list of <selector> or the wildcard character (@*@) to select all the columns defined for the table. Please note that for wildcard @SELECT@ queries the order of columns returned is not specified and is not guaranteed to be stable between Cassandra versions.
 
-A @<selector>@ is either a column name to retrieve or a @<function>@ of one or more @<term>@s. The function allowed are the same as for @<term>@ and are described in the "function section":#functions. In addition to these generic functions, the @WRITETIME@ (resp. @TTL@) function allows to select the timestamp of when the column was inserted (resp. the time to live (in seconds) for the column (or null if the column has no expiration set)) and the "@CAST@":#castFun function can be used to convert one data type to another.
+A @<selector>@ is either a column name to retrieve or a @<function>@ of one or more @<term>@s. The function allowed are the same as for @<term>@ and are described in the "function section":#functions. In addition to these generic functions, the @WRITETIME@ and @MAXWRITETIME@ (resp. @TTL@) functions allow to select the timestamp of when the column was inserted (resp. the time to live (in seconds) for the column (or null if the column has no expiration set)) and the "@CAST@":#castFun function can be used to convert one data type to another.
 
 Additionally, individual values of maps and sets can be selected using @[ <term> ]@. For maps, this will return the value corresponding to the key, if such entry exists. For sets, this will return the key that is selected if it exists and is thus mainly a way to check element existence. It is also possible to select a slice of a set or map with @[ <term> ... <term> @], where both bound can be omitted.
 
@@ -2052,7 +2053,7 @@ A number of functions are provided to "convert" the native types into binary dat
 h2(#aggregates). Aggregates
 
 Aggregate functions work on a set of rows. They receive values for each row and returns one value for the whole set.
-If @normal@ columns, @scalar functions@, @UDT@ fields, @writetime@ or @ttl@ are selected together with aggregate functions, the values returned for them will be the ones of the first row matching the query.
+If @normal@ columns, @scalar functions@, @UDT@ fields, @writetime@, @maxwritetime@ or @ttl@ are selected together with aggregate functions, the values returned for them will be the ones of the first row matching the query.
 
 CQL3 distinguishes between built-in aggregates (so called 'native aggregates') and "user-defined aggregates":#udas. CQL3 includes several native aggregates, described below:
 
@@ -2433,6 +2434,7 @@ CQL distinguishes between _reserved_ and _non-reserved_ keywords. Reserved keywo
 | @WHERE@        | yes |
 | @WITH@         | yes |
 | @WRITETIME@    | no  |
+| @MAXWRITETIME@    | no  |
 
 h2(#appendixB). Appendix B: CQL Reserved Types
 
index 7e17266a3f7e9c00dd29ef25d390a8d48f7d7fec..544afc009f15672509763572f324843ee7a0e686 100644 (file)
@@ -139,6 +139,7 @@ or not.
 |`WHERE` |yes
 |`WITH` |yes
 |`WRITETIME` |no
+|`MAXWRITETIME` |no
 |===
 
 == Appendix B: CQL Reserved Types
index 1f89469a328d0732d556ffb01d2db46be278d284..df99a39ef64a7ac5d86f6709378be4b2140f384b 100644 (file)
@@ -2,6 +2,16 @@
 
 The following describes the changes in each version of CQL.
 
+== 3.4.6
+
+* Add support for IF EXISTS and IF NOT EXISTS in ALTER statements  (`16916`)
+* Allow GRANT/REVOKE multiple permissions in a single statement (`17030`)
+* Pre hashed passwords in CQL (`17334`)
+* Add support for type casting in WHERE clause components and in the values of INSERT/UPDATE statements (`14337`)
+* Add support for CONTAINS and CONTAINS KEY in conditional UPDATE and DELETE statement (`10537`)
+* Allow to grant permission for all tables in a keyspace (`17027`)
+* Allow to aggregate by time intervals (`11871`)
+
 == 3.4.5
 
 * Adds support for arithmetic operators (`11935`)
index d99e12b8ed59a74257f26eb0a3520dbb360a31c5..3e8c47f2093fecc26372893944bfd1ed994eed16 100644 (file)
@@ -1645,6 +1645,7 @@ FROM  +
 ::=  +
 |  +
 | WRITETIME `(' `)' +
+| MAXWRITETIME `(' `)' +
 | COUNT `(' `*' `)' +
 | TTL `(' `)' +
 | CAST `(' AS `)' +
@@ -1706,8 +1707,8 @@ be stable between Cassandra versions.
 A `<selector>` is either a column name to retrieve or a `<function>` of
 one or more `<term>`s. The function allowed are the same as for `<term>`
 and are described in the link:#functions[function section]. In addition
-to these generic functions, the `WRITETIME` (resp. `TTL`) function
-allows to select the timestamp of when the column was inserted (resp.
+to these generic functions, the `WRITETIME` and `MAXWRITETIME` (resp. `TTL`)
+functions allow to select the timestamp of when the column was inserted (resp.
 the time to live (in seconds) for the column (or null if the column has
 no expiration set)) and the link:#castFun[`CAST`] function can be used
 to convert one data type to another. The `WRITETIME` and `TTL` functions
@@ -3150,8 +3151,8 @@ is `0x0000000000000003` and `blobAsBigint(0x0000000000000003)` is `3`.
 
 Aggregate functions work on a set of rows. They receive values for each
 row and returns one value for the whole set. +
-If `normal` columns, `scalar functions`, `UDT` fields, `writetime` or
-`ttl` are selected together with aggregate functions, the values
+If `normal` columns, `scalar functions`, `UDT` fields, `writetime`, `maxwritetime`
+or `ttl` are selected together with aggregate functions, the values
 returned for them will be the ones of the first row matching the query.
 
 CQL3 distinguishes between built-in aggregates (so called `native
index d0517aaf34f6e379117529f3c7dd080b7d58c2e5..513dc1d1e5643003a7cedf578ec62353f2ed9cc7 100644 (file)
@@ -75,18 +75,21 @@ You must use the original column name instead.
 ====
 
 [[writetime-and-ttl-function]]
-==== `WRITETIME` and `TTL` function
+==== `WRITETIME`, `MAXWRITETIME` and `TTL` function
 
-Selection supports two special functions that aren't allowed anywhere
-else: `WRITETIME` and `TTL`. 
-Both functions take only one argument, a column name.
+Selection supports three special functions that aren't allowed anywhere
+else: `WRITETIME`, `MAXWRITETIME` and `TTL`.
+All functions take only one argument, a column name. If the column is a collection or UDT, it's possible to add element
+selectors, such as `WRITETIME(phones[2..4])` or `WRITETIME(user.name)`.
 These functions retrieve meta-information that is stored internally for each column:
 
-* `WRITETIME` stores the timestamp of the value of the column
+* `WRITETIME` stores the timestamp of the value of the column.
+* `MAXWRITETIME` stores the largest timestamp of the value of the column. For non-collection and non-UDT columns, `MAXWRITETIME`
+is equivalent to `WRITETIME`. In the other cases, it returns the largest timestamp of the values in the column.
 * `TTL` stores the remaining time to live (in seconds) for the value of the column if it is set to expire; otherwise the value is `null`.
 
-The `WRITETIME` and `TTL` functions can't be used on multi-cell columns such as non-frozen
-collections or non-frozen user-defined types.
+The `WRITETIME` and `TTL` functions can be used on multi-cell columns such as non-frozen collections or non-frozen
+user-defined types. In that case, the functions will return the list of timestamps or TTLs for each selected cell.
 
 [[where-clause]]
 === The `WHERE` clause
index aef6575d52e5c0c52e050470e2633f79d9e08ad3..df74db96d4b10b1b00eb67353a0957fe796a9d68 100644 (file)
@@ -1,23 +1,5 @@
 = Frequently Asked Questions
 
-* `why-cant-list-all`
-* `what-ports`
-* `what-happens-on-joins`
-* `asynch-deletes`
-* `one-entry-ring`
-* `can-large-blob`
-* `nodetool-connection-refused`
-* `to-batch-or-not-to-batch`
-* `selinux`
-* `how-to-unsubscribe`
-* `cassandra-eats-all-my-memory`
-* `what-are-seeds`
-* `are-seeds-SPOF`
-* `why-message-dropped`
-* `oom-map-failed`
-* `what-on-same-timestamp-update`
-* `why-bootstrapping-stream-error`
-
 [[why-cant-list-all]]
 == Why can't I set `listen_address` to listen on 0.0.0.0 (all my addresses)?
 
index 3e612580ba6367ec977c196cf76ba74ac3211738..7a7a4befa792e6f824a600501f3c8602a939b061 100644 (file)
@@ -124,7 +124,7 @@ cqlsh> SELECT * FROM system_views.clients;
 ------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  address          | 127.0.0.1
  port             | 50687
- client_options   | {'CQL_VERSION': '3.4.5', 'DRIVER_NAME': 'DataStax Python Driver', 'DRIVER_VERSION': '3.25.0'}
+ client_options   | {'CQL_VERSION': '3.4.6', 'DRIVER_NAME': 'DataStax Python Driver', 'DRIVER_VERSION': '3.25.0'}
  connection_stage | ready
  driver_name      | DataStax Python Driver
  driver_version   | 3.25.0
@@ -140,7 +140,7 @@ cqlsh> SELECT * FROM system_views.clients;
 ------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  address          | 127.0.0.1
  port             | 50688
- client_options   | {'CQL_VERSION': '3.4.5', 'DRIVER_NAME': 'DataStax Python Driver', 'DRIVER_VERSION': '3.25.0'}
+ client_options   | {'CQL_VERSION': '3.4.6', 'DRIVER_NAME': 'DataStax Python Driver', 'DRIVER_VERSION': '3.25.0'}
  connection_stage | ready
  driver_name      | DataStax Python Driver
  driver_version   | 3.25.0
index e3c76b8b9e08e75a52c7b267ed40d6e2cd1d4023..be2065690d609b8f04ea21774112e49a6d9b5465 100644 (file)
@@ -297,8 +297,7 @@ to compaction:
 `disableautocompaction`::
   Disable compaction.
 `setcompactionthroughput`::
-  How fast compaction should run at most - defaults to 16MB/s, but note
-  that it is likely not possible to reach this throughput.
+  How fast compaction should run at most - defaults to 64MiB/s.
 `compactionstats`::
   Statistics about current and pending compactions.
 `compactionhistory`::
index 8050ee5df04d2108b46991d8d5711a5091448a9f..0d40608c2c5d4dcc93f7bf810d611bc64d611c23 100644 (file)
@@ -181,6 +181,21 @@ cqlsh> SHOW HOST
 Connected to Prod_Cluster at 192.0.0.1:9042.
 ----
 
+=== `SHOW REPLICAS`
+
+Prints the IP addresses of the Cassandra nodes which are replicas for the
+given token and keyspace. This command is available from Cassandra 4.2.
+
+`Usage`: `SHOW REPLICAS <token> (<keyspace>)`
+
+Example usage:
+
+[source,none]
+----
+cqlsh> SHOW REPLICAS 95
+['192.0.0.1', '192.0.0.2']
+----
+
 === `SHOW SESSION`
 
 Pretty prints a specific tracing session.
index 719886d76638a3ec7063fda2d5b94d2f708dab28..6ba42672e476422dd425d3a5bef396472feccf79 100644 (file)
@@ -7,7 +7,7 @@
             <properties>
                 <property name="project.dir">..</property>
                 <!-- the compile classpaths should be distinct per compilation unit… but it is kept simple and the build will catch errors -->
-                <property name="cassandra.classpath.jars">${project.dir}/build/lib/jars/HdrHistogram-2.1.9.jar:${project.dir}/build/lib/jars/ST4-4.0.8.jar:${project.dir}/build/lib/jars/airline-0.8.jar:${project.dir}/build/lib/jars/antlr-3.5.2.jar:${project.dir}/build/lib/jars/antlr-runtime-3.5.2.jar:${project.dir}/build/lib/jars/asm-7.1.jar:${project.dir}/build/lib/jars/assertj-core-3.15.0.jar:${project.dir}/build/lib/jars/byteman-4.0.6.jar:${project.dir}/build/lib/jars/byteman-bmunit-4.0.6.jar:${project.dir}/build/lib/jars/byteman-install-4.0.6.jar:${project.dir}/build/lib/jars/byteman-submit-4.0.6.jar:${project.dir}/build/lib/jars/caffeine-2.3.5.jar:${project.dir}/build/lib/jars/cassandra-driver-core-3.11.0-shaded.jar:${project.dir}/build/lib/jars/chronicle-bytes-2.20.111.jar:${project.dir}/build/lib/jars/chronicle-core-2.20.126.jar:${project.dir}/build/lib/jars/chronicle-queue-5.20.123.jar:${project.dir}/build/lib/jars/chronicle-threads-2.20.111.jar:${project.dir}/build/lib/jars/chronicle-wire-2.20.117.jar:${project.dir}/build/lib/jars/commons-beanutils-1.7.0.jar:${project.dir}/build/lib/jars/commons-beanutils-core-1.8.0.jar:${project.dir}/build/lib/jars/commons-cli-1.1.jar:${project.dir}/build/lib/jars/commons-codec-1.9.jar:${project.dir}/build/lib/jars/commons-collections-3.2.1.jar:${project.dir}/build/lib/jars/commons-configuration-1.6.jar:${project.dir}/build/lib/jars/commons-digester-1.8.jar:${project.dir}/build/lib/jars/commons-el-1.0.jar:${project.dir}/build/lib/jars/commons-httpclient-3.0.1.jar:${project.dir}/build/lib/jars/commons-lang-2.4.jar:${project.dir}/build/lib/jars/commons-lang3-3.11.jar:${project.dir}/build/lib/jars/commons-math-2.1.jar:${project.dir}/build/lib/jars/commons-math3-3.2.jar:${project.dir}/build/lib/jars/commons-net-1.4.1.jar:${project.dir}/build/lib/jars/compile-command-annotations-1.2.0.jar:${project.dir}/build/lib/jars/compress-lzf-0.8.4.jar:${project.dir}/build/lib/jars/concurrent-trees-2.4.0.jar:${project.dir}/build/lib/jars/e
cj-4.6.1.jar:${project.dir}/build/lib/jars/ftplet-api-1.0.0.jar:${project.dir}/build/lib/jars/ftpserver-core-1.0.0.jar:${project.dir}/build/lib/jars/ftpserver-deprecated-1.0.0-M2.jar:${project.dir}/build/lib/jars/guava-27.0-jre.jar:${project.dir}/build/lib/jars/hadoop-core-1.0.3.jar:${project.dir}/build/lib/jars/hadoop-minicluster-1.0.3.jar:${project.dir}/build/lib/jars/hadoop-test-1.0.3.jar:${project.dir}/build/lib/jars/high-scale-lib-1.0.6.jar:${project.dir}/build/lib/jars/hppc-0.8.1.jar:${project.dir}/build/lib/jars/hsqldb-1.8.0.10.jar:${project.dir}/build/lib/jars/j2objc-annotations-1.3.jar:${project.dir}/build/lib/jars/jackson-annotations-2.9.10.jar:${project.dir}/build/lib/jars/jackson-core-2.9.10.jar:${project.dir}/build/lib/jars/jackson-core-asl-1.0.1.jar:${project.dir}/build/lib/jars/jackson-databind-2.9.10.8.jar:${project.dir}/build/lib/jars/jackson-mapper-asl-1.0.1.jar:${project.dir}/build/lib/jars/jacocoagent.jar:${project.dir}/build/lib/jars/jamm-0.3.2.jar:${project.dir}/build/lib/jars/jasper-compiler-5.5.12.jar:${project.dir}/build/lib/jars/jasper-runtime-5.5.12.jar:${project.dir}/build/lib/jars/java-cup-runtime-11b-20160615.jar:${project.dir}/build/lib/jars/javax.inject-1.jar:${project.dir}/build/lib/jars/jbcrypt-0.3m.jar:${project.dir}/build/lib/jars/jcl-over-slf4j-1.7.25.jar:${project.dir}/build/lib/jars/jcommander-1.30.jar:${project.dir}/build/lib/jars/jctools-core-3.1.0.jar:${project.dir}/build/lib/jars/jersey-core-1.0.jar:${project.dir}/build/lib/jars/jersey-server-1.0.jar:${project.dir}/build/lib/jars/jets3t-0.7.1.jar:${project.dir}/build/lib/jars/jetty-6.1.26.jar:${project.dir}/build/lib/jars/jetty-util-6.1.26.jar:${project.dir}/build/lib/jars/jflex-1.8.2.jar:${project.dir}/build/lib/jars/jna-5.6.0.jar:${project.dir}/build/lib/jars/json-simple-1.1.jar:${project.dir}/build/lib/jars/jsp-2.1-6.1.14.jar:${project.dir}/build/lib/jars/jsp-api-2.1-6.1.14.jar:${project.dir}/build/lib/jars/jsr305-2.0.2.jar:${project.dir}/build/lib/jars/jsr311-api-1.0.ja
r:${project.dir}/build/lib/jars/jvm-attach-api-1.5.jar:${project.dir}/build/lib/jars/kfs-0.3.jar:${project.dir}/build/lib/jars/log4j-over-slf4j-1.7.25.jar:${project.dir}/build/lib/jars/logback-classic-1.2.3.jar:${project.dir}/build/lib/jars/logback-core-1.2.3.jar:${project.dir}/build/lib/jars/lz4-java-1.7.1.jar:${project.dir}/build/lib/jars/metrics-core-3.1.5.jar:${project.dir}/build/lib/jars/metrics-jvm-3.1.5.jar:${project.dir}/build/lib/jars/metrics-logback-3.1.5.jar:${project.dir}/build/lib/jars/mina-core-2.0.0-M5.jar:${project.dir}/build/lib/jars/mxdump-0.14.jar:${project.dir}/build/lib/jars/netty-all-4.1.58.Final.jar:${project.dir}/build/lib/jars/netty-tcnative-boringssl-static-2.0.36.Final.jar:${project.dir}/build/lib/jars/ohc-core-0.5.1.jar:${project.dir}/build/lib/jars/ohc-core-j8-0.5.1.jar:${project.dir}/build/lib/jars/oro-2.0.8.jar:${project.dir}/build/lib/jars/psjava-0.1.19.jar:${project.dir}/build/lib/jars/reporter-config-base-3.0.3.jar:${project.dir}/build/lib/jars/reporter-config3-3.0.3.jar:${project.dir}/build/lib/jars/servlet-api-2.5-6.1.14.jar:${project.dir}/build/lib/jars/sigar-1.6.4.jar:${project.dir}/build/lib/jars/sjk-cli-0.14.jar:${project.dir}/build/lib/jars/sjk-core-0.14.jar:${project.dir}/build/lib/jars/sjk-json-0.14.jar:${project.dir}/build/lib/jars/sjk-stacktrace-0.14.jar:${project.dir}/build/lib/jars/slf4j-api-1.7.25.jar:${project.dir}/build/lib/jars/snakeyaml-1.26.jar:${project.dir}/build/lib/jars/snappy-java-1.1.2.6.jar:${project.dir}/build/lib/jars/snowball-stemmer-1.3.0.581.1.jar:${project.dir}/build/lib/jars/stream-2.5.2.jar:${project.dir}/build/lib/jars/xmlenc-0.52.jar:${project.dir}/build/lib/jars/zstd-jni-1.3.8-5.jar:${project.dir}/build/test/lib/jars/animal-sniffer-annotations-1.14.jar:${project.dir}/build/test/lib/jars/ant-1.9.7.jar:${project.dir}/build/test/lib/jars/ant-junit-1.9.7.jar:${project.dir}/build/test/lib/jars/ant-launcher-1.9.7.jar:${project.dir}/build/test/lib/jars/asm-6.0.jar:${project.dir}/build/test/lib/jars/asm-
analysis-6.0.jar:${project.dir}/build/test/lib/jars/asm-commons-6.0.jar:${project.dir}/build/test/lib/jars/asm-tree-6.0.jar:${project.dir}/build/test/lib/jars/asm-util-6.0.jar:${project.dir}/build/test/lib/jars/asm-xml-6.0.jar:${project.dir}/build/test/lib/jars/assertj-core-3.15.0.jar:${project.dir}/build/test/lib/jars/awaitility-4.0.3.jar:${project.dir}/build/test/lib/jars/byte-buddy-1.10.5.jar:${project.dir}/build/test/lib/jars/byte-buddy-agent-1.10.5.jar:${project.dir}/build/test/lib/jars/byteman-4.0.6.jar:${project.dir}/build/test/lib/jars/byteman-bmunit-4.0.6.jar:${project.dir}/build/test/lib/jars/byteman-install-4.0.6.jar:${project.dir}/build/test/lib/jars/byteman-submit-4.0.6.jar:${project.dir}/build/test/lib/jars/checker-qual-2.0.0.jar:${project.dir}/build/test/lib/jars/commons-io-2.6.jar:${project.dir}/build/test/lib/jars/commons-math3-3.2.jar:${project.dir}/build/test/lib/jars/compile-command-annotations-1.2.0.jar:${project.dir}/build/test/lib/jars/dtest-api-0.0.7.jar:${project.dir}/build/test/lib/jars/error_prone_annotations-2.0.18.jar:${project.dir}/build/test/lib/jars/guava-23.5-android.jar:${project.dir}/build/test/lib/jars/hamcrest-2.2.jar:${project.dir}/build/test/lib/jars/j2objc-annotations-1.1.jar:${project.dir}/build/test/lib/jars/java-allocation-instrumenter-3.1.0.jar:${project.dir}/build/test/lib/jars/javassist-3.26.0-GA.jar:${project.dir}/build/test/lib/jars/jmh-core-1.21.jar:${project.dir}/build/test/lib/jars/jmh-generator-annprocess-1.21.jar:${project.dir}/build/test/lib/jars/jopt-simple-4.6.jar:${project.dir}/build/test/lib/jars/jsr305-1.3.9.jar:${project.dir}/build/test/lib/jars/junit-4.12.jar:${project.dir}/build/test/lib/jars/mockito-core-3.2.4.jar:${project.dir}/build/test/lib/jars/objenesis-2.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.agent-0.8.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.ant-0.8.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.core-0.8.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.report-0.8.6.
jar:${project.dir}/build/test/lib/jars/quicktheories-0.26.jar:${project.dir}/build/test/lib/jars/reflections-0.9.12.jar:${project.dir}/build/test/lib/jars/slf4j-api-1.7.25.jar:</property>
+                <property name="cassandra.classpath.jars">${project.dir}/build/lib/jars/HdrHistogram-2.1.9.jar:${project.dir}/build/lib/jars/ST4-4.0.8.jar:${project.dir}/build/lib/jars/airline-0.8.jar:${project.dir}/build/lib/jars/antlr-3.5.2.jar:${project.dir}/build/lib/jars/antlr-runtime-3.5.2.jar:${project.dir}/build/lib/jars/asm-7.1.jar:${project.dir}/build/lib/jars/assertj-core-3.15.0.jar:${project.dir}/build/lib/jars/byteman-4.0.6.jar:${project.dir}/build/lib/jars/byteman-bmunit-4.0.6.jar:${project.dir}/build/lib/jars/byteman-install-4.0.6.jar:${project.dir}/build/lib/jars/byteman-submit-4.0.6.jar:${project.dir}/build/lib/jars/caffeine-2.3.5.jar:${project.dir}/build/lib/jars/cassandra-driver-core-3.11.0-shaded.jar:${project.dir}/build/lib/jars/chronicle-bytes-2.20.111.jar:${project.dir}/build/lib/jars/chronicle-core-2.20.126.jar:${project.dir}/build/lib/jars/chronicle-queue-5.20.123.jar:${project.dir}/build/lib/jars/chronicle-threads-2.20.111.jar:${project.dir}/build/lib/jars/chronicle-wire-2.20.117.jar:${project.dir}/build/lib/jars/commons-beanutils-1.7.0.jar:${project.dir}/build/lib/jars/commons-beanutils-core-1.8.0.jar:${project.dir}/build/lib/jars/commons-cli-1.1.jar:${project.dir}/build/lib/jars/commons-codec-1.9.jar:${project.dir}/build/lib/jars/commons-collections-3.2.1.jar:${project.dir}/build/lib/jars/commons-configuration-1.6.jar:${project.dir}/build/lib/jars/commons-digester-1.8.jar:${project.dir}/build/lib/jars/commons-el-1.0.jar:${project.dir}/build/lib/jars/commons-httpclient-3.0.1.jar:${project.dir}/build/lib/jars/commons-lang3-3.11.jar:${project.dir}/build/lib/jars/commons-math-2.1.jar:${project.dir}/build/lib/jars/commons-math3-3.2.jar:${project.dir}/build/lib/jars/commons-net-1.4.1.jar:${project.dir}/build/lib/jars/compile-command-annotations-1.2.0.jar:${project.dir}/build/lib/jars/compress-lzf-0.8.4.jar:${project.dir}/build/lib/jars/concurrent-trees-2.4.0.jar:${project.dir}/build/lib/jars/ecj-4.6.1.jar:${project.dir}/build/lib/jars/ftplet-a
pi-1.0.0.jar:${project.dir}/build/lib/jars/ftpserver-core-1.0.0.jar:${project.dir}/build/lib/jars/ftpserver-deprecated-1.0.0-M2.jar:${project.dir}/build/lib/jars/guava-27.0-jre.jar:${project.dir}/build/lib/jars/hadoop-core-1.0.3.jar:${project.dir}/build/lib/jars/hadoop-minicluster-1.0.3.jar:${project.dir}/build/lib/jars/hadoop-test-1.0.3.jar:${project.dir}/build/lib/jars/high-scale-lib-1.0.6.jar:${project.dir}/build/lib/jars/hppc-0.8.1.jar:${project.dir}/build/lib/jars/hsqldb-1.8.0.10.jar:${project.dir}/build/lib/jars/j2objc-annotations-1.3.jar:${project.dir}/build/lib/jars/jackson-annotations-2.9.10.jar:${project.dir}/build/lib/jars/jackson-core-2.9.10.jar:${project.dir}/build/lib/jars/jackson-core-asl-1.0.1.jar:${project.dir}/build/lib/jars/jackson-databind-2.9.10.8.jar:${project.dir}/build/lib/jars/jackson-mapper-asl-1.0.1.jar:${project.dir}/build/lib/jars/jacocoagent.jar:${project.dir}/build/lib/jars/jamm-0.3.2.jar:${project.dir}/build/lib/jars/jasper-compiler-5.5.12.jar:${project.dir}/build/lib/jars/jasper-runtime-5.5.12.jar:${project.dir}/build/lib/jars/java-cup-runtime-11b-20160615.jar:${project.dir}/build/lib/jars/javax.inject-1.jar:${project.dir}/build/lib/jars/jbcrypt-0.3m.jar:${project.dir}/build/lib/jars/jcl-over-slf4j-1.7.25.jar:${project.dir}/build/lib/jars/jcommander-1.30.jar:${project.dir}/build/lib/jars/jctools-core-3.1.0.jar:${project.dir}/build/lib/jars/jersey-core-1.0.jar:${project.dir}/build/lib/jars/jersey-server-1.0.jar:${project.dir}/build/lib/jars/jets3t-0.7.1.jar:${project.dir}/build/lib/jars/jetty-6.1.26.jar:${project.dir}/build/lib/jars/jetty-util-6.1.26.jar:${project.dir}/build/lib/jars/jflex-1.8.2.jar:${project.dir}/build/lib/jars/jna-5.6.0.jar:${project.dir}/build/lib/jars/json-simple-1.1.jar:${project.dir}/build/lib/jars/jsp-2.1-6.1.14.jar:${project.dir}/build/lib/jars/jsp-api-2.1-6.1.14.jar:${project.dir}/build/lib/jars/jsr305-2.0.2.jar:${project.dir}/build/lib/jars/jsr311-api-1.0.jar:${project.dir}/build/lib/jars/jvm-attach-api-1.5.
jar:${project.dir}/build/lib/jars/kfs-0.3.jar:${project.dir}/build/lib/jars/log4j-over-slf4j-1.7.25.jar:${project.dir}/build/lib/jars/logback-classic-1.2.3.jar:${project.dir}/build/lib/jars/logback-core-1.2.3.jar:${project.dir}/build/lib/jars/lz4-java-1.7.1.jar:${project.dir}/build/lib/jars/metrics-core-3.1.5.jar:${project.dir}/build/lib/jars/metrics-jvm-3.1.5.jar:${project.dir}/build/lib/jars/metrics-logback-3.1.5.jar:${project.dir}/build/lib/jars/mina-core-2.0.0-M5.jar:${project.dir}/build/lib/jars/mxdump-0.14.jar:${project.dir}/build/lib/jars/netty-all-4.1.58.Final.jar:${project.dir}/build/lib/jars/netty-tcnative-boringssl-static-2.0.36.Final.jar:${project.dir}/build/lib/jars/ohc-core-0.5.1.jar:${project.dir}/build/lib/jars/ohc-core-j8-0.5.1.jar:${project.dir}/build/lib/jars/oro-2.0.8.jar:${project.dir}/build/lib/jars/psjava-0.1.19.jar:${project.dir}/build/lib/jars/reporter-config-base-3.0.3.jar:${project.dir}/build/lib/jars/reporter-config3-3.0.3.jar:${project.dir}/build/lib/jars/servlet-api-2.5-6.1.14.jar:${project.dir}/build/lib/jars/sigar-1.6.4.jar:${project.dir}/build/lib/jars/sjk-cli-0.14.jar:${project.dir}/build/lib/jars/sjk-core-0.14.jar:${project.dir}/build/lib/jars/sjk-json-0.14.jar:${project.dir}/build/lib/jars/sjk-stacktrace-0.14.jar:${project.dir}/build/lib/jars/slf4j-api-1.7.25.jar:${project.dir}/build/lib/jars/snakeyaml-1.26.jar:${project.dir}/build/lib/jars/snappy-java-1.1.2.6.jar:${project.dir}/build/lib/jars/snowball-stemmer-1.3.0.581.1.jar:${project.dir}/build/lib/jars/stream-2.5.2.jar:${project.dir}/build/lib/jars/xmlenc-0.52.jar:${project.dir}/build/lib/jars/zstd-jni-1.3.8-5.jar:${project.dir}/build/test/lib/jars/animal-sniffer-annotations-1.14.jar:${project.dir}/build/test/lib/jars/ant-1.9.7.jar:${project.dir}/build/test/lib/jars/ant-junit-1.9.7.jar:${project.dir}/build/test/lib/jars/ant-launcher-1.9.7.jar:${project.dir}/build/test/lib/jars/asm-6.0.jar:${project.dir}/build/test/lib/jars/asm-analysis-6.0.jar:${project.dir}/build/test/lib/jars
/asm-commons-6.0.jar:${project.dir}/build/test/lib/jars/asm-tree-6.0.jar:${project.dir}/build/test/lib/jars/asm-util-6.0.jar:${project.dir}/build/test/lib/jars/asm-xml-6.0.jar:${project.dir}/build/test/lib/jars/assertj-core-3.15.0.jar:${project.dir}/build/test/lib/jars/awaitility-4.0.3.jar:${project.dir}/build/test/lib/jars/byte-buddy-1.10.5.jar:${project.dir}/build/test/lib/jars/byte-buddy-agent-1.10.5.jar:${project.dir}/build/test/lib/jars/byteman-4.0.6.jar:${project.dir}/build/test/lib/jars/byteman-bmunit-4.0.6.jar:${project.dir}/build/test/lib/jars/byteman-install-4.0.6.jar:${project.dir}/build/test/lib/jars/byteman-submit-4.0.6.jar:${project.dir}/build/test/lib/jars/checker-qual-2.0.0.jar:${project.dir}/build/test/lib/jars/commons-io-2.6.jar:${project.dir}/build/test/lib/jars/commons-math3-3.2.jar:${project.dir}/build/test/lib/jars/compile-command-annotations-1.2.0.jar:${project.dir}/build/test/lib/jars/dtest-api-0.0.7.jar:${project.dir}/build/test/lib/jars/error_prone_annotations-2.0.18.jar:${project.dir}/build/test/lib/jars/guava-23.5-android.jar:${project.dir}/build/test/lib/jars/hamcrest-2.2.jar:${project.dir}/build/test/lib/jars/j2objc-annotations-1.1.jar:${project.dir}/build/test/lib/jars/java-allocation-instrumenter-3.1.0.jar:${project.dir}/build/test/lib/jars/javassist-3.26.0-GA.jar:${project.dir}/build/test/lib/jars/jmh-core-1.21.jar:${project.dir}/build/test/lib/jars/jmh-generator-annprocess-1.21.jar:${project.dir}/build/test/lib/jars/jopt-simple-4.6.jar:${project.dir}/build/test/lib/jars/jsr305-1.3.9.jar:${project.dir}/build/test/lib/jars/junit-4.12.jar:${project.dir}/build/test/lib/jars/mockito-core-3.2.4.jar:${project.dir}/build/test/lib/jars/objenesis-2.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.agent-0.8.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.ant-0.8.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.core-0.8.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.report-0.8.6.jar:${project.dir}/build/test/lib/jars/quicktheorie
s-0.26.jar:${project.dir}/build/test/lib/jars/reflections-0.9.12.jar:${project.dir}/build/test/lib/jars/slf4j-api-1.7.25.jar:</property>
             </properties>
             <folders>
                 <source-folder>
index 7de95cf24c8094e770d3273b1af9c6c9ddfa4736..7e123bd67a89f5c132eced3e23cbc4a0677b8ae5 100644 (file)
@@ -731,6 +731,7 @@ syntax_rules += r'''
 <selector> ::= [colname]=<cident> ( "[" ( <term> ( ".." <term> "]" )? | <term> ".." ) )?
              | <udtSubfieldSelection>
              | "WRITETIME" "(" [colname]=<cident> ")"
+             | "MAXWRITETIME" "(" [colname]=<cident> ")"
              | "TTL" "(" [colname]=<cident> ")"
              | "COUNT" "(" star=( "*" | "1" ) ")"
              | "CAST" "(" <selector> "AS" <storageType> ")"
index aa1fbc01839e01e0c7f68aa238ca864fd062743e..cc8590a44f44c2b850aea6b68765d1d3e8dda02c 100644 (file)
@@ -131,7 +131,7 @@ cqlsh_serial_consistency_level_syntax_rules = r'''
 '''
 
 cqlsh_show_cmd_syntax_rules = r'''
-<showCommand> ::= "SHOW" what=( "VERSION" | "HOST" | "SESSION" sessionid=<uuid> )
+<showCommand> ::= "SHOW" what=( "VERSION" | "HOST" | "SESSION" sessionid=<uuid> | "REPLICAS" token=<integer> (keyspace=<keyspaceName>)? )
                 ;
 '''
 
index b49a29aebdaf2db719d66bcda1700d1320488bd4..39bc060485da5332fbd0f6e5915f7c8f3a6ef3b8 100644 (file)
@@ -326,19 +326,9 @@ def format_integer_type(val, colormap, thousands_sep=None, **_):
     return colorme(bval, colormap, 'int')
 
 
-# We can get rid of this in cassandra-2.2
-if sys.version_info >= (2, 7):
-    def format_integer_with_thousands_sep(val, thousands_sep=','):
-        return "{:,.0f}".format(val).replace(',', thousands_sep)
-else:
-    def format_integer_with_thousands_sep(val, thousands_sep=','):
-        if val < 0:
-            return '-' + format_integer_with_thousands_sep(-val, thousands_sep)
-        result = ''
-        while val >= 1000:
-            val, r = divmod(val, 1000)
-            result = "%s%03d%s" % (thousands_sep, r, result)
-        return "%d%s" % (val, result)
+def format_integer_with_thousands_sep(val, thousands_sep=','):
+    return "{:,.0f}".format(val).replace(',', thousands_sep)
+
 
 formatter_for('long')(format_integer_type)
 formatter_for('int')(format_integer_type)
index 69f31dced77092373bdb4ef9b679ce7f1c151e15..c1fd55edbfd6f9eef94933267496eaf79ec5756d 100644 (file)
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+"""Pylexotron uses Python's re.Scanner module as a simple regex-based tokenizer for BNF production rules"""
+
 import re
+import inspect
+import sys
+from typing import Union
 
 from cqlshlib.saferscanner import SaferScanner
 
@@ -56,8 +61,8 @@ class Hint:
         return '%s(%r)' % (self.__class__, self.text)
 
 
-def is_hint(x):
-    return isinstance(x, Hint)
+def is_hint(obj):
+    return isinstance(obj, Hint)
 
 
 class ParseContext:
@@ -115,7 +120,7 @@ class ParseContext:
                % (self.__class__.__name__, self.matched, self.remainder, self.productionname, self.bindings)
 
 
-class matcher:
+class Matcher:
 
     def __init__(self, arg):
         self.arg = arg
@@ -155,38 +160,38 @@ class matcher:
         return '%s(%r)' % (self.__class__.__name__, self.arg)
 
 
-class choice(matcher):
+class Choice(Matcher):
 
     def match(self, ctxt, completions):
         foundctxts = []
-        for a in self.arg:
-            subctxts = a.match(ctxt, completions)
+        for each in self.arg:
+            subctxts = each.match(ctxt, completions)
             foundctxts.extend(subctxts)
         return foundctxts
 
 
-class one_or_none(matcher):
+class OneOrNone(Matcher):
 
     def match(self, ctxt, completions):
         return [ctxt] + list(self.arg.match(ctxt, completions))
 
 
-class repeat(matcher):
+class Repeat(Matcher):
 
     def match(self, ctxt, completions):
         found = [ctxt]
         ctxts = [ctxt]
         while True:
             new_ctxts = []
-            for c in ctxts:
-                new_ctxts.extend(self.arg.match(c, completions))
+            for each in ctxts:
+                new_ctxts.extend(self.arg.match(each, completions))
             if not new_ctxts:
                 return found
             found.extend(new_ctxts)
             ctxts = new_ctxts
 
 
-class rule_reference(matcher):
+class RuleReference(Matcher):
 
     def match(self, ctxt, completions):
         prevname = ctxt.productionname
@@ -198,24 +203,24 @@ class rule_reference(matcher):
         return [c.with_production_named(prevname) for c in output]
 
 
-class rule_series(matcher):
+class RuleSeries(Matcher):
 
     def match(self, ctxt, completions):
         ctxts = [ctxt]
         for patpiece in self.arg:
             new_ctxts = []
-            for c in ctxts:
-                new_ctxts.extend(patpiece.match(c, completions))
+            for each in ctxts:
+                new_ctxts.extend(patpiece.match(each, completions))
             if not new_ctxts:
                 return ()
             ctxts = new_ctxts
         return ctxts
 
 
-class named_symbol(matcher):
+class NamedSymbol(Matcher):
 
     def __init__(self, name, arg):
-        matcher.__init__(self, arg)
+        Matcher.__init__(self, arg)
         self.name = name
 
     def match(self, ctxt, completions):
@@ -224,13 +229,14 @@ class named_symbol(matcher):
             # don't collect other completions under this; use a dummy
             pass_in_compls = set()
         results = self.arg.match_with_results(ctxt, pass_in_compls)
-        return [c.with_binding(self.name, ctxt.extract_orig(matchtoks)) for (c, matchtoks) in results]
+        return [c.with_binding(self.name, ctxt.extract_orig(matchtoks))
+                for (c, matchtoks) in results]
 
     def __repr__(self):
         return '%s(%r, %r)' % (self.__class__.__name__, self.name, self.arg)
 
 
-class named_collector(named_symbol):
+class NamedCollector(NamedSymbol):
 
     def match(self, ctxt, completions):
         pass_in_compls = completions
@@ -244,18 +250,21 @@ class named_collector(named_symbol):
         return output
 
 
-class terminal_matcher(matcher):
+class TerminalMatcher(Matcher):
+
+    def match(self, ctxt, completions):
+        raise NotImplementedError
 
     def pattern(self):
         raise NotImplementedError
 
 
-class regex_rule(terminal_matcher):
+class RegexRule(TerminalMatcher):
 
     def __init__(self, pat):
-        terminal_matcher.__init__(self, pat)
+        TerminalMatcher.__init__(self, pat)
         self.regex = pat
-        self.re = re.compile(pat + '$', re.I | re.S)
+        self.re = re.compile(pat + '$', re.IGNORECASE | re.DOTALL)
 
     def match(self, ctxt, completions):
         if ctxt.remainder:
@@ -269,12 +278,12 @@ class regex_rule(terminal_matcher):
         return self.regex
 
 
-class text_match(terminal_matcher):
+class TextMatch(TerminalMatcher):
     alpha_re = re.compile(r'[a-zA-Z]')
 
     def __init__(self, text):
         try:
-            terminal_matcher.__init__(self, eval(text))
+            TerminalMatcher.__init__(self, eval(text))
         except SyntaxError:
             print("bad syntax %r" % (text,))
 
@@ -289,12 +298,13 @@ class text_match(terminal_matcher):
     def pattern(self):
         # can't use (?i) here- Scanner component regex flags won't be applied
         def ignorecaseify(matchobj):
-            c = matchobj.group(0)
-            return '[%s%s]' % (c.upper(), c.lower())
+            val = matchobj.group(0)
+            return '[%s%s]' % (val.upper(), val.lower())
+
         return self.alpha_re.sub(ignorecaseify, re.escape(self.arg))
 
 
-class case_match(text_match):
+class CaseMatch(TextMatch):
 
     def match(self, ctxt, completions):
         if ctxt.remainder:
@@ -308,22 +318,22 @@ class case_match(text_match):
         return re.escape(self.arg)
 
 
-class word_match(text_match):
+class WordMatch(TextMatch):
 
     def pattern(self):
-        return r'\b' + text_match.pattern(self) + r'\b'
+        return r'\b' + TextMatch.pattern(self) + r'\b'
 
 
-class case_word_match(case_match):
+class CaseWordMatch(CaseMatch):
 
     def pattern(self):
-        return r'\b' + case_match.pattern(self) + r'\b'
+        return r'\b' + CaseMatch.pattern(self) + r'\b'
 
 
-class terminal_type_matcher(matcher):
+class TerminalTypeMatcher(Matcher):
 
     def __init__(self, tokentype, submatcher):
-        matcher.__init__(self, tokentype)
+        Matcher.__init__(self, tokentype)
         self.tokentype = tokentype
         self.submatcher = submatcher
 
@@ -340,18 +350,24 @@ class terminal_type_matcher(matcher):
 
 
 class ParsingRuleSet:
+    """Define the BNF tokenization rules for cql3handling.syntax_rules. Backus-Naur Form consists of
+       - Production rules in the form: Left-Hand-Side ::= Right-Hand-Side.  The LHS is a non-terminal.
+       - Productions or non-terminal symbols
+       - Terminal symbols.  Every terminal is a single token.
+    """
+
     RuleSpecScanner = SaferScanner([
-        (r'::=', lambda s, t: t),
+        (r'::=', lambda s, t: t),                   # BNF rule definition
         (r'\[[a-z0-9_]+\]=', lambda s, t: ('named_collector', t[1:-2])),
         (r'[a-z0-9_]+=', lambda s, t: ('named_symbol', t[:-1])),
         (r'/(\[\^?.[^]]*\]|[^/]|\\.)*/', lambda s, t: ('regex', t[1:-1].replace(r'\/', '/'))),
-        (r'"([^"]|\\.)*"', lambda s, t: ('litstring', t)),
+        (r'"([^"]|\\.)*"', lambda s, t: ('string_literal', t)),
         (r'<[^>]*>', lambda s, t: ('reference', t[1:-1])),
         (r'\bJUNK\b', lambda s, t: ('junk', t)),
         (r'[@()|?*;]', lambda s, t: t),
-        (r'\s+', None),
+        (r'\s+', None),                             # whitespace
         (r'#[^\n]*', None),
-    ], re.I | re.S | re.U)
+    ], re.IGNORECASE | re.DOTALL | re.UNICODE)
 
     def __init__(self):
         self.ruleset = {}
@@ -368,7 +384,7 @@ class ParsingRuleSet:
     def parse_rules(cls, rulestr):
         tokens, unmatched = cls.RuleSpecScanner.scan(rulestr)
         if unmatched:
-            raise LexingError.from_text(rulestr, unmatched, msg="Syntax rules unparseable")
+            raise LexingError.from_text(rulestr, unmatched, msg="Syntax rules are unparseable")
         rules = {}
         terminals = []
         tokeniter = iter(tokens)
@@ -379,9 +395,9 @@ class ParsingRuleSet:
                     raise ValueError('Unexpected token %r; expected "::="' % (assign,))
                 name = t[1]
                 production = cls.read_rule_tokens_until(';', tokeniter)
-                if isinstance(production, terminal_matcher):
+                if isinstance(production, TerminalMatcher):
                     terminals.append((name, production))
-                    production = terminal_type_matcher(name, production)
+                    production = TerminalTypeMatcher(name, production)
                 rules[name] = production
             else:
                 raise ValueError('Unexpected token %r; expected name' % (t,))
@@ -392,11 +408,11 @@ class ParsingRuleSet:
         if isinstance(pieces, (tuple, list)):
             if len(pieces) == 1:
                 return pieces[0]
-            return rule_series(pieces)
+            return RuleSeries(pieces)
         return pieces
 
     @classmethod
-    def read_rule_tokens_until(cls, endtoks, tokeniter):
+    def read_rule_tokens_until(cls, endtoks: Union[str, int], tokeniter):
         if isinstance(endtoks, str):
             endtoks = (endtoks,)
         counttarget = None
@@ -411,32 +427,32 @@ class ParsingRuleSet:
             if t in endtoks:
                 if len(mybranches) == 1:
                     return cls.mkrule(mybranches[0])
-                return choice(list(map(cls.mkrule, mybranches)))
+                return Choice(list(map(cls.mkrule, mybranches)))
             if isinstance(t, tuple):
                 if t[0] == 'reference':
-                    t = rule_reference(t[1])
-                elif t[0] == 'litstring':
+                    t = RuleReference(t[1])
+                elif t[0] == 'string_literal':
                     if t[1][1].isalnum() or t[1][1] == '_':
-                        t = word_match(t[1])
+                        t = WordMatch(t[1])
                     else:
-                        t = text_match(t[1])
+                        t = TextMatch(t[1])
                 elif t[0] == 'regex':
-                    t = regex_rule(t[1])
+                    t = RegexRule(t[1])
                 elif t[0] == 'named_collector':
-                    t = named_collector(t[1], cls.read_rule_tokens_until(1, tokeniter))
+                    t = NamedCollector(t[1], cls.read_rule_tokens_until(1, tokeniter))
                 elif t[0] == 'named_symbol':
-                    t = named_symbol(t[1], cls.read_rule_tokens_until(1, tokeniter))
+                    t = NamedSymbol(t[1], cls.read_rule_tokens_until(1, tokeniter))
             elif t == '(':
                 t = cls.read_rule_tokens_until(')', tokeniter)
             elif t == '?':
-                t = one_or_none(myrules.pop(-1))
+                t = OneOrNone(myrules.pop(-1))
             elif t == '*':
-                t = repeat(myrules.pop(-1))
+                t = Repeat(myrules.pop(-1))
             elif t == '@':
-                x = next(tokeniter)
-                if not isinstance(x, tuple) or x[0] != 'litstring':
-                    raise ValueError("Unexpected token %r following '@'" % (x,))
-                t = case_match(x[1])
+                val = next(tokeniter)
+                if not isinstance(val, tuple) or val[0] != 'string_literal':
+                    raise ValueError("Unexpected token %r following '@'" % (val,))
+                t = CaseMatch(val[1])
             elif t == '|':
                 myrules = []
                 mybranches.append(myrules)
@@ -447,7 +463,7 @@ class ParsingRuleSet:
             if countsofar == counttarget:
                 if len(mybranches) == 1:
                     return cls.mkrule(mybranches[0])
-                return choice(list(map(cls.mkrule, mybranches)))
+                return Choice(list(map(cls.mkrule, mybranches)))
         raise ValueError('Unexpected end of rule tokens')
 
     def append_rules(self, rulestr):
@@ -465,8 +481,9 @@ class ParsingRuleSet:
             if name == 'JUNK':
                 return None
             return lambda s, t: (name, t, s.match.span())
+
         regexes = [(p.pattern(), make_handler(name)) for (name, p) in self.terminals]
-        return SaferScanner(regexes, re.I | re.S | re.U).scan
+        return SaferScanner(regexes, re.IGNORECASE | re.DOTALL | re.UNICODE).scan
 
     def lex(self, text):
         if self.scanner is None:
@@ -487,9 +504,9 @@ class ParsingRuleSet:
         bindings = {}
         if srcstr is not None:
             bindings['*SRC*'] = srcstr
-        for c in self.parse(startsymbol, tokens, init_bindings=bindings):
-            if not c.remainder:
-                return c
+        for val in self.parse(startsymbol, tokens, init_bindings=bindings):
+            if not val.remainder:
+                return val
 
     def lex_and_parse(self, text, startsymbol='Start'):
         return self.parse(startsymbol, self.lex(text), init_bindings={'*SRC*': text})
@@ -511,9 +528,6 @@ class ParsingRuleSet:
         return completions
 
 
-import sys
-
-
 class Debugotron(set):
     depth = 10
 
@@ -525,9 +539,9 @@ class Debugotron(set):
         self._note_addition(item)
         set.add(self, item)
 
-    def _note_addition(self, foo):
-        self.stream.write("\nitem %r added by:\n" % (foo,))
-        frame = sys._getframe().f_back.f_back
+    def _note_addition(self, item):
+        self.stream.write("\nitem %r added by:\n" % (item,))
+        frame = inspect.currentframe().f_back.f_back
         for i in range(self.depth):
             name = frame.f_code.co_name
             filename = frame.f_code.co_filename
index 7431c1cd8e64a040e11947f209bc59b0f80e1619..3711dfd85dcb6b998271abffcf0daa7440045fb3 100644 (file)
@@ -161,9 +161,9 @@ exit 0
 %{_sysconfdir}/security/limits.d/%{username}.conf
 /usr/share/%{username}*
 %config(noreplace) /%{_sysconfdir}/%{username}
-%attr(755,%{username},%{username}) %config(noreplace) /var/lib/%{username}/*
-%attr(755,%{username},%{username}) /var/log/%{username}*
-%attr(755,%{username},%{username}) /var/run/%{username}*
+%attr(750,%{username},%{username}) %config(noreplace) /var/lib/%{username}/*
+%attr(750,%{username},%{username}) /var/log/%{username}*
+%attr(750,%{username},%{username}) /var/run/%{username}*
 %{python_sitelib}/cqlshlib/
 %{python_sitelib}/cassandra_pylib*.egg-info
 
diff --git a/redhat/noboolean/README b/redhat/noboolean/README
new file mode 100644 (file)
index 0000000..33ab959
--- /dev/null
@@ -0,0 +1,23 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+These files create the 'noboolean' rpm packaging, using the same procedure as normal.
+These differ from the other packages by not using boolean dependency logic, intended for
+systems using rpmlib < 4.13.
+
+See CASSANDRA-17765 for more information.
diff --git a/redhat/noboolean/cassandra b/redhat/noboolean/cassandra
new file mode 120000 (symlink)
index 0000000..d9af9ad
--- /dev/null
@@ -0,0 +1 @@
+../cassandra
\ No newline at end of file
diff --git a/redhat/noboolean/cassandra.conf b/redhat/noboolean/cassandra.conf
new file mode 120000 (symlink)
index 0000000..7c12fb6
--- /dev/null
@@ -0,0 +1 @@
+../cassandra.conf
\ No newline at end of file
diff --git a/redhat/noboolean/cassandra.in.sh b/redhat/noboolean/cassandra.in.sh
new file mode 120000 (symlink)
index 0000000..115b45b
--- /dev/null
@@ -0,0 +1 @@
+../cassandra.in.sh
\ No newline at end of file
diff --git a/redhat/noboolean/cassandra.spec b/redhat/noboolean/cassandra.spec
new file mode 100644 (file)
index 0000000..8c04fdb
--- /dev/null
@@ -0,0 +1,211 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+%define __jar_repack %{nil}
+# Turn off the brp-python-bytecompile script
+%global __os_install_post %(echo '%{__os_install_post}' | sed -e 's!/usr/lib[^[:space:]]*/brp-python-bytecompile[[:space:]].*$!!g')
+
+# rpmbuild should not barf when it spots we ship
+# binary executable files in our 'noarch' package
+%define _binaries_in_noarch_packages_terminate_build   0
+
+%define __python /usr/bin/python3
+
+%global username cassandra
+
+# input of ~alphaN, ~betaN, ~rcN package versions need to retain upstream '-alphaN, etc' version for sources
+%define upstream_version %(echo %{version} | sed -r 's/~/-/g')
+%define relname apache-cassandra-%{upstream_version}
+
+Name:          cassandra
+Version:       %{version}
+Release:       %{revision}
+Summary:       Cassandra is a highly scalable, eventually consistent, distributed, structured key-value store.
+
+Group:         Development/Libraries
+License:       Apache Software License 2.0
+URL:           http://cassandra.apache.org/
+Source0:       %{relname}-src.tar.gz
+BuildRoot:     %{_tmppath}/%{relname}root-%(%{__id_u} -n)
+
+BuildRequires: ant >= 1.9
+BuildRequires: ant-junit >= 1.9
+
+Requires:      jre >= 1.8.0
+Requires:      python(abi) >= 3.6
+Requires:      procps-ng >= 3.3
+Requires(pre): user(cassandra)
+Requires(pre): group(cassandra)
+Requires(pre): shadow-utils
+Provides:      user(cassandra)
+Provides:      group(cassandra)
+
+BuildArch:     noarch
+
+# Don't examine the .so files we bundle for dependencies
+AutoReqProv:   no
+
+%description
+Cassandra is a distributed (peer-to-peer) system for the management and storage of structured data.
+
+%prep
+%setup -q -n %{relname}-src
+
+%build
+export LANG=en_US.UTF-8
+export JAVA_TOOL_OPTIONS="-Dfile.encoding=UTF-8"
+ant clean jar -Dversion=%{upstream_version}
+
+%install
+%{__rm} -rf %{buildroot}
+mkdir -p %{buildroot}/%{_sysconfdir}/%{username}
+mkdir -p %{buildroot}/usr/share/%{username}
+mkdir -p %{buildroot}/usr/share/%{username}/lib
+mkdir -p %{buildroot}/%{_sysconfdir}/%{username}/default.conf
+mkdir -p %{buildroot}/%{_sysconfdir}/rc.d/init.d
+mkdir -p %{buildroot}/%{_sysconfdir}/security/limits.d
+mkdir -p %{buildroot}/%{_sysconfdir}/default
+mkdir -p %{buildroot}/usr/sbin
+mkdir -p %{buildroot}/usr/bin
+mkdir -p %{buildroot}/var/lib/%{username}/commitlog
+mkdir -p %{buildroot}/var/lib/%{username}/data
+mkdir -p %{buildroot}/var/lib/%{username}/saved_caches
+mkdir -p %{buildroot}/var/lib/%{username}/hints
+mkdir -p %{buildroot}/var/run/%{username}
+mkdir -p %{buildroot}/var/log/%{username}
+( cd pylib && %{__python} setup.py install --no-compile --root %{buildroot}; )
+
+# patches for data and log paths
+patch -p1 < debian/patches/cassandra_yaml_dirs.diff
+patch -p1 < debian/patches/cassandra_logdir_fix.diff
+# uncomment hints_directory path
+sed -i 's/^# hints_directory:/hints_directory:/' conf/cassandra.yaml
+
+# remove other files not being installed
+rm -f bin/*.orig
+rm -f bin/cassandra.in.sh
+rm -f lib/sigar-bin/*winnt*  # strip segfaults on dll..
+rm -f tools/bin/cassandra.in.sh
+
+# copy default configs
+cp -pr conf/* %{buildroot}/%{_sysconfdir}/%{username}/default.conf/
+
+# step on default config with our redhat one
+cp -p redhat/%{username}.in.sh %{buildroot}/usr/share/%{username}/%{username}.in.sh
+cp -p redhat/%{username} %{buildroot}/%{_sysconfdir}/rc.d/init.d/%{username}
+cp -p redhat/%{username}.conf %{buildroot}/%{_sysconfdir}/security/limits.d/
+cp -p redhat/default %{buildroot}/%{_sysconfdir}/default/%{username}
+
+# copy cassandra bundled libs
+cp -pr lib/* %{buildroot}/usr/share/%{username}/lib/
+
+# copy stress jar
+cp -p build/tools/lib/stress.jar %{buildroot}/usr/share/%{username}/
+
+# copy fqltool jar
+cp -p build/tools/lib/fqltool.jar %{buildroot}/usr/share/%{username}/
+
+# copy binaries
+mv bin/cassandra %{buildroot}/usr/sbin/
+cp -p bin/* %{buildroot}/usr/bin/
+cp -p tools/bin/* %{buildroot}/usr/bin/
+
+# copy cassandra jar
+cp build/apache-cassandra-%{upstream_version}.jar %{buildroot}/usr/share/%{username}/
+
+%clean
+%{__rm} -rf %{buildroot}
+
+%pre
+getent group %{username} >/dev/null || groupadd -r %{username}
+getent passwd %{username} >/dev/null || \
+useradd -d /var/lib/%{username} -g %{username} -M -r %{username}
+exit 0
+
+%files
+%defattr(0644,root,root,0755)
+%doc CHANGES.txt LICENSE.txt README.asc NEWS.txt NOTICE.txt CASSANDRA-14092.txt
+%attr(755,root,root) %{_bindir}/auditlogviewer
+%attr(755,root,root) %{_bindir}/jmxtool
+%attr(755,root,root) %{_bindir}/cassandra-stress
+%attr(755,root,root) %{_bindir}/cqlsh
+%attr(755,root,root) %{_bindir}/cqlsh.py
+%attr(755,root,root) %{_bindir}/debug-cql
+%attr(755,root,root) %{_bindir}/fqltool
+%attr(755,root,root) %{_bindir}/generatetokens
+%attr(755,root,root) %{_bindir}/nodetool
+%attr(755,root,root) %{_bindir}/sstableloader
+%attr(755,root,root) %{_bindir}/sstablescrub
+%attr(755,root,root) %{_bindir}/sstableupgrade
+%attr(755,root,root) %{_bindir}/sstableutil
+%attr(755,root,root) %{_bindir}/sstableverify
+%attr(755,root,root) %{_bindir}/stop-server
+%attr(755,root,root) %{_sbindir}/cassandra
+%attr(755,root,root) /%{_sysconfdir}/rc.d/init.d/%{username}
+%{_sysconfdir}/default/%{username}
+%{_sysconfdir}/security/limits.d/%{username}.conf
+/usr/share/%{username}*
+%config(noreplace) /%{_sysconfdir}/%{username}
+%attr(750,%{username},%{username}) %config(noreplace) /var/lib/%{username}/*
+%attr(750,%{username},%{username}) /var/log/%{username}*
+%attr(750,%{username},%{username}) /var/run/%{username}*
+%{python_sitelib}/cqlshlib/
+%{python_sitelib}/cassandra_pylib*.egg-info
+
+%post
+alternatives --install /%{_sysconfdir}/%{username}/conf %{username} /%{_sysconfdir}/%{username}/default.conf/ 0
+exit 0
+
+%preun
+# only delete alternative on removal, not upgrade
+if [ "$1" = "0" ]; then
+    alternatives --remove %{username} /%{_sysconfdir}/%{username}/default.conf/
+fi
+exit 0
+
+
+%package tools
+Summary:       Extra tools for Cassandra. Cassandra is a highly scalable, eventually consistent, distributed, structured key-value store.
+Group:         Development/Libraries
+Requires:      cassandra = %{version}-%{revision}
+
+%description tools
+Cassandra is a distributed (peer-to-peer) system for the management and storage of structured data.
+.
+This package contains extra tools for working with Cassandra clusters.
+
+%files tools
+%attr(755,root,root) %{_bindir}/sstabledump
+%attr(755,root,root) %{_bindir}/compaction-stress
+%attr(755,root,root) %{_bindir}/sstableexpiredblockers
+%attr(755,root,root) %{_bindir}/sstablelevelreset
+%attr(755,root,root) %{_bindir}/sstablemetadata
+%attr(755,root,root) %{_bindir}/sstableofflinerelevel
+%attr(755,root,root) %{_bindir}/sstablerepairedset
+%attr(755,root,root) %{_bindir}/sstablesplit
+%attr(755,root,root) %{_bindir}/auditlogviewer
+%attr(755,root,root) %{_bindir}/jmxtool
+%attr(755,root,root) %{_bindir}/fqltool
+%attr(755,root,root) %{_bindir}/generatetokens
+%attr(755,root,root) %{_bindir}/hash_password
+
+
+%changelog
+* Mon Dec 05 2016 Michael Shuler <mshuler@apache.org>
+- 2.1.17, 2.2.9, 3.0.11, 3.10
+- Reintroduce RPM packaging
diff --git a/redhat/noboolean/default b/redhat/noboolean/default
new file mode 120000 (symlink)
index 0000000..446d58f
--- /dev/null
@@ -0,0 +1 @@
+../default
\ No newline at end of file
index 34c7e2ed2fdfb5d100c9256e1528cbf73e522746..84dd0361f111dd86ae34427db7c42f88dd9db4f8 100644 (file)
@@ -178,6 +178,7 @@ K_VARINT:      V A R I N T;
 K_TIMEUUID:    T I M E U U I D;
 K_TOKEN:       T O K E N;
 K_WRITETIME:   W R I T E T I M E;
+K_MAXWRITETIME:M A X W R I T E T I M E;
 K_DATE:        D A T E;
 K_TIME:        T I M E;
 
index d061ee4df35ae3ca58212ba6ebaafb9729ea03e0..b349e165275520f95b73f3ccd1e6c6bb46557639 100644 (file)
@@ -414,11 +414,12 @@ simpleUnaliasedSelector returns [Selectable.Raw s]
     ;
 
 selectionFunction returns [Selectable.Raw s]
-    : K_COUNT '(' '\*' ')'                      { $s = Selectable.WithFunction.Raw.newCountRowsFunction(); }
-    | K_WRITETIME '(' c=sident ')'              { $s = new Selectable.WritetimeOrTTL.Raw(c, true); }
-    | K_TTL       '(' c=sident ')'              { $s = new Selectable.WritetimeOrTTL.Raw(c, false); }
-    | K_CAST      '(' sn=unaliasedSelector K_AS t=native_type ')' {$s = new Selectable.WithCast.Raw(sn, t);}
-    | f=functionName args=selectionFunctionArgs { $s = new Selectable.WithFunction.Raw(f, args); }
+    : K_COUNT        '(' '\*' ')'                                    { $s = Selectable.WithFunction.Raw.newCountRowsFunction(); }
+    | K_MAXWRITETIME '(' c=sident m=selectorModifier[c] ')'          { $s = new Selectable.WritetimeOrTTL.Raw(c, m, Selectable.WritetimeOrTTL.Kind.MAX_WRITE_TIME); }
+    | K_WRITETIME    '(' c=sident m=selectorModifier[c] ')'          { $s = new Selectable.WritetimeOrTTL.Raw(c, m, Selectable.WritetimeOrTTL.Kind.WRITE_TIME); }
+    | K_TTL          '(' c=sident m=selectorModifier[c] ')'          { $s = new Selectable.WritetimeOrTTL.Raw(c, m, Selectable.WritetimeOrTTL.Kind.TTL); }
+    | K_CAST         '(' sn=unaliasedSelector K_AS t=native_type ')' { $s = new Selectable.WithCast.Raw(sn, t);}
+    | f=functionName args=selectionFunctionArgs                      { $s = new Selectable.WithFunction.Raw(f, args); }
     ;
 
 selectionLiteral returns [Term.Raw value]
@@ -1870,7 +1871,7 @@ non_type_ident returns [ColumnIdentifier id]
 
 unreserved_keyword returns [String str]
     : u=unreserved_function_keyword     { $str = u; }
-    | k=(K_TTL | K_COUNT | K_WRITETIME | K_KEY | K_CAST | K_JSON | K_DISTINCT) { $str = $k.text; }
+    | k=(K_TTL | K_COUNT | K_WRITETIME | K_MAXWRITETIME | K_KEY | K_CAST | K_JSON | K_DISTINCT) { $str = $k.text; }
     ;
 
 unreserved_function_keyword returns [String str]
index d0d2d745d7786cfc9bf59570287fe706209d4dee..ac62bfae004db5dba876a4a8fb7e37d0f908cc60 100644 (file)
 package org.apache.cassandra.auth;
 
 import java.net.InetAddress;
+import java.security.cert.Certificate;
 
 import org.apache.cassandra.exceptions.ConfigurationException;
 
 public class AllowAllInternodeAuthenticator implements IInternodeAuthenticator
 {
-    public boolean authenticate(InetAddress remoteAddress, int remotePort)
+    public boolean authenticate(InetAddress remoteAddress, int remotePort,
+                                Certificate[] certificates, InternodeConnectionDirection connectionType)
     {
         return true;
     }
index 0344de921db02cc4ab0d4e172855c9db8e38746b..c2272707ecd25dce3ebb95ca1cfe0b7a271a8254 100644 (file)
@@ -43,6 +43,7 @@ import org.apache.cassandra.db.ConsistencyLevel;
 import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.exceptions.*;
 import org.apache.cassandra.service.ClientState;
+import org.apache.cassandra.service.StorageProxy;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.transport.messages.ResultMessage;
 import org.apache.cassandra.utils.ByteBufferUtil;
@@ -386,6 +387,12 @@ public class CassandraRoleManager implements IRoleManager
     {
         // The delay is to give the node a chance to see its peers before attempting the operation
         ScheduledExecutors.optionalTasks.scheduleSelfRecurring(() -> {
+            if (!StorageProxy.isSafeToPerformRead())
+            {
+                logger.trace("Setup task may not run due to it not being safe to perform reads... rescheduling");
+                scheduleSetupTask(setupTask);
+                return;
+            }
             try
             {
                 setupTask.call();
index 8e09b9035f010e45417139b9c8ab3f04a41a84ae..e5038c09447c8fbb2af919bb01ec3765a1e2b376 100644 (file)
@@ -20,6 +20,7 @@
 package org.apache.cassandra.auth;
 
 import java.net.InetAddress;
+import java.security.cert.Certificate;
 
 import org.apache.cassandra.exceptions.ConfigurationException;
 
@@ -33,7 +34,35 @@ public interface IInternodeAuthenticator
      * @param remotePort port of the connecting node.
      * @return true if the connection should be accepted, false otherwise.
      */
-    boolean authenticate(InetAddress remoteAddress, int remotePort);
+    @Deprecated
+    default boolean authenticate(InetAddress remoteAddress, int remotePort)
+    {
+        return false;
+    }
+
+    /**
+     * Decides whether a peer is allowed to connect to this node.
+     * If this method returns false, the socket will be immediately closed.
+     * <p>
+     * Default implementation calls authenticate method by IP and port method
+     * <p>
+     * 1. If it is IP based authentication ignore the certificates & connectionType parameters in the implementation
+     * of this method.
+     * 2. For certificate based authentication like mTLS, server's identity for outbound connections is verified by the
+     * trusted root certificates in the outbound_keystore. In such cases this method may be overridden to return true
+     * when certificateType is OUTBOUND, as the authentication of the server happens during SSL Handshake.
+     *
+     * @param remoteAddress  ip address of the connecting node.
+     * @param remotePort     port of the connecting node.
+     * @param certificates   peer certificates
+     * @param connectionType If the connection is inbound/outbound connection.
+     * @return true if the connection should be accepted, false otherwise.
+     */
+    default boolean authenticate(InetAddress remoteAddress, int remotePort,
+                                 Certificate[] certificates, InternodeConnectionDirection connectionType)
+    {
+        return authenticate(remoteAddress, remotePort);
+    }
 
     /**
      * Validates configuration of IInternodeAuthenticator implementation (if configurable).
@@ -41,4 +70,30 @@ public interface IInternodeAuthenticator
      * @throws ConfigurationException when there is a configuration error.
      */
     void validateConfiguration() throws ConfigurationException;
+
+    /**
+     * Setup is called once upon system startup to initialize the IInternodeAuthenticator.
+     *
+     * For example, use this method to create any required keyspaces/column families.
+     */
+    default void setupInternode()
+    {
+
+    }
+
+    /**
+     * Enum that represents connection type of internode connection.
+     *
+     * INBOUND - called after connection established, with certificate available if present.
+     * OUTBOUND - called after connection established, with certificate available if present.
+     * OUTBOUND_PRECONNECT - called before initiating a connection, without certificate available.
+     * The outbound connection will be authenticated with the certificate once a redirected connection is established.
+     * This is an extra check that can be used to detect misconfiguration before reconnection, or ignored by returning true.
+     */
+    enum InternodeConnectionDirection
+    {
+        INBOUND,
+        OUTBOUND,
+        OUTBOUND_PRECONNECT
+    }
 }
diff --git a/src/java/org/apache/cassandra/concurrent/DebuggableTask.java b/src/java/org/apache/cassandra/concurrent/DebuggableTask.java
new file mode 100644 (file)
index 0000000..ac04eb4
--- /dev/null
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.concurrent;
+
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Recursive.INTERFACES;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+/**
+ * Interface to include on a Runnable or Callable submitted to the {@link SharedExecutorPool} to provide more
+ * detailed diagnostics.
+ */
+@Shared(scope = SIMULATION, inner = INTERFACES)
+public interface DebuggableTask
+{
+    public long creationTimeNanos();
+
+    public long startTimeNanos();
+
+    public String description();
+    
+    interface RunnableDebuggableTask extends Runnable, DebuggableTask {}
+
+    /**
+     * Wraps a {@link DebuggableTask} to include the name of the thread running it.
+     */
+    public static class RunningDebuggableTask implements DebuggableTask
+    {
+        private final DebuggableTask task;
+        private final String threadId;
+
+        public RunningDebuggableTask(String threadId, DebuggableTask task)
+        {
+            this.task = task;
+            this.threadId = threadId;
+        }
+
+        public String threadId()
+        {
+            return threadId;
+        }
+
+        public boolean hasTask()
+        {
+            return task != null;
+        }
+
+        @Override
+        public long creationTimeNanos()
+        {
+            assert hasTask();
+            return task.creationTimeNanos();
+        }
+
+        @Override
+        public long startTimeNanos()
+        {
+            assert hasTask();
+            return task.startTimeNanos();
+        }
+
+        @Override
+        public String description()
+        {
+            assert hasTask();
+            return task.description();
+        }
+    }
+}
index 7fa7dcbd5466262cf934da86c546261dcdf2ecd5..27ab885e234eddac4fae7a5f6d6d63f3e8993bca 100644 (file)
@@ -21,6 +21,7 @@ package org.apache.cassandra.concurrent;
 import java.util.concurrent.Callable;
 import java.util.concurrent.Future;
 
+import org.apache.cassandra.concurrent.DebuggableTask.RunnableDebuggableTask;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -105,6 +106,14 @@ public class ExecutionFailure
         return enforceOptions(withResources, wrap, false);
     }
 
+    /**
+     * @see #suppressing(WithResources, Runnable)
+     */
+    static RunnableDebuggableTask suppressingDebuggable(WithResources withResources, RunnableDebuggableTask debuggable)
+    {
+        return enforceOptionsDebuggable(withResources, debuggable, false);
+    }
+
     /**
      * Encapsulate the execution, propagating or suppressing any exceptions as requested.
      *
@@ -119,7 +128,7 @@ public class ExecutionFailure
             @Override
             public void run()
             {
-                try (Closeable close = withResources.get())
+                try (@SuppressWarnings("unused") Closeable close = withResources.get())
                 {
                     wrap.run();
                 }
@@ -139,6 +148,54 @@ public class ExecutionFailure
         };
     }
 
+    /**
+     * @see #enforceOptions(WithResources, Runnable, boolean)
+     */
+    private static RunnableDebuggableTask enforceOptionsDebuggable(WithResources withResources, RunnableDebuggableTask debuggable, boolean propagate)
+    {
+        return new RunnableDebuggableTask()
+        {
+            @Override
+            public void run()
+            {
+                try (@SuppressWarnings("unused") Closeable close = withResources.get())
+                {
+                    debuggable.run();
+                }
+                catch (Throwable t)
+                {
+                    handle(t);
+                    if (propagate)
+                        throw t;
+                }
+            }
+
+            @Override
+            public String toString()
+            {
+                return debuggable.toString();
+            }
+
+            @Override
+            public long creationTimeNanos()
+            {
+                return debuggable.creationTimeNanos();
+            }
+
+            @Override
+            public long startTimeNanos()
+            {
+                return debuggable.startTimeNanos();
+            }
+
+            @Override
+            public String description()
+            {
+                return debuggable.description();
+            }
+        };
+    }
+
     /**
      * See {@link #enforceOptions(WithResources, Callable)}
      */
@@ -158,7 +215,7 @@ public class ExecutionFailure
             @Override
             public V call() throws Exception
             {
-                try (Closeable close = withResources.get())
+                try (@SuppressWarnings("unused") Closeable close = withResources.get())
                 {
                     return wrap.call();
                 }
index f7d93e8379362c8ff0c54cac37a4bc60327df4ac..0a62747628e81cade1c0dde3a6509d49c5b5b7a0 100644 (file)
@@ -183,14 +183,25 @@ public interface ExecutorFactory extends ExecutorBuilderFactory.Jmxable<Executor
         // deliberately not volatile to ensure zero overhead outside of testing;
         // depend on other memory visibility primitives to ensure visibility
         private static ExecutorFactory FACTORY = new ExecutorFactory.Default(Global.class.getClassLoader(), null, JVMStabilityInspector::uncaughtException);
+        private static boolean modified;
+
         public static ExecutorFactory executorFactory()
         {
             return FACTORY;
         }
 
-        public static void unsafeSet(ExecutorFactory executorFactory)
+        public static synchronized void unsafeSet(ExecutorFactory executorFactory)
         {
             FACTORY = executorFactory;
+            modified = true;
+        }
+
+        public static synchronized boolean tryUnsafeSet(ExecutorFactory executorFactory)
+        {
+            if (modified)
+                return false;
+            unsafeSet(executorFactory);
+            return true;
         }
     }
 
index 2348ff6bf88c52bec3c07c0e1247d601361d373d..763884a2dad28f5309b4a2b7b09658e9c3fa4467 100644 (file)
@@ -20,9 +20,10 @@ package org.apache.cassandra.concurrent;
 
 import java.util.concurrent.Callable;
 
-import org.apache.cassandra.utils.concurrent.RunnableFuture;
+import javax.annotation.Nullable;
 
 import org.apache.cassandra.utils.concurrent.AsyncFuture;
+import org.apache.cassandra.utils.concurrent.RunnableFuture;
 
 /**
  * A FutureTask that utilises Cassandra's {@link AsyncFuture}, making it compatible with {@link ExecutorPlus}.
@@ -31,15 +32,28 @@ import org.apache.cassandra.utils.concurrent.AsyncFuture;
 public class FutureTask<V> extends AsyncFuture<V> implements RunnableFuture<V>
 {
     private Callable<? extends V> call;
+    private volatile DebuggableTask debuggable;
 
     public FutureTask(Callable<? extends V> call)
     {
-        this.call = call;
+        this(call, call instanceof DebuggableTask ? (DebuggableTask) call : null);
     }
 
     public FutureTask(Runnable run)
     {
-        this.call = callable(run);
+        this(callable(run), run instanceof DebuggableTask ? (DebuggableTask) run : null);
+    }
+
+    private FutureTask(Callable<? extends V> call, DebuggableTask debuggable)
+    {
+        this.call = call;
+        this.debuggable = debuggable;
+    }
+
+    @Nullable
+    DebuggableTask debuggableTask()
+    {
+        return debuggable;
     }
 
     V call() throws Exception
@@ -63,6 +77,7 @@ public class FutureTask<V> extends AsyncFuture<V> implements RunnableFuture<V>
         finally
         {
             call = null;
+            debuggable = null;
         }
     }
 
index c7b9abf719ab2064f08a0b7e1d4e9cb4aaf83e3c..fe16c950dfdad5e1527967fc60c72298c7854835 100644 (file)
@@ -48,6 +48,8 @@ final class SEPWorker extends AtomicReference<SEPWorker.Work> implements Runnabl
     long prevStopCheck = 0;
     long soleSpinnerSpinTime = 0;
 
+    private final AtomicReference<Runnable> currentTask = new AtomicReference<>();
+
     SEPWorker(ThreadGroup threadGroup, Long workerId, Work initialState, SharedExecutorPool pool)
     {
         this.pool = pool;
@@ -58,9 +60,27 @@ final class SEPWorker extends AtomicReference<SEPWorker.Work> implements Runnabl
         thread.start();
     }
 
+    /**
+     * @return the current {@link DebuggableTask}, if one exists
+     */
+    public DebuggableTask currentDebuggableTask()
+    {
+        // can change after null check so go off local reference
+        Runnable task = currentTask.get();
+
+        // Local read and mutation Runnables are themselves debuggable
+        if (task instanceof DebuggableTask)
+            return (DebuggableTask) task;
+
+        if (task instanceof FutureTask)
+            return ((FutureTask<?>) task).debuggableTask();
+            
+        return null;
+    }
+
     public void run()
     {
-        /**
+        /*
          * we maintain two important invariants:
          * 1)   after exiting spinning phase, we ensure at least one more task on _each_ queue will be processed
          *      promptly after we begin, assuming any are outstanding on any pools. this is to permit producers to
@@ -101,8 +121,10 @@ final class SEPWorker extends AtomicReference<SEPWorker.Work> implements Runnabl
                 if (assigned == null)
                     continue;
                 if (SET_THREAD_NAME)
-                    Thread.currentThread().setName(assigned.name + "-" + workerId);
+                    Thread.currentThread().setName(assigned.name + '-' + workerId);
+
                 task = assigned.tasks.poll();
+                currentTask.lazySet(task);
 
                 // if we do have tasks assigned, nobody will change our state so we can simply set it to WORKING
                 // (which is also a state that will never be interrupted externally)
@@ -128,9 +150,12 @@ final class SEPWorker extends AtomicReference<SEPWorker.Work> implements Runnabl
                         break;
 
                     task = assigned.tasks.poll();
+                    currentTask.lazySet(task);
                 }
 
                 // return our work permit, and maybe signal shutdown
+                currentTask.lazySet(null);
+
                 if (status != RETURNED_WORK_PERMIT)
                     assigned.returnWorkPermit();
 
@@ -173,6 +198,11 @@ final class SEPWorker extends AtomicReference<SEPWorker.Work> implements Runnabl
                 logger.error("Unexpected exception killed worker", t);
             }
         }
+        finally
+        {
+            currentTask.lazySet(null);
+            pool.workerEnded(this);
+        }
     }
 
     // try to assign this worker the provided work
@@ -420,4 +450,22 @@ final class SEPWorker extends AtomicReference<SEPWorker.Work> implements Runnabl
             return assigned != null;
         }
     }
+
+    @Override
+    public String toString()
+    {
+        return thread.getName();
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return workerId.intValue();
+    }
+
+    @Override
+    public boolean equals(Object obj)
+    {
+        return obj == this;
+    }
 }
index f74854f9cb015f7cf61f50fe54108d85919a0dc2..0631ec61da011ad3e82b9463391c424d94f9766d 100644 (file)
  */
 package org.apache.cassandra.concurrent;
 
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.TimeUnit;
@@ -26,6 +29,9 @@ import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.LockSupport;
+import java.util.stream.Collectors;
+
+import org.apache.cassandra.concurrent.DebuggableTask.RunningDebuggableTask;
 
 import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
 import static org.apache.cassandra.concurrent.SEPWorker.Work;
@@ -77,6 +83,8 @@ public class SharedExecutorPool
     final ConcurrentSkipListMap<Long, SEPWorker> spinning = new ConcurrentSkipListMap<>();
     // the collection of threads that have been asked to stop/deschedule - new workers are scheduled from here last
     final ConcurrentSkipListMap<Long, SEPWorker> descheduled = new ConcurrentSkipListMap<>();
+    // All SEPWorkers that are currently running
+    private final Set<SEPWorker> allWorkers = Collections.newSetFromMap(new ConcurrentHashMap<>());
 
     volatile boolean shuttingDown = false;
 
@@ -102,7 +110,23 @@ public class SharedExecutorPool
                 return;
 
         if (!work.isStop())
-            new SEPWorker(threadGroup, workerId.incrementAndGet(), work, this);
+        {
+            SEPWorker worker = new SEPWorker(threadGroup, workerId.incrementAndGet(), work, this);
+            allWorkers.add(worker);
+        }
+    }
+
+    void workerEnded(SEPWorker worker)
+    {
+        allWorkers.remove(worker);
+    }
+
+    public List<RunningDebuggableTask> runningTasks()
+    {
+        return allWorkers.stream()
+                         .map(worker -> new RunningDebuggableTask(worker.toString(), worker.currentDebuggableTask()))
+                         .filter(RunningDebuggableTask::hasTask)
+                         .collect(Collectors.toList());
     }
 
     void maybeStartSpinningWorker()
index 56087d950b28b99da7efce64395812ab377cf9bf..faeabe6c4c77c67bd727d3a01b3ecf6e0bff51ea 100644 (file)
@@ -20,6 +20,7 @@ package org.apache.cassandra.concurrent;
 
 import java.util.concurrent.Callable;
 
+import org.apache.cassandra.concurrent.DebuggableTask.RunnableDebuggableTask;
 import org.apache.cassandra.utils.Shared;
 import org.apache.cassandra.utils.WithResources;
 import org.apache.cassandra.utils.concurrent.RunnableFuture;
@@ -127,6 +128,9 @@ public interface TaskFactory
         @Override
         public Runnable toExecute(Runnable runnable)
         {
+            if (runnable instanceof RunnableDebuggableTask)
+                return ExecutionFailure.suppressingDebuggable(ExecutorLocals.propagate(), (RunnableDebuggableTask) runnable);
+
             // no reason to propagate exception when it is inaccessible to caller
             return ExecutionFailure.suppressing(ExecutorLocals.propagate(), runnable);
         }
index 6eea3239765e5eebcedb764b3976de53f6c98823..00c2f4cd28bef444d83c74bda7ca0081178a1a46 100644 (file)
@@ -289,10 +289,12 @@ public enum CassandraRelevantProperties
     /** property for the interval on which the repeated client warnings and diagnostic events about disk usage are ignored */
     DISK_USAGE_NOTIFY_INTERVAL_MS("cassandra.disk_usage.notify_interval_ms", Long.toString(TimeUnit.MINUTES.toMillis(30))),
 
+    /** Controls the type of buffer (heap/direct) used for shared scratch buffers */
+    DATA_OUTPUT_BUFFER_ALLOCATE_TYPE("cassandra.dob.allocate_type"),
+
     // for specific tests
     ORG_APACHE_CASSANDRA_CONF_CASSANDRA_RELEVANT_PROPERTIES_TEST("org.apache.cassandra.conf.CassandraRelevantPropertiesTest"),
     ORG_APACHE_CASSANDRA_DB_VIRTUAL_SYSTEM_PROPERTIES_TABLE_TEST("org.apache.cassandra.db.virtual.SystemPropertiesTableTest"),
-
     ;
 
 
@@ -454,6 +456,40 @@ public enum CassandraRelevantProperties
         System.setProperty(key, Long.toString(value));
     }
 
+    /**
+     * Gets the value of a system property as an enum, calling {@link String#toUpperCase()} first.
+     *
+     * @param defaultValue to return when not defined
+     * @param <T> type
+     * @return enum value
+     */
+    public <T extends Enum<T>> T getEnum(T defaultValue) {
+        return getEnum(true, defaultValue);
+    }
+
+    /**
+     * Gets the value of a system property as an enum, optionally calling {@link String#toUpperCase()} first.
+     *
+     * @param toUppercase before converting to enum
+     * @param defaultValue to return when not defined
+     * @param <T> type
+     * @return enum value
+     */
+    public <T extends Enum<T>> T getEnum(boolean toUppercase, T defaultValue) {
+        String value = System.getProperty(key);
+        if (value == null)
+            return defaultValue;
+        return Enum.valueOf(defaultValue.getDeclaringClass(), toUppercase ? value.toUpperCase() : value);
+    }
+
+    /**
+     * Sets the value into system properties.
+     * @param value to set
+     */
+    public void setEnum(Enum<?> value) {
+        System.setProperty(key, value.name());
+    }
+
     public interface PropertyConverter<T>
     {
         T convert(String value);
index 098b046d4e4d49f6b3b5e6ca91c3f143d396d85f..68091ac90f27491e9ab894197167f2f5a0002b0e 100644 (file)
@@ -28,6 +28,8 @@ import java.util.Set;
 import java.util.TreeMap;
 import java.util.function.Supplier;
 
+import javax.annotation.Nullable;
+
 import com.google.common.base.Joiner;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
@@ -315,7 +317,7 @@ public class Config
     public Integer unlogged_batch_across_partitions_warn_threshold = 10;
     public volatile Integer concurrent_compactors;
     @Replaces(oldName = "compaction_throughput_mb_per_sec", converter = Converters.MEBIBYTES_PER_SECOND_DATA_RATE, deprecated = true)
-    public volatile DataRateSpec.IntMebibytesPerSecondBound compaction_throughput = new DataRateSpec.IntMebibytesPerSecondBound("16MiB/s");
+    public volatile DataRateSpec.LongBytesPerSecondBound compaction_throughput = new DataRateSpec.LongBytesPerSecondBound("64MiB/s");
     @Replaces(oldName = "compaction_large_partition_warning_threshold_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
     public volatile DataStorageSpec.IntMebibytesBound compaction_large_partition_warning_threshold = new DataStorageSpec.IntMebibytesBound("100MiB");
     @Replaces(oldName = "min_free_space_per_drive_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
@@ -325,19 +327,22 @@ public class Config
     public volatile int concurrent_materialized_view_builders = 1;
     public volatile int reject_repair_compaction_threshold = Integer.MAX_VALUE;
 
+    // The number of executors to use for building secondary indexes
+    public int concurrent_index_builders = 2;
+
     /**
      * @deprecated retry support removed on CASSANDRA-10992
      */
     @Deprecated
     public int max_streaming_retries = 3;
 
-    @Replaces(oldName = "stream_throughput_outbound_megabits_per_sec", converter = Converters.MEGABITS_TO_MEBIBYTES_PER_SECOND_DATA_RATE, deprecated = true)
-    public volatile DataRateSpec.IntMebibytesPerSecondBound stream_throughput_outbound = new DataRateSpec.IntMebibytesPerSecondBound("24MiB/s");
-    @Replaces(oldName = "inter_dc_stream_throughput_outbound_megabits_per_sec", converter = Converters.MEGABITS_TO_MEBIBYTES_PER_SECOND_DATA_RATE, deprecated = true)
-    public volatile DataRateSpec.IntMebibytesPerSecondBound inter_dc_stream_throughput_outbound = new DataRateSpec.IntMebibytesPerSecondBound("24MiB/s");
+    @Replaces(oldName = "stream_throughput_outbound_megabits_per_sec", converter = Converters.MEGABITS_TO_BYTES_PER_SECOND_DATA_RATE, deprecated = true)
+    public volatile DataRateSpec.LongBytesPerSecondBound stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound("24MiB/s");
+    @Replaces(oldName = "inter_dc_stream_throughput_outbound_megabits_per_sec", converter = Converters.MEGABITS_TO_BYTES_PER_SECOND_DATA_RATE, deprecated = true)
+    public volatile DataRateSpec.LongBytesPerSecondBound inter_dc_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound("24MiB/s");
 
-    public volatile DataRateSpec.IntMebibytesPerSecondBound entire_sstable_stream_throughput_outbound = new DataRateSpec.IntMebibytesPerSecondBound("24MiB/s");
-    public volatile DataRateSpec.IntMebibytesPerSecondBound entire_sstable_inter_dc_stream_throughput_outbound = new DataRateSpec.IntMebibytesPerSecondBound("24MiB/s");
+    public volatile DataRateSpec.LongBytesPerSecondBound entire_sstable_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound("24MiB/s");
+    public volatile DataRateSpec.LongBytesPerSecondBound entire_sstable_inter_dc_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound("24MiB/s");
 
     public String[] data_file_directories = new String[0];
 
@@ -380,6 +385,9 @@ public class Config
     // When true, new CDC mutations are rejected/blocked when reaching max CDC storage.
     // When false, new CDC mutations can always be added. But it will remove the oldest CDC commit log segment on full.
     public volatile boolean cdc_block_writes = true;
+    // When true, CDC data in SSTable go through commit logs during internodes streaming, e.g. repair
+    // When false, it behaves the same as normal streaming.
+    public volatile boolean cdc_on_repair_enabled = true;
     public String cdc_raw_directory;
     @Replaces(oldName = "cdc_total_space_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
     public DataStorageSpec.IntMebibytesBound cdc_total_space = new DataStorageSpec.IntMebibytesBound("0MiB");
@@ -423,7 +431,8 @@ public class Config
     @Replaces(oldName = "trickle_fsync_interval_in_kb", converter = Converters.KIBIBYTES_DATASTORAGE, deprecated = true)
     public DataStorageSpec.IntKibibytesBound trickle_fsync_interval = new DataStorageSpec.IntKibibytesBound("10240KiB");
 
-    @Replaces(oldName = "sstable_preemptive_open_interval_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
+    @Nullable
+    @Replaces(oldName = "sstable_preemptive_open_interval_in_mb", converter = Converters.NEGATIVE_MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
     public volatile DataStorageSpec.IntMebibytesBound sstable_preemptive_open_interval = new DataStorageSpec.IntMebibytesBound("50MiB");
 
     public volatile boolean key_cache_migrate_during_compaction = true;
@@ -448,7 +457,7 @@ public class Config
 
     public DataStorageSpec.LongMebibytesBound paxos_cache_size = null;
 
-    @Replaces(oldName = "cache_load_timeout_seconds", converter = Converters.SECONDS_DURATION, deprecated = true)
+    @Replaces(oldName = "cache_load_timeout_seconds", converter = Converters.NEGATIVE_SECONDS_DURATION, deprecated = true)
     public DurationSpec.IntSecondsBound cache_load_timeout = new DurationSpec.IntSecondsBound("30s");
 
     private static boolean isClientMode = false;
@@ -501,13 +510,14 @@ public class Config
 
     @Replaces(oldName = "index_summary_capacity_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_LONG, deprecated = true)
     public volatile DataStorageSpec.LongMebibytesBound index_summary_capacity;
-    @Replaces(oldName = "index_summary_resize_interval_in_minutes", converter = Converters.MINUTES_DURATION, deprecated = true)
+    @Nullable
+    @Replaces(oldName = "index_summary_resize_interval_in_minutes", converter = Converters.MINUTES_CUSTOM_DURATION, deprecated = true)
     public volatile DurationSpec.IntMinutesBound index_summary_resize_interval = new DurationSpec.IntMinutesBound("60m");
 
     @Replaces(oldName = "gc_log_threshold_in_ms", converter = Converters.MILLIS_DURATION_INT, deprecated = true)
-    public DurationSpec.IntMillisecondsBound gc_log_threshold = new DurationSpec.IntMillisecondsBound("200ms");
+    public volatile DurationSpec.IntMillisecondsBound gc_log_threshold = new DurationSpec.IntMillisecondsBound("200ms");
     @Replaces(oldName = "gc_warn_threshold_in_ms", converter = Converters.MILLIS_DURATION_INT, deprecated = true)
-    public DurationSpec.IntMillisecondsBound gc_warn_threshold = new DurationSpec.IntMillisecondsBound("1s");
+    public volatile DurationSpec.IntMillisecondsBound gc_warn_threshold = new DurationSpec.IntMillisecondsBound("1s");
 
     // TTL for different types of trace events.
     @Replaces(oldName = "tracetype_query_ttl", converter = Converters.SECONDS_DURATION, deprecated=true)
@@ -777,7 +787,7 @@ public class Config
     public volatile boolean auto_optimise_preview_repair_streams = false;
 
     // see CASSANDRA-17048 and the comment in cassandra.yaml
-    public boolean enable_uuid_sstable_identifiers = false;
+    public boolean uuid_sstable_identifiers_enabled = false;
 
     /**
      * Client mode means that the process is a pure client, that uses C* code base but does
@@ -825,13 +835,16 @@ public class Config
     public volatile Set<ConsistencyLevel> write_consistency_levels_warned = Collections.emptySet();
     public volatile Set<ConsistencyLevel> write_consistency_levels_disallowed = Collections.emptySet();
     public volatile boolean user_timestamps_enabled = true;
+    public volatile boolean alter_table_enabled = true;
     public volatile boolean group_by_enabled = true;
     public volatile boolean drop_truncate_table_enabled = true;
+    public volatile boolean drop_keyspace_enabled = true;
     public volatile boolean secondary_indexes_enabled = true;
     public volatile boolean uncompressed_tables_enabled = true;
     public volatile boolean compact_tables_enabled = true;
     public volatile boolean read_before_write_list_operations_enabled = true;
     public volatile boolean allow_filtering_enabled = true;
+    public volatile boolean simplestrategy_enabled = true;
     public volatile DataStorageSpec.LongBytesBound collection_size_warn_threshold = null;
     public volatile DataStorageSpec.LongBytesBound collection_size_fail_threshold = null;
     public volatile int items_per_collection_warn_threshold = -1;
@@ -843,6 +856,8 @@ public class Config
     public volatile DataStorageSpec.LongBytesBound data_disk_usage_max_disk_size = null;
     public volatile int minimum_replication_factor_warn_threshold = -1;
     public volatile int minimum_replication_factor_fail_threshold = -1;
+    public volatile int maximum_replication_factor_warn_threshold = -1;
+    public volatile int maximum_replication_factor_fail_threshold = -1;
 
     public volatile DurationSpec.LongNanosecondsBound streaming_state_expires = new DurationSpec.LongNanosecondsBound("3d");
     public volatile DataStorageSpec.LongBytesBound streaming_state_size = new DataStorageSpec.LongBytesBound("40MiB");
@@ -1033,7 +1048,7 @@ public class Config
 
     public volatile int max_top_size_partition_count = 10;
     public volatile int max_top_tombstone_partition_count = 10;
-    public volatile DataStorageSpec.LongBytesBound min_tracked_partition_size_bytes = new DataStorageSpec.LongBytesBound("1MiB");
+    public volatile DataStorageSpec.LongBytesBound min_tracked_partition_size = new DataStorageSpec.LongBytesBound("1MiB");
     public volatile long min_tracked_partition_tombstone_count = 5000;
     public volatile boolean top_partitions_enabled = true;
 
index ccfc87b4d53d886b41d5e4c12efa11024885c29f..c898c08d648223836f2649114b0d0b75cf7fba75 100644 (file)
@@ -21,6 +21,8 @@ package org.apache.cassandra.config;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
 
+import static org.apache.cassandra.config.DataRateSpec.DataRateUnit.MEBIBYTES_PER_SECOND;
+
 /**
  * Converters for backward compatibility with the old cassandra.yaml where duration, data rate and
  * data storage configuration parameters were provided only by value and the expected unit was part of the configuration
@@ -40,10 +42,10 @@ public enum Converters
     IDENTITY(null, null, o -> o, o -> o),
     MILLIS_DURATION_LONG(Long.class, DurationSpec.LongMillisecondsBound.class,
                          DurationSpec.LongMillisecondsBound::new,
-                         o -> o.toMilliseconds()),
+                         o -> o == null ? null : o.toMilliseconds()),
     MILLIS_DURATION_INT(Integer.class, DurationSpec.IntMillisecondsBound.class,
                         DurationSpec.IntMillisecondsBound::new,
-                        DurationSpec.IntMillisecondsBound::toMilliseconds),
+                        o -> o == null ? null : o.toMilliseconds()),
     MILLIS_DURATION_DOUBLE(Double.class, DurationSpec.IntMillisecondsBound.class,
                            o -> Double.isNaN(o) ? new DurationSpec.IntMillisecondsBound(0) :
                                 new DurationSpec.IntMillisecondsBound(o, TimeUnit.MILLISECONDS),
@@ -57,10 +59,10 @@ public enum Converters
                            o -> o == null ? -1 : o.toMilliseconds()),
     SECONDS_DURATION(Integer.class, DurationSpec.IntSecondsBound.class,
                      DurationSpec.IntSecondsBound::new,
-                     DurationSpec.IntSecondsBound::toSeconds),
+                     o -> o == null ? null : o.toSeconds()),
     NEGATIVE_SECONDS_DURATION(Integer.class, DurationSpec.IntSecondsBound.class,
                               o -> o < 0 ? new DurationSpec.IntSecondsBound(0) : new DurationSpec.IntSecondsBound(o),
-                              DurationSpec.IntSecondsBound::toSeconds),
+                              o -> o == null ? null : o.toSeconds()),
     /**
      * This converter is used to support backward compatibility for Duration parameters where we added the opportunity
      * for the users to add a unit in the parameters' values but we didn't change the names. (key_cache_save_period,
@@ -69,22 +71,29 @@ public enum Converters
      */
     SECONDS_CUSTOM_DURATION(String.class, DurationSpec.IntSecondsBound.class,
                             DurationSpec.IntSecondsBound::inSecondsString,
-                            o -> Long.toString(o.toSeconds())),
-    MINUTES_DURATION(Integer.class, DurationSpec.IntMinutesBound.class,
-                     DurationSpec.IntMinutesBound::new,
-                     DurationSpec.IntMinutesBound::toMinutes),
+                            o -> o == null ? null : Long.toString(o.toSeconds())),
+    /**
+     * This converter is used to support backward compatibility for parameters where in the past -1 was used as a value
+     * Example: index_summary_resize_interval_in_minutes = -1 and index_summary_resize_interval = null are equal.
+     */
+    MINUTES_CUSTOM_DURATION(Integer.class, DurationSpec.IntMinutesBound.class,
+                            o -> o == -1 ? null : new DurationSpec.IntMinutesBound(o),
+                            o -> o == null ? -1 : o.toMinutes()),
     MEBIBYTES_DATA_STORAGE_LONG(Long.class, DataStorageSpec.LongMebibytesBound.class,
                                 DataStorageSpec.LongMebibytesBound::new,
-                                DataStorageSpec.LongMebibytesBound::toMebibytes),
+                                o -> o == null ? null : o.toMebibytes()),
     MEBIBYTES_DATA_STORAGE_INT(Integer.class, DataStorageSpec.IntMebibytesBound.class,
                                DataStorageSpec.IntMebibytesBound::new,
-                               DataStorageSpec.IntMebibytesBound::toMebibytes),
+                               o -> o == null ? null : o.toMebibytes()),
+    NEGATIVE_MEBIBYTES_DATA_STORAGE_INT(Integer.class, DataStorageSpec.IntMebibytesBound.class,
+                                        o -> o < 0 ? null : new DataStorageSpec.IntMebibytesBound(o),
+                                        o -> o == null ? -1 : o.toMebibytes()),
     KIBIBYTES_DATASTORAGE(Integer.class, DataStorageSpec.IntKibibytesBound.class,
                           DataStorageSpec.IntKibibytesBound::new,
-                          DataStorageSpec.IntKibibytesBound::toKibibytes),
+                          o -> o == null ? null : o.toKibibytes()),
     BYTES_DATASTORAGE(Integer.class, DataStorageSpec.IntBytesBound.class,
                       DataStorageSpec.IntBytesBound::new,
-                      DataStorageSpec.IntBytesBound::toBytes),
+                      o -> o == null ? null : o.toBytes()),
     /**
      * This converter is used to support backward compatibility for parameters where in the past negative number was used as a value
      * Example: native_transport_max_concurrent_requests_in_bytes_per_ip = -1 and native_transport_max_request_data_in_flight_per_ip = null
@@ -92,17 +101,17 @@ public enum Converters
      */
     BYTES_CUSTOM_DATASTORAGE(Long.class, DataStorageSpec.LongBytesBound.class,
                              o -> o == -1 ? null : new DataStorageSpec.LongBytesBound(o),
-                             DataStorageSpec.LongBytesBound::toBytes),
-    MEBIBYTES_PER_SECOND_DATA_RATE(Integer.class, DataRateSpec.IntMebibytesPerSecondBound.class,
-                                   DataRateSpec.IntMebibytesPerSecondBound::new,
-                                   DataRateSpec.IntMebibytesPerSecondBound::toMebibytesPerSecondAsInt),
+                             o -> o == null ? null : o.toBytes()),
+    MEBIBYTES_PER_SECOND_DATA_RATE(Integer.class, DataRateSpec.LongBytesPerSecondBound.class,
+                                   i -> new DataRateSpec.LongBytesPerSecondBound(i, MEBIBYTES_PER_SECOND),
+                                   o -> o == null ? null : o.toMebibytesPerSecondAsInt()),
     /**
      * This converter is a custom one to support backward compatibility for stream_throughput_outbound and
-     * inter_dc_stream_throughput_outbound which were provided in megatibs per second prior CASSANDRA-15234.
+     * inter_dc_stream_throughput_outbound which were provided in megabits per second prior to CASSANDRA-15234.
      */
-    MEGABITS_TO_MEBIBYTES_PER_SECOND_DATA_RATE(Integer.class, DataRateSpec.IntMebibytesPerSecondBound.class,
-                                               i -> DataRateSpec.IntMebibytesPerSecondBound.megabitsPerSecondInMebibytesPerSecond(i),
-                                               DataRateSpec.IntMebibytesPerSecondBound::toMegabitsPerSecondAsInt);
+    MEGABITS_TO_BYTES_PER_SECOND_DATA_RATE(Integer.class, DataRateSpec.LongBytesPerSecondBound.class,
+                                           i -> DataRateSpec.LongBytesPerSecondBound.megabitsPerSecondInBytesPerSecond(i),
+                                           o -> o == null ? null : o.toMegabitsPerSecondAsInt());
     private final Class<?> oldType;
     private final Class<?> newType;
     private final Function<Object, Object> convert;
@@ -160,7 +169,6 @@ public enum Converters
      */
     public Object unconvert(Object value)
     {
-        if (value == null) return null;
         return reverseConvert.apply(value);
     }
 }
index 34eac7465a6061a0a7c01e77a03a9983e4278b74..1ec2d1e77420d0bd31b56ab6578d97875fc71a84 100644 (file)
@@ -23,10 +23,10 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 
+import com.google.common.math.DoubleMath;
 import com.google.common.primitives.Ints;
 
 import static org.apache.cassandra.config.DataRateSpec.DataRateUnit.BYTES_PER_SECOND;
-import static org.apache.cassandra.config.DataRateSpec.DataRateUnit.MEBIBYTES_PER_SECOND;
 
 /**
  * Represents a data rate type used for cassandra configuration. It supports the opportunity for the users to be able to
@@ -39,7 +39,7 @@ public abstract class DataRateSpec
      */
     private static final Pattern UNITS_PATTERN = Pattern.compile("^(\\d+)(MiB/s|KiB/s|B/s)$");
 
-    private final double quantity;
+    private final long quantity;
 
     private final DataRateUnit unit;
 
@@ -52,7 +52,7 @@ public abstract class DataRateSpec
             throw new IllegalArgumentException("Invalid data rate: " + value + " Accepted units: MiB/s, KiB/s, B/s where " +
                                                 "case matters and " + "only non-negative values are valid");
 
-        quantity = (double) Long.parseLong(matcher.group(1));
+        quantity = Long.parseLong(matcher.group(1));
         unit = DataRateUnit.fromSymbol(matcher.group(2));
     }
 
@@ -63,7 +63,7 @@ public abstract class DataRateSpec
         validateQuantity(value, quantity(), unit(), minUnit, max);
     }
 
-    private DataRateSpec(double quantity, DataRateUnit unit, DataRateUnit minUnit, long max)
+    private DataRateSpec(long quantity, DataRateUnit unit, DataRateUnit minUnit, long max)
     {
         this.quantity = quantity;
         this.unit = unit;
@@ -212,7 +212,7 @@ public abstract class DataRateSpec
     @Override
     public String toString()
     {
-        return Math.round(quantity) + unit.symbol;
+        return (DoubleMath.isMathematicalInteger(quantity) ? (long) quantity : quantity) + unit.symbol;
     }
 
     /**
@@ -238,7 +238,7 @@ public abstract class DataRateSpec
          * @param quantity where quantity shouldn't be bigger than Long.MAX_VALUE - 1 in bytes per second
          * @param unit     in which the provided quantity is
          */
-        public LongBytesPerSecondBound(double quantity, DataRateUnit unit)
+        public LongBytesPerSecondBound(long quantity, DataRateUnit unit)
         {
             super(quantity, unit, BYTES_PER_SECOND, Long.MAX_VALUE);
         }
@@ -252,59 +252,21 @@ public abstract class DataRateSpec
         {
             this(bytesPerSecond, BYTES_PER_SECOND);
         }
-    }
-
-    /**
-     * Represents a data rate used for Cassandra configuration. The bound is [0, Integer.MAX_VALUE) in mebibytes per second.
-     * If the user sets a different unit - we still validate that converted to mebibytes per second the quantity will not exceed
-     * that upper bound. (CASSANDRA-17571)
-     */
-    public final static class IntMebibytesPerSecondBound extends DataRateSpec
-    {
-        /**
-         * Creates a {@code DataRateSpec.IntMebibytesPerSecondBound} of the specified amount with bound [0, Integer.MAX_VALUE) mebibytes per second.
-         *
-         * @param value the data rate
-         */
-        public IntMebibytesPerSecondBound(String value)
-        {
-            super(value, MEBIBYTES_PER_SECOND, Integer.MAX_VALUE);
-        }
-
-        /**
-         * Creates a {@code DataRateSpec.IntMebibytesPerSecondBound} of the specified amount in the specified unit.
-         *
-         * @param quantity where quantity shouldn't be bigger than Integer.MAX_VALUE - 1 in mebibytes per second
-         * @param unit     in which the provided quantity is
-         */
-        public IntMebibytesPerSecondBound(double quantity, DataRateUnit unit)
-        {
-            super(quantity, unit, MEBIBYTES_PER_SECOND, Integer.MAX_VALUE);
-        }
-
-        /**
-         * Creates a {@code DataRateSpec.IntMebibytesPerSecondBound} of the specified amount in mebibytes per second.
-         *
-         * @param mebibytesPerSecond where mebibytesPerSecond shouldn't be bigger than Long.MAX_VALUE-1
-         */
-        public IntMebibytesPerSecondBound(long mebibytesPerSecond)
-        {
-            this (mebibytesPerSecond, MEBIBYTES_PER_SECOND);
-        }
 
         // this one should be used only for backward compatibility for stream_throughput_outbound and inter_dc_stream_throughput_outbound
         // which were in megabits per second in 4.0. Do not start using it for any new properties
-        public static IntMebibytesPerSecondBound megabitsPerSecondInMebibytesPerSecond(long megabitsPerSecond)
+        @Deprecated
+        public static LongBytesPerSecondBound megabitsPerSecondInBytesPerSecond(long megabitsPerSecond)
         {
-            final double MEBIBYTES_PER_MEGABIT = 0.119209289550781;
-            double mebibytesPerSecond = (double) megabitsPerSecond * MEBIBYTES_PER_MEGABIT;
+            final long BYTES_PER_MEGABIT = 125_000;
+            long bytesPerSecond = megabitsPerSecond * BYTES_PER_MEGABIT;
 
             if (megabitsPerSecond >= Integer.MAX_VALUE)
                 throw new IllegalArgumentException("Invalid data rate: " + megabitsPerSecond + " megabits per second; " +
-                                                 "stream_throughput_outbound and inter_dc_stream_throughput_outbound" +
-                                                 " should be between 0 and " + Integer.MAX_VALUE + " in megabits per second");
+                                                   "stream_throughput_outbound and inter_dc_stream_throughput_outbound" +
+                                                   " should be between 0 and " + (Integer.MAX_VALUE - 1) + " in megabits per second");
 
-            return new IntMebibytesPerSecondBound(mebibytesPerSecond, MEBIBYTES_PER_SECOND);
+            return new LongBytesPerSecondBound(bytesPerSecond, BYTES_PER_SECOND);
         }
     }
 
@@ -385,7 +347,7 @@ public abstract class DataRateSpec
             {
                 if (d > MAX / (MEGABITS_PER_MEBIBYTE))
                     return MAX;
-                return Math.round(d * MEGABITS_PER_MEBIBYTE);
+                return d * MEGABITS_PER_MEBIBYTE;
             }
 
             public double convert(double source, DataRateUnit sourceUnit)
index 9a4348d6338c51b0b5e70ea2aa8689ef33fec45f..f0d3acaa61cd7315e3861155630ddb0a10ae2be3 100644 (file)
@@ -315,6 +315,14 @@ public abstract class DataStorageSpec
         {
             return Ints.saturatedCast(unit().toKibibytes(quantity()));
         }
+
+        /**
+         * @return the amount of data storage in bytes.
+         */
+        public long toBytesInLong()
+        {
+            return unit().toBytes(quantity());
+        }
     }
 
     /**
@@ -447,6 +455,16 @@ public abstract class DataStorageSpec
         {
             return Ints.saturatedCast(unit().toMebibytes(quantity()));
         }
+
+        /**
+         * Returns the amount of data storage in bytes as {@code long}
+         *
+         * @return the amount of data storage in bytes.
+         */
+        public long toBytesInLong()
+        {
+            return unit().toBytes(quantity());
+        }
     }
 
     public enum DataStorageUnit
index 16b5c4b78df8f08ea93d206b16638c949080584d..1ce16052feae79950e0a841f92e7bd398e9a26cf 100644 (file)
@@ -93,6 +93,8 @@ import org.apache.cassandra.utils.FBUtilities;
 import static org.apache.cassandra.config.CassandraRelevantProperties.OS_ARCH;
 import static org.apache.cassandra.config.CassandraRelevantProperties.SUN_ARCH_DATA_MODEL;
 import static org.apache.cassandra.config.CassandraRelevantProperties.TEST_JVM_DTEST_DISABLE_SSL;
+import static org.apache.cassandra.config.DataRateSpec.DataRateUnit.BYTES_PER_SECOND;
+import static org.apache.cassandra.config.DataRateSpec.DataRateUnit.MEBIBYTES_PER_SECOND;
 import static org.apache.cassandra.config.DataStorageSpec.DataStorageUnit.MEBIBYTES;
 import static org.apache.cassandra.io.util.FileUtils.ONE_GIB;
 import static org.apache.cassandra.io.util.FileUtils.ONE_MIB;
@@ -281,7 +283,7 @@ public class DatabaseDescriptor
         if (clientInitialized)
             return;
         clientInitialized = true;
-
+        setDefaultFailureDetector();
         Config.setClientMode(true);
         conf = new Config();
         diskOptimizationStrategy = new SpinningDiskOptimizationStrategy();
@@ -398,16 +400,7 @@ public class DatabaseDescriptor
         //InetAddressAndPort and get the right defaults
         InetAddressAndPort.initializeDefaultPort(getStoragePort());
 
-        // below 2 checks are needed in order to match the pre-CASSANDRA-15234 upper bound for those parameters which were still in megabits per second
-        if (conf.stream_throughput_outbound.toMegabitsPerSecond() >= Integer.MAX_VALUE)
-        {
-            throw new ConfigurationException("Invalid value of stream_throughput_outbound: " + conf.stream_throughput_outbound.toString(), false);
-        }
-
-        if (conf.inter_dc_stream_throughput_outbound.toMegabitsPerSecond() >= Integer.MAX_VALUE)
-        {
-            throw new ConfigurationException("Invalid value of inter_dc_stream_throughput_outbound: " + conf.inter_dc_stream_throughput_outbound.toString(), false);
-        }
+        validateUpperBoundStreamingConfig();
 
         if (conf.auto_snapshot_ttl != null)
         {
@@ -729,6 +722,9 @@ public class DatabaseDescriptor
 
             if (preparedStatementsCacheSizeInMiB == 0)
                 throw new NumberFormatException(); // to escape duplicating error message
+
+            // we need this assignment for the Settings virtual table - CASSANDRA-17734
+            conf.prepared_statements_cache_size = new DataStorageSpec.LongMebibytesBound(preparedStatementsCacheSizeInMiB);
         }
         catch (NumberFormatException e)
         {
@@ -745,6 +741,9 @@ public class DatabaseDescriptor
 
             if (keyCacheSizeInMiB < 0)
                 throw new NumberFormatException(); // to escape duplicating error message
+
+            // we need this assignment for the Settings Virtual Table - CASSANDRA-17734
+            conf.key_cache_size = new DataStorageSpec.LongMebibytesBound(keyCacheSizeInMiB);
         }
         catch (NumberFormatException e)
         {
@@ -784,6 +783,9 @@ public class DatabaseDescriptor
                     + conf.paxos_cache_size + "', supported values are <integer> >= 0.", false);
         }
 
+        // we need this assignment for the Settings virtual table - CASSANDRA-17735
+        conf.counter_cache_size = new DataStorageSpec.LongMebibytesBound(counterCacheSizeInMiB);
+
         // if set to empty/"auto" then use 5% of Heap size
         indexSummaryCapacityInMiB = (conf.index_summary_capacity == null)
                                    ? Math.max(1, (int) (Runtime.getRuntime().totalMemory() * 0.05 / 1024 / 1024))
@@ -793,6 +795,9 @@ public class DatabaseDescriptor
             throw new ConfigurationException("index_summary_capacity option was set incorrectly to '"
                                              + conf.index_summary_capacity.toString() + "', it should be a non-negative integer.", false);
 
+        // we need this assignment for the Settings virtual table - CASSANDRA-17735
+        conf.index_summary_capacity = new DataStorageSpec.LongMebibytesBound(indexSummaryCapacityInMiB);
+
         if (conf.user_defined_functions_fail_timeout.toMilliseconds() < conf.user_defined_functions_warn_timeout.toMilliseconds())
             throw new ConfigurationException("user_defined_functions_warn_timeout must less than user_defined_function_fail_timeout", false);
 
@@ -903,6 +908,36 @@ public class DatabaseDescriptor
         logInitializationOutcome(logger);
     }
 
+    @VisibleForTesting
+    static void validateUpperBoundStreamingConfig() throws ConfigurationException
+    {
+        // below 2 checks are needed in order to match the pre-CASSANDRA-15234 upper bound for those parameters which were still in megabits per second
+        if (conf.stream_throughput_outbound.toMegabitsPerSecond() >= Integer.MAX_VALUE)
+        {
+            throw new ConfigurationException("Invalid value of stream_throughput_outbound: " + conf.stream_throughput_outbound.toString(), false);
+        }
+
+        if (conf.inter_dc_stream_throughput_outbound.toMegabitsPerSecond() >= Integer.MAX_VALUE)
+        {
+            throw new ConfigurationException("Invalid value of inter_dc_stream_throughput_outbound: " + conf.inter_dc_stream_throughput_outbound.toString(), false);
+        }
+
+        if (conf.entire_sstable_stream_throughput_outbound.toMebibytesPerSecond() >= Integer.MAX_VALUE)
+        {
+            throw new ConfigurationException("Invalid value of entire_sstable_stream_throughput_outbound: " + conf.entire_sstable_stream_throughput_outbound.toString(), false);
+        }
+
+        if (conf.entire_sstable_inter_dc_stream_throughput_outbound.toMebibytesPerSecond() >= Integer.MAX_VALUE)
+        {
+            throw new ConfigurationException("Invalid value of entire_sstable_inter_dc_stream_throughput_outbound: " + conf.entire_sstable_inter_dc_stream_throughput_outbound.toString(), false);
+        }
+
+        if (conf.compaction_throughput.toMebibytesPerSecond() >= Integer.MAX_VALUE)
+        {
+            throw new ConfigurationException("Invalid value of compaction_throughput: " + conf.compaction_throughput.toString(), false);
+        }
+    }
+
     @VisibleForTesting
     static void applyConcurrentValidations(Config config)
     {
@@ -1607,9 +1642,7 @@ public class DatabaseDescriptor
 
     public static void setColumnIndexSize(int val)
     {
-        DataStorageSpec.IntKibibytesBound memory = new DataStorageSpec.IntKibibytesBound(val);
-        checkValidForByteConversion(memory, "column_index_size");
-        conf.column_index_size = new DataStorageSpec.IntKibibytesBound(val);
+        conf.column_index_size = createIntKibibyteBoundAndEnsureItIsValidForByteConversion(val, "column_index_size");
     }
 
     public static int getColumnIndexCacheSize()
@@ -1624,9 +1657,7 @@ public class DatabaseDescriptor
 
     public static void setColumnIndexCacheSize(int val)
     {
-        DataStorageSpec.IntKibibytesBound memory = new DataStorageSpec.IntKibibytesBound(val);
-        checkValidForByteConversion(memory, "column_index_cache_size");
-        conf.column_index_cache_size = new DataStorageSpec.IntKibibytesBound(val);
+        conf.column_index_cache_size = createIntKibibyteBoundAndEnsureItIsValidForByteConversion(val, "column_index_cache_size");
     }
 
     public static int getBatchSizeWarnThreshold()
@@ -1641,7 +1672,7 @@ public class DatabaseDescriptor
 
     public static long getBatchSizeFailThreshold()
     {
-        return conf.batch_size_fail_threshold.toBytes();
+        return conf.batch_size_fail_threshold.toBytesInLong();
     }
 
     public static int getBatchSizeFailThresholdInKiB()
@@ -1656,9 +1687,7 @@ public class DatabaseDescriptor
 
     public static void setBatchSizeWarnThresholdInKiB(int threshold)
     {
-        DataStorageSpec.IntKibibytesBound storage = new DataStorageSpec.IntKibibytesBound(threshold);
-        checkValidForByteConversion(storage, "batch_size_warn_threshold");
-        conf.batch_size_warn_threshold = new DataStorageSpec.IntKibibytesBound(threshold);
+        conf.batch_size_warn_threshold = createIntKibibyteBoundAndEnsureItIsValidForByteConversion(threshold, "batch_size_warn_threshold");
     }
 
     public static void setBatchSizeFailThresholdInKiB(int threshold)
@@ -1956,17 +1985,38 @@ public class DatabaseDescriptor
         return conf.compaction_throughput.toMebibytesPerSecondAsInt();
     }
 
+    public static double getCompactionThroughputBytesPerSec()
+    {
+        return conf.compaction_throughput.toBytesPerSecond();
+    }
+
     public static double getCompactionThroughputMebibytesPerSec()
     {
         return conf.compaction_throughput.toMebibytesPerSecond();
     }
 
+    @VisibleForTesting // only for testing!
+    public static void setCompactionThroughputBytesPerSec(int value)
+    {
+        if (BYTES_PER_SECOND.toMebibytesPerSecond(value) >= Integer.MAX_VALUE)
+            throw new IllegalArgumentException("compaction_throughput: " + value +
+                                               " is too large; it should be less than " +
+                                               Integer.MAX_VALUE + " in MiB/s");
+
+        conf.compaction_throughput = new DataRateSpec.LongBytesPerSecondBound(value);
+    }
+
     public static void setCompactionThroughputMebibytesPerSec(int value)
     {
-        conf.compaction_throughput = new DataRateSpec.IntMebibytesPerSecondBound(value);
+        if (value == Integer.MAX_VALUE)
+            throw new IllegalArgumentException("compaction_throughput: " + value +
+                                               " is too large; it should be less than " +
+                                               Integer.MAX_VALUE + " in MiB/s");
+
+        conf.compaction_throughput = new DataRateSpec.LongBytesPerSecondBound(value, MEBIBYTES_PER_SECOND);
     }
 
-    public static long getCompactionLargePartitionWarningThreshold() { return conf.compaction_large_partition_warning_threshold.toBytes(); }
+    public static long getCompactionLargePartitionWarningThreshold() { return conf.compaction_large_partition_warning_threshold.toBytesInLong(); }
 
     public static int getCompactionTombstoneWarningThreshold()
     {
@@ -1983,6 +2033,11 @@ public class DatabaseDescriptor
         return conf.concurrent_validations;
     }
 
+    public static int getConcurrentIndexBuilders()
+    {
+        return conf.concurrent_index_builders;
+    }
+
     public static void setConcurrentValidations(int value)
     {
         value = value > 0 ? value : Integer.MAX_VALUE;
@@ -2001,7 +2056,7 @@ public class DatabaseDescriptor
 
     public static long getMinFreeSpacePerDriveInBytes()
     {
-        return conf.min_free_space_per_drive.toBytes();
+        return conf.min_free_space_per_drive.toBytesInLong();
     }
 
     public static boolean getDisableSTCSInL0()
@@ -2019,19 +2074,39 @@ public class DatabaseDescriptor
         return conf.stream_throughput_outbound.toMegabitsPerSecondAsInt();
     }
 
+    public static double getStreamThroughputOutboundMegabitsPerSecAsDouble()
+    {
+        return conf.stream_throughput_outbound.toMegabitsPerSecond();
+    }
+
     public static double getStreamThroughputOutboundMebibytesPerSec()
     {
         return conf.stream_throughput_outbound.toMebibytesPerSecond();
     }
 
-    public static void setStreamThroughputOutboundMegabitsPerSec(int value)
+    public static double getStreamThroughputOutboundBytesPerSec()
     {
-        conf.stream_throughput_outbound = DataRateSpec.IntMebibytesPerSecondBound.megabitsPerSecondInMebibytesPerSecond(value);
+        return conf.stream_throughput_outbound.toBytesPerSecond();
     }
 
-    public static int getEntireSSTableStreamThroughputOutboundMebibytesPerSecAsInt()
+    public static int getStreamThroughputOutboundMebibytesPerSecAsInt()
     {
-        return conf.entire_sstable_stream_throughput_outbound.toMebibytesPerSecondAsInt();
+        return conf.stream_throughput_outbound.toMebibytesPerSecondAsInt();
+    }
+
+    public static void setStreamThroughputOutboundMebibytesPerSecAsInt(int value)
+    {
+        if (MEBIBYTES_PER_SECOND.toMegabitsPerSecond(value) >= Integer.MAX_VALUE)
+            throw new IllegalArgumentException("stream_throughput_outbound: " + value  +
+                                               " is too large; it should be less than " +
+                                               Integer.MAX_VALUE + " in megabits/s");
+
+        conf.stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound(value, MEBIBYTES_PER_SECOND);
+    }
+
+    public static void setStreamThroughputOutboundMegabitsPerSec(int value)
+    {
+        conf.stream_throughput_outbound = DataRateSpec.LongBytesPerSecondBound.megabitsPerSecondInBytesPerSecond(value);
     }
 
     public static double getEntireSSTableStreamThroughputOutboundMebibytesPerSec()
@@ -2039,9 +2114,19 @@ public class DatabaseDescriptor
         return conf.entire_sstable_stream_throughput_outbound.toMebibytesPerSecond();
     }
 
+    public static double getEntireSSTableStreamThroughputOutboundBytesPerSec()
+    {
+        return conf.entire_sstable_stream_throughput_outbound.toBytesPerSecond();
+    }
+
     public static void setEntireSSTableStreamThroughputOutboundMebibytesPerSec(int value)
     {
-        conf.entire_sstable_stream_throughput_outbound = new DataRateSpec.IntMebibytesPerSecondBound(value);
+        if (value == Integer.MAX_VALUE)
+            throw new IllegalArgumentException("entire_sstable_stream_throughput_outbound: " + value +
+                                               " is too large; it should be less than " +
+                                               Integer.MAX_VALUE + " in MiB/s");
+
+        conf.entire_sstable_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound(value, MEBIBYTES_PER_SECOND);
     }
 
     public static int getInterDCStreamThroughputOutboundMegabitsPerSec()
@@ -2049,29 +2134,59 @@ public class DatabaseDescriptor
         return conf.inter_dc_stream_throughput_outbound.toMegabitsPerSecondAsInt();
     }
 
+    public static double getInterDCStreamThroughputOutboundMegabitsPerSecAsDouble()
+    {
+        return conf.inter_dc_stream_throughput_outbound.toMegabitsPerSecond();
+    }
+
     public static double getInterDCStreamThroughputOutboundMebibytesPerSec()
     {
         return conf.inter_dc_stream_throughput_outbound.toMebibytesPerSecond();
     }
 
+    public static double getInterDCStreamThroughputOutboundBytesPerSec()
+    {
+        return conf.inter_dc_stream_throughput_outbound.toBytesPerSecond();
+    }
+
+    public static int getInterDCStreamThroughputOutboundMebibytesPerSecAsInt()
+    {
+        return conf.inter_dc_stream_throughput_outbound.toMebibytesPerSecondAsInt();
+    }
+
+    public static void setInterDCStreamThroughputOutboundMebibytesPerSecAsInt(int value)
+    {
+        if (MEBIBYTES_PER_SECOND.toMegabitsPerSecond(value) >= Integer.MAX_VALUE)
+            throw new IllegalArgumentException("inter_dc_stream_throughput_outbound: " + value +
+                                               " is too large; it should be less than " +
+                                               Integer.MAX_VALUE + " in megabits/s");
+
+        conf.inter_dc_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound(value, MEBIBYTES_PER_SECOND);
+    }
+
     public static void setInterDCStreamThroughputOutboundMegabitsPerSec(int value)
     {
-        conf.inter_dc_stream_throughput_outbound = DataRateSpec.IntMebibytesPerSecondBound.megabitsPerSecondInMebibytesPerSecond(value);
+        conf.inter_dc_stream_throughput_outbound = DataRateSpec.LongBytesPerSecondBound.megabitsPerSecondInBytesPerSecond(value);
     }
 
-    public static double getEntireSSTableInterDCStreamThroughputOutboundMebibytesPerSec()
+    public static double getEntireSSTableInterDCStreamThroughputOutboundBytesPerSec()
     {
-        return conf.entire_sstable_inter_dc_stream_throughput_outbound.toMebibytesPerSecond();
+        return conf.entire_sstable_inter_dc_stream_throughput_outbound.toBytesPerSecond();
     }
 
-    public static int getEntireSSTableInterDCStreamThroughputOutboundMebibytesPerSecAsInt()
+    public static double getEntireSSTableInterDCStreamThroughputOutboundMebibytesPerSec()
     {
-        return conf.entire_sstable_inter_dc_stream_throughput_outbound.toMebibytesPerSecondAsInt();
+        return conf.entire_sstable_inter_dc_stream_throughput_outbound.toMebibytesPerSecond();
     }
 
     public static void setEntireSSTableInterDCStreamThroughputOutboundMebibytesPerSec(int value)
     {
-        conf.entire_sstable_inter_dc_stream_throughput_outbound = new DataRateSpec.IntMebibytesPerSecondBound(value);
+        if (value == Integer.MAX_VALUE)
+            throw new IllegalArgumentException("entire_sstable_inter_dc_stream_throughput_outbound: " + value +
+                                               " is too large; it should be less than " +
+                                               Integer.MAX_VALUE + " in MiB/s");
+
+        conf.entire_sstable_inter_dc_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound(value, MEBIBYTES_PER_SECOND);
     }
 
     /**
@@ -2977,7 +3092,7 @@ public class DatabaseDescriptor
 
     public static long getMaxHintsFileSize()
     {
-        return  conf.max_hints_file_size.toBytes();
+        return conf.max_hints_file_size.toBytesInLong();
     }
 
     public static ParameterizedClass getHintsCompression()
@@ -3075,14 +3190,21 @@ public class DatabaseDescriptor
         conf.key_cache_migrate_during_compaction = migrateCacheEntry;
     }
 
+    /** This method can return negative number for disabled */
     public static int getSSTablePreemptiveOpenIntervalInMiB()
     {
+        if (conf.sstable_preemptive_open_interval == null)
+            return -1;
         return conf.sstable_preemptive_open_interval.toMebibytes();
     }
 
+    /** Negative number for disabled */
     public static void setSSTablePreemptiveOpenIntervalInMiB(int mib)
     {
-        conf.sstable_preemptive_open_interval = new DataStorageSpec.IntMebibytesBound(mib);
+        if (mib < 0)
+            conf.sstable_preemptive_open_interval = null;
+        else
+            conf.sstable_preemptive_open_interval = new DataStorageSpec.IntMebibytesBound(mib);
     }
 
     public static boolean getTrickleFsync()
@@ -3315,9 +3437,20 @@ public class DatabaseDescriptor
 
     public static int getIndexSummaryResizeIntervalInMinutes()
     {
+        if (conf.index_summary_resize_interval == null)
+            return -1;
+
         return conf.index_summary_resize_interval.toMinutes();
     }
 
+    public static void setIndexSummaryResizeIntervalInMinutes(int value)
+    {
+        if (value == -1)
+            conf.index_summary_resize_interval = null;
+        else
+            conf.index_summary_resize_interval = new DurationSpec.IntMinutesBound(value);
+    }
+
     public static boolean hasLargeAddressSpace()
     {
         // currently we just check if it's a 64bit arch, but any we only really care if the address space is large
@@ -3455,6 +3588,11 @@ public class DatabaseDescriptor
         return conf.gc_log_threshold.toMilliseconds();
     }
 
+    public static void setGCLogThreshold(int gcLogThreshold)
+    {
+        conf.gc_log_threshold = new DurationSpec.IntMillisecondsBound(gcLogThreshold);
+    }
+
     public static EncryptionContext getEncryptionContext()
     {
         return encryptionContext;
@@ -3465,6 +3603,11 @@ public class DatabaseDescriptor
         return conf.gc_warn_threshold.toMilliseconds();
     }
 
+    public static void setGCWarnThreshold(int threshold)
+    {
+        conf.gc_warn_threshold = new DurationSpec.IntMillisecondsBound(threshold);
+    }
+
     public static boolean isCDCEnabled()
     {
         return conf.cdc_enabled;
@@ -3486,6 +3629,16 @@ public class DatabaseDescriptor
         conf.cdc_block_writes = val;
     }
 
+    public static boolean isCDCOnRepairEnabled()
+    {
+        return conf.cdc_on_repair_enabled;
+    }
+
+    public static void setCDCOnRepairEnabled(boolean val)
+    {
+        conf.cdc_on_repair_enabled = val;
+    }
+
     public static String getCDCLogLocation()
     {
         return conf.cdc_raw_directory;
@@ -3697,12 +3850,19 @@ public class DatabaseDescriptor
         commitLogSegmentMgrProvider = provider;
     }
 
+    private static DataStorageSpec.IntKibibytesBound createIntKibibyteBoundAndEnsureItIsValidForByteConversion(int kibibytes, String propertyName)
+    {
+        DataStorageSpec.IntKibibytesBound intKibibytesBound = new DataStorageSpec.IntKibibytesBound(kibibytes);
+        checkValidForByteConversion(intKibibytesBound, propertyName);
+        return intKibibytesBound;
+    }
+
     /**
      * Ensures passed in configuration value is positive and will not overflow when converted to Bytes
      */
     private static void checkValidForByteConversion(final DataStorageSpec.IntKibibytesBound value, String name)
     {
-        long valueInBytes = value.toBytes();
+        long valueInBytes = value.toBytesInLong();
         if (valueInBytes < 0 || valueInBytes > Integer.MAX_VALUE - 1)
         {
             throw new ConfigurationException(String.format("%s must be positive value <= %dB, but was %dB",
@@ -4079,6 +4239,11 @@ public class DatabaseDescriptor
             throw new IllegalArgumentException(String.format("default_keyspace_rf to be set (%d) cannot be less than minimum_replication_factor_fail_threshold (%d)", value, guardrails.getMinimumReplicationFactorFailThreshold()));
         }
 
+        if (guardrails.getMaximumReplicationFactorFailThreshold() != -1 && value > guardrails.getMaximumReplicationFactorFailThreshold())
+        {
+            throw new IllegalArgumentException(String.format("default_keyspace_rf to be set (%d) cannot be greater than maximum_replication_factor_fail_threshold (%d)", value, guardrails.getMaximumReplicationFactorFailThreshold()));
+        }
+
         conf.default_keyspace_rf = value;
     }
 
@@ -4141,7 +4306,7 @@ public class DatabaseDescriptor
 
     public static boolean isUUIDSSTableIdentifiersEnabled()
     {
-        return conf.enable_uuid_sstable_identifiers;
+        return conf.uuid_sstable_identifiers_enabled;
     }
 
     public static DurationSpec.LongNanosecondsBound getRepairStateExpires()
@@ -4197,14 +4362,14 @@ public class DatabaseDescriptor
         conf.max_top_tombstone_partition_count = value;
     }
 
-    public static DataStorageSpec.LongBytesBound getMinTrackedPartitionSize()
+    public static DataStorageSpec.LongBytesBound getMinTrackedPartitionSizeInBytes()
     {
-        return conf.min_tracked_partition_size_bytes;
+        return conf.min_tracked_partition_size;
     }
 
-    public static void setMinTrackedPartitionSize(DataStorageSpec.LongBytesBound spec)
+    public static void setMinTrackedPartitionSizeInBytes(DataStorageSpec.LongBytesBound spec)
     {
-        conf.min_tracked_partition_size_bytes = spec;
+        conf.min_tracked_partition_size = spec;
     }
 
     public static long getMinTrackedPartitionTombstoneCount()
index eb6724f96d87b3fddd67fbd4f1d568f34e377fff..0ab653f0888515b8d949781e249fc589dabf8ad9 100644 (file)
@@ -25,20 +25,14 @@ import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
 
-import javax.net.ssl.KeyManagerFactory;
-import javax.net.ssl.SSLException;
-import javax.net.ssl.TrustManagerFactory;
-
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableList;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.fasterxml.jackson.annotation.JsonIgnore;
 import org.apache.cassandra.locator.IEndpointSnitch;
 import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.security.AbstractSslContextFactory;
 import org.apache.cassandra.security.DisableSslContextFactory;
 import org.apache.cassandra.security.ISslContextFactory;
 import org.apache.cassandra.utils.FBUtilities;
@@ -111,6 +105,8 @@ public class EncryptionOptions
     {
         KEYSTORE("keystore"),
         KEYSTORE_PASSWORD("keystore_password"),
+        OUTBOUND_KEYSTORE("outbound_keystore"),
+        OUTBOUND_KEYSTORE_PASSWORD("outbound_keystore_password"),
         TRUSTSTORE("truststore"),
         TRUSTSTORE_PASSWORD("truststore_password"),
         CIPHER_SUITES("cipher_suites"),
@@ -263,11 +259,8 @@ public class EncryptionOptions
         }
     }
 
-    private void initializeSslContextFactory()
+    protected void fillSslContextParams(Map<String, Object> sslContextFactoryParameters)
     {
-        Map<String,Object> sslContextFactoryParameters = new HashMap<>();
-        prepareSslContextFactoryParameterizedKeys(sslContextFactoryParameters);
-
         /*
          * Copy all configs to the Map to pass it on to the ISslContextFactory's implementation
          */
@@ -284,6 +277,13 @@ public class EncryptionOptions
         putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.REQUIRE_ENDPOINT_VERIFICATION, this.require_endpoint_verification);
         putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.ENABLED, this.enabled);
         putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.OPTIONAL, this.optional);
+    }
+
+    private void initializeSslContextFactory()
+    {
+        Map<String, Object> sslContextFactoryParameters = new HashMap<>();
+        prepareSslContextFactoryParameterizedKeys(sslContextFactoryParameters);
+        fillSslContextParams(sslContextFactoryParameters);
 
         if (CassandraRelevantProperties.TEST_JVM_DTEST_DISABLE_SSL.getBoolean())
         {
@@ -296,8 +296,7 @@ public class EncryptionOptions
         }
     }
 
-    private void putSslContextFactoryParameter(Map<String,Object> existingParameters, ConfigKey configKey,
-                                               Object value)
+    protected static void putSslContextFactoryParameter(Map<String, Object> existingParameters, ConfigKey configKey, Object value)
     {
         if (value != null) {
             existingParameters.put(configKey.getKeyName(), value);
@@ -608,15 +607,20 @@ public class EncryptionOptions
         public final InternodeEncryption internode_encryption;
         @Replaces(oldName = "enable_legacy_ssl_storage_port", deprecated = true)
         public final boolean legacy_ssl_storage_port_enabled;
+        public final String outbound_keystore;
+        public final String outbound_keystore_password;
 
         public ServerEncryptionOptions()
         {
             this.internode_encryption = InternodeEncryption.none;
             this.legacy_ssl_storage_port_enabled = false;
+            this.outbound_keystore = null;
+            this.outbound_keystore_password = null;
         }
 
         public ServerEncryptionOptions(ParameterizedClass sslContextFactoryClass, String keystore,
-                                       String keystore_password, String truststore, String truststore_password,
+                                       String keystore_password, String outbound_keystore,
+                                       String outbound_keystore_password, String truststore, String truststore_password,
                                        List<String> cipher_suites, String protocol, List<String> accepted_protocols,
                                        String algorithm, String store_type, boolean require_client_auth,
                                        boolean require_endpoint_verification, Boolean optional,
@@ -627,6 +631,8 @@ public class EncryptionOptions
             null, optional);
             this.internode_encryption = internode_encryption;
             this.legacy_ssl_storage_port_enabled = legacy_ssl_storage_port_enabled;
+            this.outbound_keystore = outbound_keystore;
+            this.outbound_keystore_password = outbound_keystore_password;
         }
 
         public ServerEncryptionOptions(ServerEncryptionOptions options)
@@ -634,6 +640,16 @@ public class EncryptionOptions
             super(options);
             this.internode_encryption = options.internode_encryption;
             this.legacy_ssl_storage_port_enabled = options.legacy_ssl_storage_port_enabled;
+            this.outbound_keystore = options.outbound_keystore;
+            this.outbound_keystore_password = options.outbound_keystore_password;
+        }
+
+        @Override
+        protected void fillSslContextParams(Map<String, Object> sslContextFactoryParameters)
+        {
+            super.fillSslContextParams(sslContextFactoryParameters);
+            putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.OUTBOUND_KEYSTORE, this.outbound_keystore);
+            putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.OUTBOUND_KEYSTORE_PASSWORD, this.outbound_keystore_password);
         }
 
         @Override
@@ -697,7 +713,6 @@ public class EncryptionOptions
          * values of "dc" and "all". This method returns the explicit, raw value of {@link #optional}
          * as set by the user (if set at all).
          */
-        @JsonIgnore
         public boolean isExplicitlyOptional()
         {
             return optional != null && optional;
@@ -705,7 +720,8 @@ public class EncryptionOptions
 
         public ServerEncryptionOptions withSslContextFactory(ParameterizedClass sslContextFactoryClass)
         {
-            return new ServerEncryptionOptions(sslContextFactoryClass, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(sslContextFactoryClass, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -714,7 +730,8 @@ public class EncryptionOptions
 
         public ServerEncryptionOptions withKeyStore(String keystore)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -723,7 +740,8 @@ public class EncryptionOptions
 
         public ServerEncryptionOptions withKeyStorePassword(String keystore_password)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -732,7 +750,8 @@ public class EncryptionOptions
 
         public ServerEncryptionOptions withTrustStore(String truststore)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -741,7 +760,8 @@ public class EncryptionOptions
 
         public ServerEncryptionOptions withTrustStorePassword(String truststore_password)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -750,16 +770,18 @@ public class EncryptionOptions
 
         public ServerEncryptionOptions withCipherSuites(List<String> cipher_suites)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
                                                legacy_ssl_storage_port_enabled).applyConfigInternal();
         }
 
-        public ServerEncryptionOptions withCipherSuites(String ... cipher_suites)
+        public ServerEncryptionOptions withCipherSuites(String... cipher_suites)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, Arrays.asList(cipher_suites), protocol,
                                                accepted_protocols, algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -768,7 +790,8 @@ public class EncryptionOptions
 
         public ServerEncryptionOptions withProtocol(String protocol)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -777,7 +800,8 @@ public class EncryptionOptions
 
         public ServerEncryptionOptions withAcceptedProtocols(List<String> accepted_protocols)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -786,7 +810,8 @@ public class EncryptionOptions
 
         public ServerEncryptionOptions withAlgorithm(String algorithm)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -795,7 +820,8 @@ public class EncryptionOptions
 
         public ServerEncryptionOptions withStoreType(String store_type)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -804,7 +830,8 @@ public class EncryptionOptions
 
         public ServerEncryptionOptions withRequireClientAuth(boolean require_client_auth)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -813,7 +840,8 @@ public class EncryptionOptions
 
         public ServerEncryptionOptions withRequireEndpointVerification(boolean require_endpoint_verification)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -822,7 +850,8 @@ public class EncryptionOptions
 
         public ServerEncryptionOptions withOptional(boolean optional)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -831,7 +860,8 @@ public class EncryptionOptions
 
         public ServerEncryptionOptions withInternodeEncryption(InternodeEncryption internode_encryption)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -840,12 +870,32 @@ public class EncryptionOptions
 
         public ServerEncryptionOptions withLegacySslStoragePort(boolean enable_legacy_ssl_storage_port)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
                                                enable_legacy_ssl_storage_port).applyConfigInternal();
         }
 
+        public ServerEncryptionOptions withOutboundKeystore(String outboundKeystore)
+        {
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outboundKeystore, outbound_keystore_password, truststore,
+                                               truststore_password, cipher_suites, protocol, accepted_protocols,
+                                               algorithm, store_type, require_client_auth,
+                                               require_endpoint_verification, optional, internode_encryption,
+                                               legacy_ssl_storage_port_enabled).applyConfigInternal();
+        }
+
+        public ServerEncryptionOptions withOutboundKeystorePassword(String outboundKeystorePassword)
+        {
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outboundKeystorePassword, truststore,
+                                               truststore_password, cipher_suites, protocol, accepted_protocols,
+                                               algorithm, store_type, require_client_auth,
+                                               require_endpoint_verification, optional, internode_encryption,
+                                               legacy_ssl_storage_port_enabled).applyConfigInternal();
+        }
     }
 }
index e4694b9c307ca3dbf06d1fb31c53794a76746263..e84e0e2a9f94d5c7a9b50649f6bba76e45e4724c 100644 (file)
@@ -81,7 +81,8 @@ public class GuardrailsOptions implements GuardrailsConfig
         validateMaxIntThreshold(config.fields_per_udt_warn_threshold, config.fields_per_udt_fail_threshold, "fields_per_udt");
         validatePercentageThreshold(config.data_disk_usage_percentage_warn_threshold, config.data_disk_usage_percentage_fail_threshold, "data_disk_usage_percentage");
         validateDataDiskUsageMaxDiskSize(config.data_disk_usage_max_disk_size);
-        validateMinRFThreshold(config.minimum_replication_factor_warn_threshold, config.minimum_replication_factor_fail_threshold, "minimum_replication_factor");
+        validateMinRFThreshold(config.minimum_replication_factor_warn_threshold, config.minimum_replication_factor_fail_threshold);
+        validateMaxRFThreshold(config.maximum_replication_factor_warn_threshold, config.maximum_replication_factor_fail_threshold);
     }
 
     @Override
@@ -343,6 +344,20 @@ public class GuardrailsOptions implements GuardrailsConfig
                                   x -> config.drop_truncate_table_enabled = x);
     }
 
+    @Override
+    public boolean getDropKeyspaceEnabled()
+    {
+        return config.drop_keyspace_enabled;
+    }
+
+    public void setDropKeyspaceEnabled(boolean enabled)
+    {
+        updatePropertyWithLogging("drop_keyspace_enabled",
+                                  enabled,
+                                  () -> config.drop_keyspace_enabled,
+                                  x -> config.drop_keyspace_enabled = x);
+    }
+
     @Override
     public boolean getSecondaryIndexesEnabled()
     {
@@ -385,6 +400,20 @@ public class GuardrailsOptions implements GuardrailsConfig
                                   x -> config.compact_tables_enabled = x);
     }
 
+    @Override
+    public boolean getAlterTableEnabled()
+    {
+        return config.alter_table_enabled;
+    }
+
+    public void setAlterTableEnabled(boolean enabled)
+    {
+        updatePropertyWithLogging("alter_table_enabled",
+                                  enabled,
+                                  () -> config.alter_table_enabled,
+                                  x -> config.alter_table_enabled = x);
+    }
+
     @Override
     public boolean getReadBeforeWriteListOperationsEnabled()
     {
@@