DataOutputBuffer#scratchBuffer can use off-heap or on-heap memory as a means to control memory allocations (trunk)
author: David Capwell <dcapwell@apache.org>
Fri, 12 Aug 2022 22:20:37 +0000 (15:20 -0700)
committer: David Capwell <dcapwell@apache.org>
Sat, 13 Aug 2022 03:04:14 +0000 (20:04 -0700)
patch by David Capwell; reviewed by Caleb Rackliffe for CASSANDRA-16471

407 files changed:
.build/build-rat.xml
.circleci/config-2_1.yml
.circleci/config.yml
.circleci/config.yml.HIGHRES
.circleci/config.yml.LOWRES
.circleci/config.yml.MIDRES
CHANGES.txt
NEWS.txt
README.asc
build.xml
checkstyle.xml
conf/cassandra.yaml
doc/cql3/CQL.textile
doc/modules/cassandra/pages/cql/changes.adoc
doc/modules/cassandra/pages/cql/dml.adoc
doc/modules/cassandra/pages/faq/index.adoc
doc/modules/cassandra/pages/new/virtualtables.adoc
doc/modules/cassandra/pages/operating/compaction/index.adoc
ide/nbproject/project.xml
pylib/cqlshlib/formatting.py
pylib/cqlshlib/pylexotron.py
redhat/noboolean/README [new file with mode: 0644]
redhat/noboolean/cassandra [new symlink]
redhat/noboolean/cassandra.conf [new symlink]
redhat/noboolean/cassandra.in.sh [new symlink]
redhat/noboolean/cassandra.spec [new file with mode: 0644]
redhat/noboolean/default [new symlink]
src/antlr/Parser.g
src/java/org/apache/cassandra/auth/CassandraRoleManager.java
src/java/org/apache/cassandra/auth/IInternodeAuthenticator.java
src/java/org/apache/cassandra/concurrent/DebuggableTask.java [new file with mode: 0644]
src/java/org/apache/cassandra/concurrent/ExecutionFailure.java
src/java/org/apache/cassandra/concurrent/ExecutorFactory.java
src/java/org/apache/cassandra/concurrent/FutureTask.java
src/java/org/apache/cassandra/concurrent/SEPWorker.java
src/java/org/apache/cassandra/concurrent/SharedExecutorPool.java
src/java/org/apache/cassandra/concurrent/TaskFactory.java
src/java/org/apache/cassandra/config/CassandraRelevantProperties.java
src/java/org/apache/cassandra/config/Config.java
src/java/org/apache/cassandra/config/Converters.java
src/java/org/apache/cassandra/config/DataRateSpec.java
src/java/org/apache/cassandra/config/DataStorageSpec.java
src/java/org/apache/cassandra/config/DatabaseDescriptor.java
src/java/org/apache/cassandra/config/GuardrailsOptions.java
src/java/org/apache/cassandra/config/YamlConfigurationLoader.java
src/java/org/apache/cassandra/cql3/CQL3Type.java
src/java/org/apache/cassandra/cql3/ColumnIdentifier.java
src/java/org/apache/cassandra/cql3/QueryProcessor.java
src/java/org/apache/cassandra/cql3/Tuples.java
src/java/org/apache/cassandra/cql3/UserTypes.java
src/java/org/apache/cassandra/cql3/conditions/ColumnCondition.java
src/java/org/apache/cassandra/cql3/selection/AggregateFunctionSelector.java
src/java/org/apache/cassandra/cql3/selection/ColumnTimestamps.java [new file with mode: 0644]
src/java/org/apache/cassandra/cql3/selection/ElementsSelector.java
src/java/org/apache/cassandra/cql3/selection/FieldSelector.java
src/java/org/apache/cassandra/cql3/selection/ListSelector.java
src/java/org/apache/cassandra/cql3/selection/MapSelector.java
src/java/org/apache/cassandra/cql3/selection/ResultSetBuilder.java
src/java/org/apache/cassandra/cql3/selection/RowTimestamps.java [new file with mode: 0644]
src/java/org/apache/cassandra/cql3/selection/ScalarFunctionSelector.java
src/java/org/apache/cassandra/cql3/selection/Selectable.java
src/java/org/apache/cassandra/cql3/selection/Selection.java
src/java/org/apache/cassandra/cql3/selection/Selector.java
src/java/org/apache/cassandra/cql3/selection/SetSelector.java
src/java/org/apache/cassandra/cql3/selection/SimpleSelector.java
src/java/org/apache/cassandra/cql3/selection/TermSelector.java
src/java/org/apache/cassandra/cql3/selection/TupleSelector.java
src/java/org/apache/cassandra/cql3/selection/UserTypeSelector.java
src/java/org/apache/cassandra/cql3/selection/WritetimeOrTTLSelector.java
src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
src/java/org/apache/cassandra/cql3/statements/schema/DropKeyspaceStatement.java
src/java/org/apache/cassandra/db/ArrayClusteringBound.java
src/java/org/apache/cassandra/db/BufferClusteringBound.java
src/java/org/apache/cassandra/db/BufferClusteringBoundary.java
src/java/org/apache/cassandra/db/BufferDecoratedKey.java
src/java/org/apache/cassandra/db/Clustering.java
src/java/org/apache/cassandra/db/ClusteringBound.java
src/java/org/apache/cassandra/db/ClusteringBoundOrBoundary.java
src/java/org/apache/cassandra/db/ClusteringComparator.java
src/java/org/apache/cassandra/db/ClusteringPrefix.java
src/java/org/apache/cassandra/db/ColumnFamilyStore.java
src/java/org/apache/cassandra/db/ColumnFamilyStoreMBean.java
src/java/org/apache/cassandra/db/Columns.java
src/java/org/apache/cassandra/db/DataRange.java
src/java/org/apache/cassandra/db/DecoratedKey.java
src/java/org/apache/cassandra/db/DeletionInfo.java
src/java/org/apache/cassandra/db/Directories.java
src/java/org/apache/cassandra/db/DisallowedDirectories.java
src/java/org/apache/cassandra/db/EmptyIterators.java
src/java/org/apache/cassandra/db/Keyspace.java
src/java/org/apache/cassandra/db/MutableDeletionInfo.java
src/java/org/apache/cassandra/db/NativeDecoratedKey.java
src/java/org/apache/cassandra/db/PartitionPosition.java
src/java/org/apache/cassandra/db/RangeTombstoneList.java
src/java/org/apache/cassandra/db/RegularAndStaticColumns.java
src/java/org/apache/cassandra/db/SSTableImporter.java
src/java/org/apache/cassandra/db/SnapshotDetailsTabularData.java
src/java/org/apache/cassandra/db/SystemKeyspace.java
src/java/org/apache/cassandra/db/aggregation/AggregationSpecification.java
src/java/org/apache/cassandra/db/aggregation/GroupMaker.java
src/java/org/apache/cassandra/db/columniterator/SSTableIterator.java
src/java/org/apache/cassandra/db/columniterator/SSTableReversedIterator.java
src/java/org/apache/cassandra/db/commitlog/AbstractCommitLogSegmentManager.java
src/java/org/apache/cassandra/db/commitlog/CommitLog.java
src/java/org/apache/cassandra/db/compaction/CompactionManager.java
src/java/org/apache/cassandra/db/compaction/CompactionStrategyManager.java
src/java/org/apache/cassandra/db/compaction/Scrubber.java
src/java/org/apache/cassandra/db/compaction/TimeWindowCompactionStrategy.java
src/java/org/apache/cassandra/db/compaction/Verifier.java
src/java/org/apache/cassandra/db/filter/ClusteringIndexNamesFilter.java
src/java/org/apache/cassandra/db/guardrails/Guardrails.java
src/java/org/apache/cassandra/db/guardrails/GuardrailsConfig.java
src/java/org/apache/cassandra/db/guardrails/GuardrailsMBean.java
src/java/org/apache/cassandra/db/lifecycle/LogReplica.java
src/java/org/apache/cassandra/db/marshal/AbstractTimeUUIDType.java
src/java/org/apache/cassandra/db/marshal/AbstractType.java
src/java/org/apache/cassandra/db/marshal/BooleanType.java
src/java/org/apache/cassandra/db/marshal/ByteArrayAccessor.java
src/java/org/apache/cassandra/db/marshal/ByteArrayObjectFactory.java
src/java/org/apache/cassandra/db/marshal/ByteBufferAccessor.java
src/java/org/apache/cassandra/db/marshal/ByteBufferObjectFactory.java
src/java/org/apache/cassandra/db/marshal/ByteType.java
src/java/org/apache/cassandra/db/marshal/CollectionType.java
src/java/org/apache/cassandra/db/marshal/CompositeType.java
src/java/org/apache/cassandra/db/marshal/DateType.java
src/java/org/apache/cassandra/db/marshal/DecimalType.java
src/java/org/apache/cassandra/db/marshal/DoubleType.java
src/java/org/apache/cassandra/db/marshal/DynamicCompositeType.java
src/java/org/apache/cassandra/db/marshal/EmptyType.java
src/java/org/apache/cassandra/db/marshal/FloatType.java
src/java/org/apache/cassandra/db/marshal/Int32Type.java
src/java/org/apache/cassandra/db/marshal/IntegerType.java
src/java/org/apache/cassandra/db/marshal/LexicalUUIDType.java
src/java/org/apache/cassandra/db/marshal/ListType.java
src/java/org/apache/cassandra/db/marshal/LongType.java
src/java/org/apache/cassandra/db/marshal/MapType.java
src/java/org/apache/cassandra/db/marshal/PartitionerDefinedOrder.java
src/java/org/apache/cassandra/db/marshal/ReversedType.java
src/java/org/apache/cassandra/db/marshal/SetType.java
src/java/org/apache/cassandra/db/marshal/ShortType.java
src/java/org/apache/cassandra/db/marshal/SimpleDateType.java
src/java/org/apache/cassandra/db/marshal/TimeType.java
src/java/org/apache/cassandra/db/marshal/TimestampType.java
src/java/org/apache/cassandra/db/marshal/TupleType.java
src/java/org/apache/cassandra/db/marshal/UUIDType.java
src/java/org/apache/cassandra/db/marshal/UserType.java
src/java/org/apache/cassandra/db/marshal/ValueAccessor.java
src/java/org/apache/cassandra/db/memtable/AbstractAllocatorMemtable.java
src/java/org/apache/cassandra/db/memtable/AbstractMemtable.java
src/java/org/apache/cassandra/db/memtable/AbstractMemtableWithCommitlog.java
src/java/org/apache/cassandra/db/memtable/Flushing.java
src/java/org/apache/cassandra/db/memtable/Memtable.java
src/java/org/apache/cassandra/db/memtable/ShardedSkipListMemtable.java
src/java/org/apache/cassandra/db/memtable/SkipListMemtable.java
src/java/org/apache/cassandra/db/partitions/AbstractBTreePartition.java
src/java/org/apache/cassandra/db/partitions/AtomicBTreePartition.java
src/java/org/apache/cassandra/db/partitions/FilteredPartition.java
src/java/org/apache/cassandra/db/partitions/PartitionUpdate.java
src/java/org/apache/cassandra/db/rows/AbstractCell.java
src/java/org/apache/cassandra/db/rows/ArrayCell.java
src/java/org/apache/cassandra/db/rows/BTreeRow.java
src/java/org/apache/cassandra/db/rows/BufferCell.java
src/java/org/apache/cassandra/db/rows/Cell.java
src/java/org/apache/cassandra/db/rows/CellPath.java
src/java/org/apache/cassandra/db/rows/Cells.java
src/java/org/apache/cassandra/db/rows/ColumnData.java
src/java/org/apache/cassandra/db/rows/ComplexColumnData.java
src/java/org/apache/cassandra/db/rows/EncodingStats.java
src/java/org/apache/cassandra/db/rows/RangeTombstoneBoundMarker.java
src/java/org/apache/cassandra/db/rows/RangeTombstoneBoundaryMarker.java
src/java/org/apache/cassandra/db/rows/RangeTombstoneMarker.java
src/java/org/apache/cassandra/db/rows/Row.java
src/java/org/apache/cassandra/db/rows/Rows.java
src/java/org/apache/cassandra/db/rows/UnfilteredSerializer.java
src/java/org/apache/cassandra/db/rows/WrappingUnfilteredRowIterator.java
src/java/org/apache/cassandra/db/streaming/CassandraStreamManager.java
src/java/org/apache/cassandra/db/view/TableViews.java
src/java/org/apache/cassandra/db/virtual/QueriesTable.java [new file with mode: 0644]
src/java/org/apache/cassandra/db/virtual/SystemViewsKeyspace.java
src/java/org/apache/cassandra/dht/ByteOrderedPartitioner.java
src/java/org/apache/cassandra/dht/LocalPartitioner.java
src/java/org/apache/cassandra/dht/Murmur3Partitioner.java
src/java/org/apache/cassandra/dht/OrderPreservingPartitioner.java
src/java/org/apache/cassandra/dht/RandomPartitioner.java
src/java/org/apache/cassandra/dht/Token.java
src/java/org/apache/cassandra/gms/Gossiper.java
src/java/org/apache/cassandra/hints/HintsCatalog.java
src/java/org/apache/cassandra/hints/HintsWriter.java
src/java/org/apache/cassandra/index/internal/CassandraIndexSearcher.java
src/java/org/apache/cassandra/io/sstable/Descriptor.java
src/java/org/apache/cassandra/io/sstable/IndexSummaryManager.java
src/java/org/apache/cassandra/io/sstable/IndexSummaryManagerMBean.java
src/java/org/apache/cassandra/io/sstable/SSTable.java
src/java/org/apache/cassandra/io/sstable/SSTableHeaderFix.java
src/java/org/apache/cassandra/io/sstable/SSTableLoader.java
src/java/org/apache/cassandra/io/util/BufferedDataOutputStreamPlus.java
src/java/org/apache/cassandra/io/util/ChecksumWriter.java
src/java/org/apache/cassandra/io/util/DataOutputBuffer.java
src/java/org/apache/cassandra/io/util/File.java
src/java/org/apache/cassandra/locator/NetworkTopologyStrategy.java
src/java/org/apache/cassandra/locator/ReconnectableSnitchHelper.java
src/java/org/apache/cassandra/locator/TokenMetadata.java
src/java/org/apache/cassandra/metrics/TopPartitionTracker.java
src/java/org/apache/cassandra/net/Crc.java
src/java/org/apache/cassandra/net/InboundConnectionSettings.java
src/java/org/apache/cassandra/net/OutboundConnectionInitiator.java
src/java/org/apache/cassandra/net/OutboundConnectionSettings.java
src/java/org/apache/cassandra/net/StartupClusterConnectivityChecker.java
src/java/org/apache/cassandra/schema/DefaultSchemaUpdateHandler.java
src/java/org/apache/cassandra/schema/Schema.java
src/java/org/apache/cassandra/serializers/AbstractMapSerializer.java [new file with mode: 0644]
src/java/org/apache/cassandra/serializers/BooleanSerializer.java
src/java/org/apache/cassandra/serializers/CollectionSerializer.java
src/java/org/apache/cassandra/serializers/ListSerializer.java
src/java/org/apache/cassandra/serializers/MapSerializer.java
src/java/org/apache/cassandra/serializers/SetSerializer.java
src/java/org/apache/cassandra/service/CassandraDaemon.java
src/java/org/apache/cassandra/service/GCInspector.java
src/java/org/apache/cassandra/service/StartupCheck.java
src/java/org/apache/cassandra/service/StartupChecks.java
src/java/org/apache/cassandra/service/StorageProxy.java
src/java/org/apache/cassandra/service/StorageService.java
src/java/org/apache/cassandra/service/StorageServiceMBean.java
src/java/org/apache/cassandra/service/paxos/Paxos.java
src/java/org/apache/cassandra/service/paxos/PaxosPrepare.java
src/java/org/apache/cassandra/service/paxos/PaxosRepairHistory.java
src/java/org/apache/cassandra/service/paxos/uncommitted/PaxosBallotTracker.java
src/java/org/apache/cassandra/service/paxos/uncommitted/PaxosStateTracker.java
src/java/org/apache/cassandra/service/paxos/uncommitted/PaxosUncommittedIndex.java
src/java/org/apache/cassandra/service/paxos/uncommitted/PaxosUncommittedTracker.java
src/java/org/apache/cassandra/service/snapshot/SnapshotLoader.java
src/java/org/apache/cassandra/service/snapshot/SnapshotManifest.java
src/java/org/apache/cassandra/service/snapshot/TableSnapshot.java
src/java/org/apache/cassandra/streaming/StreamManager.java
src/java/org/apache/cassandra/streaming/StreamSession.java
src/java/org/apache/cassandra/streaming/StreamTransferTask.java
src/java/org/apache/cassandra/streaming/StreamingChannel.java
src/java/org/apache/cassandra/streaming/async/StreamingMultiplexedChannel.java
src/java/org/apache/cassandra/tools/BulkLoadConnectionFactory.java
src/java/org/apache/cassandra/tools/BulkLoader.java
src/java/org/apache/cassandra/tools/LoaderOptions.java
src/java/org/apache/cassandra/tools/NodeProbe.java
src/java/org/apache/cassandra/tools/nodetool/ClientStats.java
src/java/org/apache/cassandra/tools/nodetool/Compact.java
src/java/org/apache/cassandra/tools/nodetool/CompactionStats.java
src/java/org/apache/cassandra/tools/nodetool/GetCompactionThroughput.java
src/java/org/apache/cassandra/tools/nodetool/GetInterDCStreamThroughput.java
src/java/org/apache/cassandra/tools/nodetool/GetStreamThroughput.java
src/java/org/apache/cassandra/tools/nodetool/ListSnapshots.java
src/java/org/apache/cassandra/tools/nodetool/SetInterDCStreamThroughput.java
src/java/org/apache/cassandra/tools/nodetool/SetStreamThroughput.java
src/java/org/apache/cassandra/tools/nodetool/stats/StatsTable.java
src/java/org/apache/cassandra/tools/nodetool/stats/TableStatsHolder.java
src/java/org/apache/cassandra/transport/Dispatcher.java
src/java/org/apache/cassandra/transport/InitialConnectionHandler.java
src/java/org/apache/cassandra/transport/Message.java
src/java/org/apache/cassandra/transport/messages/QueryMessage.java
src/java/org/apache/cassandra/utils/ExpiringMemoizingSupplier.java
src/java/org/apache/cassandra/utils/binlog/BinLog.java
src/java/org/apache/cassandra/utils/binlog/ExternalArchiver.java
src/java/org/apache/cassandra/utils/btree/BTree.java
src/java/org/apache/cassandra/utils/btree/UpdateFunction.java
src/java/org/apache/cassandra/utils/bytecomparable/ByteComparable.java [new file with mode: 0644]
src/java/org/apache/cassandra/utils/bytecomparable/ByteComparable.md [new file with mode: 0644]
src/java/org/apache/cassandra/utils/bytecomparable/ByteSource.java [new file with mode: 0644]
src/java/org/apache/cassandra/utils/bytecomparable/ByteSourceInverse.java [new file with mode: 0644]
src/java/org/apache/cassandra/utils/memory/ByteBufferCloner.java [moved from src/java/org/apache/cassandra/utils/memory/AbstractAllocator.java with 50% similarity]
src/java/org/apache/cassandra/utils/memory/Cloner.java [new file with mode: 0644]
src/java/org/apache/cassandra/utils/memory/ContextAllocator.java [deleted file]
src/java/org/apache/cassandra/utils/memory/EnsureOnHeap.java
src/java/org/apache/cassandra/utils/memory/HeapCloner.java [new file with mode: 0644]
src/java/org/apache/cassandra/utils/memory/HeapPool.java
src/java/org/apache/cassandra/utils/memory/MemtableAllocator.java
src/java/org/apache/cassandra/utils/memory/MemtableBufferAllocator.java
src/java/org/apache/cassandra/utils/memory/NativeAllocator.java
src/java/org/apache/cassandra/utils/memory/SlabAllocator.java
test/burn/org/apache/cassandra/utils/LongBTreeTest.java
test/conf/cassandra-converters-special-cases-old-names.yaml [new file with mode: 0644]
test/conf/cassandra-converters-special-cases.yaml [new file with mode: 0644]
test/distributed/org/apache/cassandra/distributed/action/GossipHelper.java
test/distributed/org/apache/cassandra/distributed/fuzz/InJvmSutBase.java
test/distributed/org/apache/cassandra/distributed/impl/AbstractCluster.java
test/distributed/org/apache/cassandra/distributed/impl/Instance.java
test/distributed/org/apache/cassandra/distributed/impl/RowUtil.java
test/distributed/org/apache/cassandra/distributed/shared/ClusterUtils.java
test/distributed/org/apache/cassandra/distributed/test/AlterTest.java
test/distributed/org/apache/cassandra/distributed/test/EphemeralSnapshotTest.java [new file with mode: 0644]
test/distributed/org/apache/cassandra/distributed/test/GossipTest.java
test/distributed/org/apache/cassandra/distributed/test/InternodeEncryptionEnforcementTest.java
test/distributed/org/apache/cassandra/distributed/test/NativeProtocolTest.java
test/distributed/org/apache/cassandra/distributed/test/PaxosRepairTest2.java
test/distributed/org/apache/cassandra/distributed/test/QueriesTableTest.java [new file with mode: 0644]
test/distributed/org/apache/cassandra/distributed/test/RepairErrorsTest.java
test/distributed/org/apache/cassandra/distributed/test/SSTableIdGenerationTest.java
test/distributed/org/apache/cassandra/distributed/test/SchemaTest.java
test/distributed/org/apache/cassandra/distributed/test/TopPartitionsTest.java
test/distributed/org/apache/cassandra/distributed/test/hostreplacement/FailedBootstrapTest.java [new file with mode: 0644]
test/distributed/org/apache/cassandra/distributed/test/hostreplacement/HostReplacementTest.java
test/distributed/org/apache/cassandra/distributed/upgrade/BatchUpgradeTest.java
test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityTestBase.java
test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV30AllOneTest.java [moved from test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV3XTest.java with 70% similarity]
test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV30OneAllTest.java [moved from test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV30Test.java with 70% similarity]
test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV30QuorumQuorumTest.java [moved from src/java/org/apache/cassandra/utils/memory/HeapAllocator.java with 57% similarity]
test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV3XAllOneTest.java [new file with mode: 0644]
test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV3XOneAllTest.java [new file with mode: 0644]
test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV3XQuorumQuorumTest.java [new file with mode: 0644]
test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeWritetimeOrTTLTest.java [new file with mode: 0644]
test/long/org/apache/cassandra/cql3/CorruptionTest.java
test/microbench/org/apache/cassandra/test/microbench/AbstractTypeByteSourceDecodingBench.java [new file with mode: 0644]
test/microbench/org/apache/cassandra/test/microbench/btree/AtomicBTreePartitionUpdateBench.java [new file with mode: 0644]
test/microbench/org/apache/cassandra/test/microbench/btree/Megamorphism.java
test/simulator/asm/org/apache/cassandra/simulator/asm/InterceptClasses.java
test/simulator/main/org/apache/cassandra/simulator/cluster/ClusterActionListener.java
test/simulator/main/org/apache/cassandra/simulator/cluster/ClusterActions.java
test/simulator/main/org/apache/cassandra/simulator/package-info.java
test/simulator/main/org/apache/cassandra/simulator/paxos/Ballots.java
test/simulator/main/org/apache/cassandra/simulator/paxos/PaxosRepairValidator.java
test/simulator/main/org/apache/cassandra/simulator/paxos/PaxosTopologyChangeVerifier.java
test/simulator/main/org/apache/cassandra/simulator/utils/KindOfSequence.java
test/simulator/test/org/apache/cassandra/simulator/test/ClassWithSynchronizedMethods.java
test/simulator/test/org/apache/cassandra/simulator/test/ShortPaxosSimulationTest.java
test/simulator/test/org/apache/cassandra/simulator/test/SimulationTestBase.java
test/simulator/test/org/apache/cassandra/simulator/test/TrivialSimulationTest.java
test/unit/org/apache/cassandra/ServerTestUtils.java
test/unit/org/apache/cassandra/Util.java
test/unit/org/apache/cassandra/audit/AuditLoggerAuthTest.java
test/unit/org/apache/cassandra/concurrent/DebuggableScheduledThreadPoolExecutorTest.java
test/unit/org/apache/cassandra/config/DataRateSpecTest.java
test/unit/org/apache/cassandra/config/DatabaseDescriptorRefTest.java
test/unit/org/apache/cassandra/config/DatabaseDescriptorTest.java
test/unit/org/apache/cassandra/config/LoadOldYAMLBackwardCompatibilityTest.java
test/unit/org/apache/cassandra/config/ParseAndConvertUnitsTest.java
test/unit/org/apache/cassandra/config/YamlConfigurationLoaderTest.java
test/unit/org/apache/cassandra/cql3/BatchTests.java
test/unit/org/apache/cassandra/cql3/CQLTester.java
test/unit/org/apache/cassandra/cql3/PagingTest.java
test/unit/org/apache/cassandra/cql3/selection/SelectorSerializationTest.java
test/unit/org/apache/cassandra/cql3/validation/entities/CollectionsTest.java
test/unit/org/apache/cassandra/cql3/validation/entities/TupleTypeTest.java
test/unit/org/apache/cassandra/cql3/validation/entities/UserTypesTest.java
test/unit/org/apache/cassandra/cql3/validation/entities/WritetimeOrTTLTest.java
test/unit/org/apache/cassandra/cql3/validation/operations/DeleteTest.java
test/unit/org/apache/cassandra/cql3/validation/operations/InsertUpdateIfConditionTest.java
test/unit/org/apache/cassandra/db/CellTest.java
test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
test/unit/org/apache/cassandra/db/DirectoriesTest.java
test/unit/org/apache/cassandra/db/KeyCacheTest.java
test/unit/org/apache/cassandra/db/NativeCellTest.java
test/unit/org/apache/cassandra/db/TopPartitionTrackerTest.java
test/unit/org/apache/cassandra/db/aggregation/GroupMakerTest.java
test/unit/org/apache/cassandra/db/commitlog/CommitLogCQLTest.java
test/unit/org/apache/cassandra/db/compaction/CompactionStrategyManagerTest.java
test/unit/org/apache/cassandra/db/compaction/LeveledCompactionStrategyTest.java
test/unit/org/apache/cassandra/db/guardrails/GuardrailDropKeyspaceTest.java [new file with mode: 0644]
test/unit/org/apache/cassandra/db/guardrails/GuardrailTester.java
test/unit/org/apache/cassandra/db/marshal/DynamicCompositeTypeTest.java
test/unit/org/apache/cassandra/db/marshal/TypeValidationTest.java
test/unit/org/apache/cassandra/db/rows/RowBuilder.java [deleted file]
test/unit/org/apache/cassandra/db/rows/RowsMergingTest.java [new file with mode: 0644]
test/unit/org/apache/cassandra/db/rows/RowsTest.java
test/unit/org/apache/cassandra/db/virtual/SettingsTableTest.java
test/unit/org/apache/cassandra/dht/KeyCollisionTest.java
test/unit/org/apache/cassandra/dht/LengthPartitioner.java
test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterClientTest.java
test/unit/org/apache/cassandra/io/sstable/IndexSummaryManagerTest.java
test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java
test/unit/org/apache/cassandra/locator/NetworkTopologyStrategyTest.java
test/unit/org/apache/cassandra/metrics/BatchMetricsTest.java
test/unit/org/apache/cassandra/metrics/CQLMetricsTest.java
test/unit/org/apache/cassandra/metrics/ClientRequestMetricsTest.java
test/unit/org/apache/cassandra/metrics/KeyspaceMetricsTest.java
test/unit/org/apache/cassandra/metrics/TableMetricsTest.java
test/unit/org/apache/cassandra/schema/SchemaTest.java
test/unit/org/apache/cassandra/serializers/MapSerializerTest.java [new file with mode: 0644]
test/unit/org/apache/cassandra/serializers/SetSerializerTest.java [new file with mode: 0644]
test/unit/org/apache/cassandra/service/GCInspectorTest.java
test/unit/org/apache/cassandra/service/StartupChecksTest.java
test/unit/org/apache/cassandra/service/StorageProxyTest.java
test/unit/org/apache/cassandra/service/StorageServiceTest.java
test/unit/org/apache/cassandra/service/paxos/uncommitted/PaxosUncommittedTrackerTest.java
test/unit/org/apache/cassandra/service/snapshot/SnapshotLoaderTest.java
test/unit/org/apache/cassandra/service/snapshot/SnapshotManagerTest.java
test/unit/org/apache/cassandra/service/snapshot/SnapshotManifestTest.java
test/unit/org/apache/cassandra/service/snapshot/TableSnapshotTest.java
test/unit/org/apache/cassandra/streaming/StreamManagerTest.java
test/unit/org/apache/cassandra/tools/ToolRunner.java
test/unit/org/apache/cassandra/tools/nodetool/ClientStatsTest.java
test/unit/org/apache/cassandra/tools/nodetool/CompactionStatsTest.java
test/unit/org/apache/cassandra/tools/nodetool/SetGetColumnIndexSizeTest.java
test/unit/org/apache/cassandra/tools/nodetool/SetGetCompactionThroughputTest.java
test/unit/org/apache/cassandra/tools/nodetool/SetGetEntireSSTableInterDCStreamThroughputTest.java
test/unit/org/apache/cassandra/tools/nodetool/SetGetEntireSSTableStreamThroughputTest.java
test/unit/org/apache/cassandra/tools/nodetool/SetGetInterDCStreamThroughputTest.java
test/unit/org/apache/cassandra/tools/nodetool/SetGetStreamThroughputTest.java
test/unit/org/apache/cassandra/tools/nodetool/TpStatsTest.java
test/unit/org/apache/cassandra/transport/CQLUserAuditTest.java
test/unit/org/apache/cassandra/transport/SerDeserTest.java
test/unit/org/apache/cassandra/utils/btree/BTreeTest.java
test/unit/org/apache/cassandra/utils/bytecomparable/AbstractTypeByteSourceTest.java [new file with mode: 0644]
test/unit/org/apache/cassandra/utils/bytecomparable/ByteSourceComparisonTest.java [new file with mode: 0644]
test/unit/org/apache/cassandra/utils/bytecomparable/ByteSourceConversionTest.java [new file with mode: 0644]
test/unit/org/apache/cassandra/utils/bytecomparable/ByteSourceInverseTest.java [new file with mode: 0644]
test/unit/org/apache/cassandra/utils/bytecomparable/ByteSourceSequenceTest.java [new file with mode: 0644]
test/unit/org/apache/cassandra/utils/bytecomparable/ByteSourceTestBase.java [new file with mode: 0644]
test/unit/org/apache/cassandra/utils/bytecomparable/DecoratedKeyByteSourceTest.java [new file with mode: 0644]
tools/stress/src/org/apache/cassandra/io/sstable/StressCQLSSTableWriter.java
tools/stress/src/org/apache/cassandra/stress/CompactionStress.java

index 5632664486d43ee9d13be776bb587b91aa7b82b0..6a3d72e1ca45bd2a5266b2da653dcea8a51e0352 100644 (file)
@@ -58,6 +58,8 @@
                  <exclude name="**/doc/antora.yml"/>
                  <exclude name="**/test/conf/cassandra.yaml"/>
                  <exclude name="**/test/conf/cassandra-old.yaml"/>
+                 <exclude name="**/test/conf/cassandra-converters-special-cases-old-names.yaml"/>
+                 <exclude name="**/test/conf/cassandra-converters-special-cases.yaml"/>
                  <exclude name="**/test/conf/cassandra_encryption.yaml"/>
                  <exclude name="**/test/conf/cdc.yaml"/>
                  <exclude name="**/test/conf/commitlog_compression_LZ4.yaml"/>
index 0d113a08ca5a18748830fad2e14ac8370571a5c5..5aa00b16f6b27ebf3dc31f3c47cc4168ee40ed48 100644 (file)
@@ -200,6 +200,10 @@ j8_with_dtests_jobs: &j8_with_dtests_jobs
         requires:
           - start_j8_jvm_dtests
           - j8_build
+    - j8_simulator_dtests:
+        requires:
+          - start_j8_jvm_dtests
+          - j8_build
     - j8_jvm_dtests_vnode:
         requires:
           - start_j8_jvm_dtests
@@ -381,6 +385,9 @@ j8_pre-commit_jobs: &j8_pre-commit_jobs
     - j8_unit_tests:
         requires:
           - j8_build
+    - j8_simulator_dtests:
+        requires:
+          - j8_build
     - j8_jvm_dtests:
         requires:
           - j8_build
@@ -738,6 +745,15 @@ jobs:
       - log_environment
       - run_parallel_junit_tests
 
+  j8_simulator_dtests:
+    <<: *j8_small_executor
+    steps:
+      - attach_workspace:
+          at: /home/cassandra
+      - create_junit_containers
+      - log_environment
+      - run_simulator_tests
+
   j8_jvm_dtests:
     <<: *j8_small_par_executor
     steps:
@@ -1312,6 +1328,33 @@ commands:
 
         no_output_timeout: 15m
 
+  run_simulator_tests:
+    parameters:
+      no_output_timeout:
+        type: string
+        default: 30m
+    steps:
+    - run:
+        name: Run Simulator Tests 
+        command: |
+          set -x
+          export PATH=$JAVA_HOME/bin:$PATH
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra
+          if [ -d ~/dtest_jars ]; then
+            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+          fi
+          ant test-simulator-dtest
+        no_output_timeout: <<parameters.no_output_timeout>>
+    - store_test_results:
+        path: /tmp/cassandra/build/test/output/
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/logs
+        destination: logs
+
   run_junit_tests:
     parameters:
       target:
index a82e5d1915fffb416b5a0acb03f937315309b483..07b8fb3437ab787b7c6184f4a0aec44be833dfc0 100644 (file)
@@ -1152,6 +1152,112 @@ jobs:
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - CASSANDRA_USE_JDK11: true
+  j8_simulator_dtests:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 1
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Determine unit Tests to Run
+        command: |
+          # reminder: this code (along with all the steps) is independently executed on every circle container
+          # so the goal here is to get the circleci script to return the tests *this* container will run
+          # which we do via the `circleci` cli tool.
+
+          rm -fr ~/cassandra-dtest/upgrade_tests
+          echo "***java tests***"
+
+          # get all of our unit test filenames
+          set -eo pipefail && circleci tests glob "$HOME/cassandra/test/unit/**/*.java" > /tmp/all_java_unit_tests.txt
+
+          # split up the unit tests into groups based on the number of containers we have
+          set -eo pipefail && circleci tests split --split-by=timings --timings-type=filename --index=${CIRCLE_NODE_INDEX} --total=${CIRCLE_NODE_TOTAL} /tmp/all_java_unit_tests.txt > /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt
+          set -eo pipefail && cat /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt | sed "s;^/home/cassandra/cassandra/test/unit/;;g" | grep "Test\.java$"  > /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+          echo "** /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt"
+          cat /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+        no_output_timeout: 15m
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Run Simulator Tests
+        command: |
+          set -x
+          export PATH=$JAVA_HOME/bin:$PATH
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra
+          if [ -d ~/dtest_jars ]; then
+            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+          fi
+          ant test-simulator-dtest
+        no_output_timeout: 30m
+    - store_test_results:
+        path: /tmp/cassandra/build/test/output/
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_UTEST_TARGET: testsome
+    - REPEATED_UTEST_CLASS: null
+    - REPEATED_UTEST_METHODS: null
+    - REPEATED_UTEST_VNODES: false
+    - REPEATED_UTEST_COUNT: 100
+    - REPEATED_UTEST_STOP_ON_FAILURE: false
+    - REPEATED_DTEST_NAME: null
+    - REPEATED_DTEST_VNODES: false
+    - REPEATED_DTEST_COUNT: 100
+    - REPEATED_DTEST_STOP_ON_FAILURE: false
+    - REPEATED_UPGRADE_DTEST_NAME: null
+    - REPEATED_UPGRADE_DTEST_COUNT: 100
+    - REPEATED_UPGRADE_DTEST_STOP_ON_FAILURE: false
+    - REPEATED_JVM_UPGRADE_DTEST_CLASS: null
+    - REPEATED_JVM_UPGRADE_DTEST_METHODS: null
+    - REPEATED_JVM_UPGRADE_DTEST_COUNT: 100
+    - REPEATED_JVM_UPGRADE_DTEST_STOP_ON_FAILURE: false
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
   j8_cqlsh-dtests-py3-with-vnodes:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
@@ -3772,6 +3878,10 @@ workflows:
         requires:
         - start_j8_jvm_dtests
         - j8_build
+    - j8_simulator_dtests:
+        requires:
+        - start_j8_jvm_dtests
+        - j8_build
     - j8_jvm_dtests_vnode:
         requires:
         - start_j8_jvm_dtests
@@ -3938,6 +4048,9 @@ workflows:
     - j8_unit_tests:
         requires:
         - j8_build
+    - j8_simulator_dtests:
+        requires:
+        - j8_build
     - j8_jvm_dtests:
         requires:
         - j8_build
index 36d51c922c451ef5f0f635bfeda928fc84bc2689..bdb5a82d52b7018d1a2eea4b8278af879a52b6d7 100644 (file)
@@ -1152,6 +1152,112 @@ jobs:
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - CASSANDRA_USE_JDK11: true
+  j8_simulator_dtests:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 1
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Determine unit Tests to Run
+        command: |
+          # reminder: this code (along with all the steps) is independently executed on every circle container
+          # so the goal here is to get the circleci script to return the tests *this* container will run
+          # which we do via the `circleci` cli tool.
+
+          rm -fr ~/cassandra-dtest/upgrade_tests
+          echo "***java tests***"
+
+          # get all of our unit test filenames
+          set -eo pipefail && circleci tests glob "$HOME/cassandra/test/unit/**/*.java" > /tmp/all_java_unit_tests.txt
+
+          # split up the unit tests into groups based on the number of containers we have
+          set -eo pipefail && circleci tests split --split-by=timings --timings-type=filename --index=${CIRCLE_NODE_INDEX} --total=${CIRCLE_NODE_TOTAL} /tmp/all_java_unit_tests.txt > /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt
+          set -eo pipefail && cat /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt | sed "s;^/home/cassandra/cassandra/test/unit/;;g" | grep "Test\.java$"  > /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+          echo "** /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt"
+          cat /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+        no_output_timeout: 15m
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Run Simulator Tests
+        command: |
+          set -x
+          export PATH=$JAVA_HOME/bin:$PATH
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra
+          if [ -d ~/dtest_jars ]; then
+            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+          fi
+          ant test-simulator-dtest
+        no_output_timeout: 30m
+    - store_test_results:
+        path: /tmp/cassandra/build/test/output/
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_UTEST_TARGET: testsome
+    - REPEATED_UTEST_CLASS: null
+    - REPEATED_UTEST_METHODS: null
+    - REPEATED_UTEST_VNODES: false
+    - REPEATED_UTEST_COUNT: 100
+    - REPEATED_UTEST_STOP_ON_FAILURE: false
+    - REPEATED_DTEST_NAME: null
+    - REPEATED_DTEST_VNODES: false
+    - REPEATED_DTEST_COUNT: 100
+    - REPEATED_DTEST_STOP_ON_FAILURE: false
+    - REPEATED_UPGRADE_DTEST_NAME: null
+    - REPEATED_UPGRADE_DTEST_COUNT: 100
+    - REPEATED_UPGRADE_DTEST_STOP_ON_FAILURE: false
+    - REPEATED_JVM_UPGRADE_DTEST_CLASS: null
+    - REPEATED_JVM_UPGRADE_DTEST_METHODS: null
+    - REPEATED_JVM_UPGRADE_DTEST_COUNT: 100
+    - REPEATED_JVM_UPGRADE_DTEST_STOP_ON_FAILURE: false
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
   j8_cqlsh-dtests-py3-with-vnodes:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
@@ -3772,6 +3878,10 @@ workflows:
         requires:
         - start_j8_jvm_dtests
         - j8_build
+    - j8_simulator_dtests:
+        requires:
+        - start_j8_jvm_dtests
+        - j8_build
     - j8_jvm_dtests_vnode:
         requires:
         - start_j8_jvm_dtests
@@ -3938,6 +4048,9 @@ workflows:
     - j8_unit_tests:
         requires:
         - j8_build
+    - j8_simulator_dtests:
+        requires:
+        - j8_build
     - j8_jvm_dtests:
         requires:
         - j8_build
index a82e5d1915fffb416b5a0acb03f937315309b483..07b8fb3437ab787b7c6184f4a0aec44be833dfc0 100644 (file)
@@ -1152,6 +1152,112 @@ jobs:
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - CASSANDRA_USE_JDK11: true
+  j8_simulator_dtests:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 1
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Determine unit Tests to Run
+        command: |
+          # reminder: this code (along with all the steps) is independently executed on every circle container
+          # so the goal here is to get the circleci script to return the tests *this* container will run
+          # which we do via the `circleci` cli tool.
+
+          rm -fr ~/cassandra-dtest/upgrade_tests
+          echo "***java tests***"
+
+          # get all of our unit test filenames
+          set -eo pipefail && circleci tests glob "$HOME/cassandra/test/unit/**/*.java" > /tmp/all_java_unit_tests.txt
+
+          # split up the unit tests into groups based on the number of containers we have
+          set -eo pipefail && circleci tests split --split-by=timings --timings-type=filename --index=${CIRCLE_NODE_INDEX} --total=${CIRCLE_NODE_TOTAL} /tmp/all_java_unit_tests.txt > /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt
+          set -eo pipefail && cat /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt | sed "s;^/home/cassandra/cassandra/test/unit/;;g" | grep "Test\.java$"  > /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+          echo "** /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt"
+          cat /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+        no_output_timeout: 15m
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Run Simulator Tests
+        command: |
+          set -x
+          export PATH=$JAVA_HOME/bin:$PATH
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra
+          if [ -d ~/dtest_jars ]; then
+            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+          fi
+          ant test-simulator-dtest
+        no_output_timeout: 30m
+    - store_test_results:
+        path: /tmp/cassandra/build/test/output/
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_UTEST_TARGET: testsome
+    - REPEATED_UTEST_CLASS: null
+    - REPEATED_UTEST_METHODS: null
+    - REPEATED_UTEST_VNODES: false
+    - REPEATED_UTEST_COUNT: 100
+    - REPEATED_UTEST_STOP_ON_FAILURE: false
+    - REPEATED_DTEST_NAME: null
+    - REPEATED_DTEST_VNODES: false
+    - REPEATED_DTEST_COUNT: 100
+    - REPEATED_DTEST_STOP_ON_FAILURE: false
+    - REPEATED_UPGRADE_DTEST_NAME: null
+    - REPEATED_UPGRADE_DTEST_COUNT: 100
+    - REPEATED_UPGRADE_DTEST_STOP_ON_FAILURE: false
+    - REPEATED_JVM_UPGRADE_DTEST_CLASS: null
+    - REPEATED_JVM_UPGRADE_DTEST_METHODS: null
+    - REPEATED_JVM_UPGRADE_DTEST_COUNT: 100
+    - REPEATED_JVM_UPGRADE_DTEST_STOP_ON_FAILURE: false
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
   j8_cqlsh-dtests-py3-with-vnodes:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
@@ -3772,6 +3878,10 @@ workflows:
         requires:
         - start_j8_jvm_dtests
         - j8_build
+    - j8_simulator_dtests:
+        requires:
+        - start_j8_jvm_dtests
+        - j8_build
     - j8_jvm_dtests_vnode:
         requires:
         - start_j8_jvm_dtests
@@ -3938,6 +4048,9 @@ workflows:
     - j8_unit_tests:
         requires:
         - j8_build
+    - j8_simulator_dtests:
+        requires:
+        - j8_build
     - j8_jvm_dtests:
         requires:
         - j8_build
index 273c109921879166b8161b7227c06a8f21c8948d..dac7e9e351a5911dbfb374ab6103fbd6a57f47ff 100644 (file)
@@ -1152,6 +1152,112 @@ jobs:
     - JAVA_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - JDK_HOME: /usr/lib/jvm/java-11-openjdk-amd64
     - CASSANDRA_USE_JDK11: true
+  j8_simulator_dtests:
+    docker:
+    - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
+    resource_class: medium
+    working_directory: ~/
+    shell: /bin/bash -eo pipefail -l
+    parallelism: 1
+    steps:
+    - attach_workspace:
+        at: /home/cassandra
+    - run:
+        name: Determine unit Tests to Run
+        command: |
+          # reminder: this code (along with all the steps) is independently executed on every circle container
+          # so the goal here is to get the circleci script to return the tests *this* container will run
+          # which we do via the `circleci` cli tool.
+
+          rm -fr ~/cassandra-dtest/upgrade_tests
+          echo "***java tests***"
+
+          # get all of our unit test filenames
+          set -eo pipefail && circleci tests glob "$HOME/cassandra/test/unit/**/*.java" > /tmp/all_java_unit_tests.txt
+
+          # split up the unit tests into groups based on the number of containers we have
+          set -eo pipefail && circleci tests split --split-by=timings --timings-type=filename --index=${CIRCLE_NODE_INDEX} --total=${CIRCLE_NODE_TOTAL} /tmp/all_java_unit_tests.txt > /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt
+          set -eo pipefail && cat /tmp/java_tests_${CIRCLE_NODE_INDEX}.txt | sed "s;^/home/cassandra/cassandra/test/unit/;;g" | grep "Test\.java$"  > /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+          echo "** /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt"
+          cat /tmp/java_tests_${CIRCLE_NODE_INDEX}_final.txt
+        no_output_timeout: 15m
+    - run:
+        name: Log Environment Information
+        command: |
+          echo '*** id ***'
+          id
+          echo '*** cat /proc/cpuinfo ***'
+          cat /proc/cpuinfo
+          echo '*** free -m ***'
+          free -m
+          echo '*** df -m ***'
+          df -m
+          echo '*** ifconfig -a ***'
+          ifconfig -a
+          echo '*** uname -a ***'
+          uname -a
+          echo '*** mount ***'
+          mount
+          echo '*** env ***'
+          env
+          echo '*** java ***'
+          which java
+          java -version
+    - run:
+        name: Run Simulator Tests
+        command: |
+          set -x
+          export PATH=$JAVA_HOME/bin:$PATH
+          time mv ~/cassandra /tmp
+          cd /tmp/cassandra
+          if [ -d ~/dtest_jars ]; then
+            cp ~/dtest_jars/dtest* /tmp/cassandra/build/
+          fi
+          ant test-simulator-dtest
+        no_output_timeout: 30m
+    - store_test_results:
+        path: /tmp/cassandra/build/test/output/
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/output
+        destination: junitxml
+    - store_artifacts:
+        path: /tmp/cassandra/build/test/logs
+        destination: logs
+    environment:
+    - ANT_HOME: /usr/share/ant
+    - JAVA11_HOME: /usr/lib/jvm/java-11-openjdk-amd64
+    - JAVA8_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - LANG: en_US.UTF-8
+    - KEEP_TEST_DIR: true
+    - DEFAULT_DIR: /home/cassandra/cassandra-dtest
+    - PYTHONIOENCODING: utf-8
+    - PYTHONUNBUFFERED: true
+    - CASS_DRIVER_NO_EXTENSIONS: true
+    - CASS_DRIVER_NO_CYTHON: true
+    - CASSANDRA_SKIP_SYNC: true
+    - DTEST_REPO: https://github.com/apache/cassandra-dtest.git
+    - DTEST_BRANCH: trunk
+    - CCM_MAX_HEAP_SIZE: 1024M
+    - CCM_HEAP_NEWSIZE: 256M
+    - REPEATED_UTEST_TARGET: testsome
+    - REPEATED_UTEST_CLASS: null
+    - REPEATED_UTEST_METHODS: null
+    - REPEATED_UTEST_VNODES: false
+    - REPEATED_UTEST_COUNT: 100
+    - REPEATED_UTEST_STOP_ON_FAILURE: false
+    - REPEATED_DTEST_NAME: null
+    - REPEATED_DTEST_VNODES: false
+    - REPEATED_DTEST_COUNT: 100
+    - REPEATED_DTEST_STOP_ON_FAILURE: false
+    - REPEATED_UPGRADE_DTEST_NAME: null
+    - REPEATED_UPGRADE_DTEST_COUNT: 100
+    - REPEATED_UPGRADE_DTEST_STOP_ON_FAILURE: false
+    - REPEATED_JVM_UPGRADE_DTEST_CLASS: null
+    - REPEATED_JVM_UPGRADE_DTEST_METHODS: null
+    - REPEATED_JVM_UPGRADE_DTEST_COUNT: 100
+    - REPEATED_JVM_UPGRADE_DTEST_STOP_ON_FAILURE: false
+    - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-amd64
+    - JDK_HOME: /usr/lib/jvm/java-8-openjdk-amd64
   j8_cqlsh-dtests-py3-with-vnodes:
     docker:
     - image: apache/cassandra-testing-ubuntu2004-java11-w-dependencies:latest
@@ -3772,6 +3878,10 @@ workflows:
         requires:
         - start_j8_jvm_dtests
         - j8_build
+    - j8_simulator_dtests:
+        requires:
+        - start_j8_jvm_dtests
+        - j8_build
     - j8_jvm_dtests_vnode:
         requires:
         - start_j8_jvm_dtests
@@ -3938,6 +4048,9 @@ workflows:
     - j8_unit_tests:
         requires:
         - j8_build
+    - j8_simulator_dtests:
+        requires:
+        - j8_build
     - j8_jvm_dtests:
         requires:
         - j8_build
index ec2cdd271fe278082c98d64a474382bf3f5f6137..6097abe8977fe817a6f622056c1dc77be82f4ef2 100644 (file)
@@ -1,4 +1,22 @@
 4.2
+ * DataOutputBuffer#scratchBuffer can use off-heap or on-heap memory as a means to control memory allocations (CASSANDRA-16471)
+ * Add ability to read the TTLs and write times of the elements of a collection and/or UDT (CASSANDRA-8877)
+ * Removed Python < 2.7 support from formatting.py (CASSANDRA-17694)
+ * Cleanup pylint issues with pylexotron.py (CASSANDRA-17779)
+ * NPE bug in streaming checking if SSTable is being repaired (CASSANDRA-17801)
+ * Users of NativeLibrary should handle lack of JNA appropriately when running in client mode (CASSANDRA-17794)
+ * Warn on unknown directories found in system keyspace directory rather than kill node during startup checks (CASSANDRA-17777)
+ * Log duplicate rows sharing a partition key found in verify and scrub (CASSANDRA-17789)
+ * Add separate thread pool for Secondary Index building so it doesn't block compactions (CASSANDRA-17781)
+ * Added JMX call to getSSTableCountPerTWCSBucket for TWCS (CASSANDRA-17774)
+ * When doing a host replacement, -Dcassandra.broadcast_interval_ms is used to know when to check the ring but checks that the ring wasn't changed in -Dcassandra.ring_delay_ms, changes to ring delay should not depend on when we publish load stats (CASSANDRA-17776)
+ * When bootstrap fails, CassandraRoleManager may attempt to do read queries that fail with "Cannot read from a bootstrapping node", and increments unavailables counters (CASSANDRA-17754)
+ * Add guardrail to disallow DROP KEYSPACE commands (CASSANDRA-17767)
+ * Remove ephemeral snapshot marker file and introduce a flag to SnapshotManifest (CASSANDRA-16911)
+ * Add a virtual table that exposes currently running queries (CASSANDRA-15241)
+ * Allow sstableloader to specify table without relying on path (CASSANDRA-16584)
+ * Fix TestGossipingPropertyFileSnitch.test_prefer_local_reconnect_on_listen_address (CASSANDRA-17700)
+ * Add ByteComparable API (CASSANDRA-6936)
  * Add guardrail for maximum replication factor (CASSANDRA-17500)
  * Increment CQLSH to version 6.2.0 for release 4.2 (CASSANDRA-17646)
  * Adding support to perform certificate based internode authentication (CASSANDRA-17661)
  * Add guardrail for ALTER TABLE ADD / DROP / REMOVE column operations (CASSANDRA-17495)
  * Rename DisableFlag class to EnableFlag on guardrails (CASSANDRA-17544)
 Merged from 4.1:
+ * Fix a race condition where a keyspace can be opened while it is being removed (CASSANDRA-17658)
+ * DatabaseDescriptor will set the default failure detector during client initialization (CASSANDRA-17782)
+ * Avoid initializing schema via SystemKeyspace.getPreferredIP() with the BulkLoader tool (CASSANDRA-17740)
+ * Uncomment prepared_statements_cache_size, key_cache_size, counter_cache_size, index_summary_capacity which were
+   commented out by mistake in a previous patch
+   Fix breaking change with cache_load_timeout; cache_load_timeout_seconds <=0 and cache_load_timeout=0 are equivalent
+   and they both mean disabled
+   Deprecate public method setRate(final double throughputMbPerSec) in Compaction Manager in favor of
+   setRateInBytes(final double throughputBytesPerSec)
+   Revert breaking change removal of StressCQLSSTableWriter.Builder.withBufferSizeInMB(int size). Deprecate it in favor
+   of StressCQLSSTableWriter.Builder.withBufferSizeInMiB(int size)
+   Fix precision issues, add new -m flag (for nodetool/setstreamthroughput, nodetool/setinterdcstreamthroughput,
+   nodetool/getstreamthroughput and nodetool/getinterdcstreamthroughput), add new -d flags (nodetool/getstreamthroughput, nodetool/getinterdcstreamthroughput, nodetool/getcompactionthroughput)
+   Fix a bug with precision in nodetool/compactionstats
+   Deprecate StorageService methods and add new ones for stream_throughput_outbound, inter_dc_stream_throughput_outbound,
+   compaction_throughput_outbound in the JMX MBean `org.apache.cassandra.db:type=StorageService`
+   Removed getEntireSSTableStreamThroughputMebibytesPerSec in favor of new getEntireSSTableStreamThroughputMebibytesPerSecAsDouble
+   in the JMX MBean `org.apache.cassandra.db:type=StorageService`
+   Removed getEntireSSTableInterDCStreamThroughputMebibytesPerSec in favor of getEntireSSTableInterDCStreamThroughputMebibytesPerSecAsDouble
+   in the JMX MBean `org.apache.cassandra.db:type=StorageService` (CASSANDRA-17725)
+ * Fix sstable_preemptive_open_interval disabled value. sstable_preemptive_open_interval = null backward compatible with
+   sstable_preemptive_open_interval_in_mb = -1 (CASSANDRA-17737)
+ * Remove usages of Path#toFile() in the snapshot apparatus (CASSANDRA-17769)
+ * Fix Settings Virtual Table to update paxos_variant after startup and rename enable_uuid_sstable_identifiers to
+   uuid_sstable_identifiers_enabled as per our config naming conventions (CASSANDRA-17738)
+ * index_summary_resize_interval_in_minutes = -1 is equivalent to index_summary_resize_interval being set to null or
+   disabled. JMX MBean IndexSummaryManager, setResizeIntervalInMinutes method still takes resizeIntervalInMinutes = -1 for disabled (CASSANDRA-17735)
+ * min_tracked_partition_size_bytes parameter from 4.1 alpha1 was renamed to min_tracked_partition_size (CASSANDRA-17733)
+ * Remove commons-lang dependency during build runtime (CASSANDRA-17724)
+ * Relax synchronization on StreamSession#onError() to avoid deadlock (CASSANDRA-17706)
+ * Fix AbstractCell#toString throws MarshalException for cell in collection (CASSANDRA-17695)
+ * Add new vtable output option to compactionstats (CASSANDRA-17683)
+ * Fix commitLogUpperBound initialization in AbstractMemtableWithCommitlog (CASSANDRA-17587)
+ * Fix widening to long in getBatchSizeFailThreshold (CASSANDRA-17650)
+ * Fix widening from mebibytes to bytes in IntMebibytesBound (CASSANDRA-17716)
+ * Revert breaking change in nodetool clientstats and expose cient options through nodetool clientstats --client-options. (CASSANDRA-17715)
  * Fix missed nowInSec values in QueryProcessor (CASSANDRA-17458)
  * Revert removal of withBufferSizeInMB(int size) in CQLSSTableWriter.Builder class and deprecate it in favor of withBufferSizeInMiB(int size) (CASSANDRA-17675)
  * Remove expired snapshots of dropped tables after restart (CASSANDRA-17619)
 Merged from 4.0:
+ * Add 'noboolean' rpm build for older distros like CentOS7 (CASSANDRA-17765)
+ * Fix default value for compaction_throughput_mb_per_sec in Config class to match the one in cassandra.yaml (CASSANDRA-17790)
+ * Fix Setting Virtual Table - update after startup config properties gc_log_threshold_in_ms, gc_warn_threshold_in_ms,
+   conf.index_summary_capacity_in_mb, prepared_statements_cache_size_mb, key_cache_size_in_mb, counter_cache_size_in_mb
+   (CASSANDRA-17737)
+ * Clean up ScheduledExecutors, CommitLog, and MessagingService shutdown for in-JVM dtests (CASSANDRA-17731)
+ * Remove extra write to system table for prepared statements (CASSANDRA-17764)
+Merged from 3.11:
+ * Document usage of closed token intervals in manual compaction (CASSANDRA-17575)
+ * Creating a keyspace on an insufficient number of replicas should filter out gossiping-only members (CASSANDRA-17759)
+Merged from 3.0:
+ * Fix restarting of services on gossiping-only member (CASSANDRA-17752)
+
+
+4.0.5
+ * Utilise BTree improvements to reduce garbage and improve throughput (CASSANDRA-15511)
  * SSL storage port in sstableloader is deprecated (CASSANDRA-17602)
  * Fix counter write timeouts at ONE (CASSANDRA-17411)
  * Fix NPE in getLocalPrimaryRangeForEndpoint (CASSANDRA-17680)
index c9edaa2a7add9f375f2a53f0dd0c6c7813f90f30..96ad4b9ac0c783ac30163450195e769f2a23bea5 100644 (file)
--- a/NEWS.txt
+++ b/NEWS.txt
@@ -63,16 +63,24 @@ New features
       If this is set to false, streaming will be considerably faster however it's possible that, in extreme situations
       (losing > quorum # nodes in a replica set), you may have data in your SSTables that never makes it to the CDC log.
       The default is true/enabled. The configuration can be altered via JMX.
+    - Added support for reading the write times and TTLs of the elements of collections and UDTs, regardless of being
+      frozen or not. The CQL functions writetime, maxwritetime and ttl can now be applied to entire collections/UDTs,
+      single collection/UDT elements and slices of collection/UDT elements.
     - Added a new CQL function, maxwritetime. It shows the largest unix timestamp that the data was written, similar to
-      its sibling CQL function, writetime. Unlike writetime, maxwritetime can be applied to multi-cell data types, e.g.
-      non-frozen collections and UDT, and returns the largest timestamp. One should not to use it when upgrading to 4.2.
+      its sibling CQL function, writetime.
     - New Guardrails added:
       - Whether ALTER TABLE commands are allowed to mutate columns
       - Whether SimpleStrategy is allowed on keyspace creation or alteration
       - Maximum replication factor
+      - Whether DROP KEYSPACE commands are allowed.
+    - It is possible to list ephemeral snapshots with the nodetool listsnapshots command when the "-e" flag is specified.
 
 Upgrading
 ---------
+    - Ephemeral marker files for snapshots done by repairs are not created anymore;
+      there is a dedicated flag in the snapshot manifest instead. On upgrade of a node to version 4.2, at node start, any
+      such ephemeral snapshots on disk will be deleted (same behaviour as before), and new ephemeral snapshots
+      will no longer create ephemeral marker files, as a flag in the snapshot manifest was introduced instead.
 
 Deprecation
 -----------
@@ -121,7 +129,8 @@ New features
     - Support for native transport rate limiting via native_transport_rate_limiting_enabled and
       native_transport_max_requests_per_second in cassandra.yaml.
     - Support for pre hashing passwords on CQL DCL commands
-    - Expose all client options via system_views.clients and nodetool clientstats.
+    - Expose all client options via system_views.clients and nodetool clientstats --client-options.
+    - Add new nodetool compactionstats --vtable option to match the sstable_tasks vtable.
     - Support for String concatenation has been added through the + operator.
     - New configuration max_hints_size_per_host to limit the size of local hints files per host in mebibytes. Setting to
       non-positive value disables the limit, which is the default behavior. Setting to a positive value to ensure
@@ -191,6 +200,15 @@ New features
 
 Upgrading
 ---------
+    - `cache_load_timeout_seconds` being negative for disabled is equivalent to `cache_load_timeout` = 0 for disabled.
+    - `sstable_preemptive_open_interval_in_mb` being negative for disabled is equivalent to `sstable_preemptive_open_interval`
+      being null again. In the JMX MBean `org.apache.cassandra.db:type=StorageService`, the setter method
+      `setSSTablePreemptiveOpenIntervalInMB` still takes `intervalInMB` negative numbers for disabled.
+    - `enable_uuid_sstable_identifiers` parameter from 4.1 alpha1 was renamed to `uuid_sstable_identifiers_enabled`.
+    - `index_summary_resize_interval_in_minutes = -1` is equivalent to index_summary_resize_interval being set to `null` or
+      disabled. In the JMX MBean `org.apache.cassandra.db:type=IndexSummaryManager`, the setter method `setResizeIntervalInMinutes` still takes
+      `resizeIntervalInMinutes = -1` for disabled.
+    - min_tracked_partition_size_bytes parameter from 4.1 alpha1 was renamed to min_tracked_partition_size.
     - Parameters of type data storage, duration and data rate cannot be set to Long.MAX_VALUE (former parameters of long type)
       and Integer.MAX_VALUE (former parameters of int type). Those numbers are used during conversion between units to prevent
       an overflow from happening. (CASSANDRA-17571)
@@ -251,7 +269,21 @@ Upgrading
 
 Deprecation
 -----------
-    - `withBufferSizeInMB(int size)` in CQLSSTableWriter.Builder class is deprecated in favor of withBufferSizeInMiB(int size)
+    - In the JMX MBean `org.apache.cassandra.db:type=StorageService`: deprecate getter method `getStreamThroughputMbitPerSec`
+      in favor of getter method `getStreamThroughputMbitPerSecAsDouble`; deprecate getter method `getStreamThroughputMbPerSec`
+      in favor of getter methods `getStreamThroughputMebibytesPerSec` and `getStreamThroughputMebibytesPerSecAsDouble`;
+      deprecate getter method `getInterDCStreamThroughputMbitPerSec` in favor of getter method `getInterDCStreamThroughputMbitPerSecAsDouble`;
+      deprecate getter method `getInterDCStreamThroughputMbPerSec` in favor of getter method `getInterDCStreamThroughputMebibytesPerSecAsDouble`;
+      deprecate getter method `getCompactionThroughputMbPerSec` in favor of getter methods `getCompactionThroughtputMibPerSecAsDouble`
+      and `getCompactionThroughtputBytesPerSec`; deprecate setter methods `setStreamThroughputMbPerSec` and `setStreamThroughputMbitPerSec`
+      in favor of `setStreamThroughputMebibytesPerSec`; deprecate setter methods `setInterDCStreamThroughputMbitPerSec` and
+      `setInterDCStreamThroughputMbPerSec` in favor of `setInterDCStreamThroughputMebibytesPerSec`. See CASSANDRA-17725 for further details.
+    - Deprecate public method `setRate(final double throughputMbPerSec)` in `CompactionManager` in favor of
+      `setRateInBytes(final double throughputBytesPerSec)`
+    - `withBufferSizeInMB(int size)` in `StressCQLSSTableWriter.Builder` class is deprecated in favor of `withBufferSizeInMiB(int size)`
+      No change of functionality in the new one, only name change for clarity in regards to units and to follow naming
+      standardization.
+    - `withBufferSizeInMB(int size)` in `CQLSSTableWriter.Builder` class is deprecated in favor of `withBufferSizeInMiB(int size)`
       No change of functionality in the new one, only name change for clarity in regards to units and to follow naming
      standardization.
     - The properties `keyspace_count_warn_threshold` and `table_count_warn_threshold` in cassandra.yaml have been
@@ -358,6 +390,10 @@ New features
 
 Upgrading
 ---------
+    - If you were on 4.0.1 - 4.0.5 and if you haven't set the compaction_throughput_mb_per_sec in your 4.0 cassandra.yaml
+      file but you relied on the internal default value, then compaction_throughput_mb_per_sec was equal to an old default
+      value of 16MiB/s in Cassandra 4.0. After CASSANDRA-17790 this is changed to 64MiB/s to match the default value in
+      cassandra.yaml. If you prefer the old one of 16MiB/s, you need to set it explicitly in your cassandra.yaml file.
     - otc_coalescing_strategy, otc_coalescing_window_us, otc_coalescing_enough_coalesced_messages,
       otc_backlog_expiration_interval_ms are deprecated and will be removed at earliest with next major release.
       otc_coalescing_strategy is disabled since 3.11.
index 942bb203b04bed7e5cfd7e09cba55255a932e1ff..cba3a2b42450ebbda70786dc7dd581ef3c45316b 100644 (file)
@@ -39,7 +39,7 @@ be sitting in front of a prompt:
 
 ----
 Connected to Test Cluster at localhost:9160.
-[cqlsh 6.2.0 | Cassandra 4.2-SNAPSHOT | CQL spec 3.4.5 | Native protocol v5]
+[cqlsh 6.2.0 | Cassandra 4.2-SNAPSHOT | CQL spec 3.4.6 | Native protocol v5]
 Use HELP for help.
 cqlsh>
 ----
index 15da1e7df1f626a5e15c7b9ca02c3f501402cbdd..ca346c9f28c9f8d0f67592f385a7240ebe4c6f47 100644 (file)
--- a/build.xml
+++ b/build.xml
@@ -76,8 +76,8 @@
     <property name="test.simulator-asm.src" value="${test.dir}/simulator/asm"/>
     <property name="test.simulator-bootstrap.src" value="${test.dir}/simulator/bootstrap"/>
     <property name="test.simulator-test.src" value="${test.dir}/simulator/test"/>
-    <property name="test.driver.connection_timeout_ms" value="5000"/>
-    <property name="test.driver.read_timeout_ms" value="12000"/>
+    <property name="test.driver.connection_timeout_ms" value="10000"/>
+    <property name="test.driver.read_timeout_ms" value="24000"/>
     <property name="test.jvm.args" value="" />
     <property name="dist.dir" value="${build.dir}/dist"/>
     <property name="tmp.dir" value="${java.io.tmpdir}"/>
     <property name="maven-repository-url" value="https://repository.apache.org/content/repositories/snapshots"/>
     <property name="maven-repository-id" value="apache.snapshots.https"/>
 
-    <property name="test.timeout" value="240000" />
+    <property name="test.timeout" value="480000" />
     <property name="test.memory.timeout" value="480000" />
     <property name="test.long.timeout" value="600000" />
     <property name="test.burn.timeout" value="60000000" />
     <property name="test.distributed.timeout" value="900000" />
+    <property name="test.simulation.timeout" value="1800000" />
 
     <!-- default for cql tests. Can be override by -Dcassandra.test.use_prepared=false -->
     <property name="cassandra.test.use_prepared" value="true" />
           <dependency groupId="org.apache.hadoop" artifactId="hadoop-core" version="1.0.3" scope="provided">
             <exclusion groupId="org.mortbay.jetty" artifactId="servlet-api"/>
             <exclusion groupId="commons-logging" artifactId="commons-logging"/>
+            <exclusion groupId="commons-lang" artifactId="commons-lang"/>
             <exclusion groupId="org.eclipse.jdt" artifactId="core"/>
             <exclusion groupId="ant" artifactId="ant"/>
             <exclusion groupId="junit" artifactId="junit"/>
             <exclusion groupId="net.java.dev.jna" artifactId="jna" />
             <exclusion groupId="net.java.dev.jna" artifactId="jna-platform" />
           </dependency>
-          <dependency groupId="com.google.code.findbugs" artifactId="jsr305" version="2.0.2" scope="provided"/>
+          <dependency groupId="com.google.code.findbugs" artifactId="jsr305" version="2.0.2"/>
           <dependency groupId="com.clearspring.analytics" artifactId="stream" version="2.5.2">
             <exclusion groupId="it.unimi.dsi" artifactId="fastutil" />
           </dependency>
       <jvmarg value="-Dcassandra.tolerate_sstable_size=true"/>
       <jvmarg value="-Dcassandra.skip_sync=true" />
     </testmacro>
-    <testmacro inputdir="${test.simulator-test.src}" timeout="${test.distributed.timeout}" forkmode="perTest" showoutput="true" filter="**/test/${test.name}.java">
+  </target>
+
+  <target name="test-simulator-dtest" depends="build-test" description="Execute simulator dtests">
+    <testmacro inputdir="${test.simulator-test.src}" timeout="${test.simulation.timeout}" forkmode="perTest" showoutput="true" filter="**/test/${test.name}.java">
       <jvmarg value="-Dlogback.configurationFile=test/conf/logback-simulator.xml"/>
       <jvmarg value="-Dcassandra.ring_delay_ms=10000"/>
       <jvmarg value="-Dcassandra.tolerate_sstable_size=true"/>
       <jvmarg value="-Dcassandra.skip_sync=true" />
+      <jvmarg value="-Dcassandra.debugrefcount=false"/>
+      <jvmarg value="-Dcassandra.test.simulator.determinismcheck=strict"/>
       <!-- Support Simulator Tests -->
       <jvmarg line="-javaagent:${test.lib}/jars/simulator-asm.jar"/>
       <jvmarg line="-Xbootclasspath/a:${test.lib}/jars/simulator-bootstrap.jar"/>
       <jvmarg line="-XX:ActiveProcessorCount=4"/>
       <jvmarg line="-XX:-TieredCompilation"/>
+      <jvmarg line="-XX:-BackgroundCompilation"/>
+      <jvmarg line="-XX:CICompilerCount=1"/>
       <jvmarg line="-XX:Tier4CompileThreshold=1000"/>
       <jvmarg line="-XX:ReservedCodeCacheSize=256M"/>
+      <jvmarg line="-Xmx8G"/>
     </testmacro>
   </target>
 
index 9a71312e63a869c18f194072f289097a2f4f1404..8e2f90ee22cd1e6733ce4469c1dd5e5d0236cf3c 100644 (file)
        <property name="idFormat" value="blockSystemClock"/>
        <property name="influenceFormat" value="0"/>
     </module>
+
+    <module name="SuppressWithNearbyCommentFilter">
+      <property name="commentFormat" value="checkstyle: permit this invocation"/>
+      <property name="idFormat" value="blockPathToFile"/>
+      <property name="influenceFormat" value="0"/>
+    </module>
  
     <module name="RegexpSinglelineJava">
       <!-- block system time -->
     <module name="IllegalInstantiation">
       <property name="classes" value="java.io.File,java.lang.Thread,java.util.concurrent.FutureTask,java.util.concurrent.Semaphore,java.util.concurrent.CountDownLatch,java.util.concurrent.ScheduledThreadPoolExecutor,java.util.concurrent.ThreadPoolExecutor,java.util.concurrent.ForkJoinPool,java.lang.OutOfMemoryError"/>
     </module>
+
+    <module name="RegexpSinglelineJava">
+      <!-- block Path#toFile() -->
+      <property name="id" value="blockPathToFile"/>
+      <property name="format" value="toFile\(\)"/>
+      <property name="message" value="Avoid Path#toFile(), as some implementations may not support it." />
+    </module>
   </module>
 
 </module>
index eb1d1a97c021d57327b9788bfb08106b8095ae24..98d70a035fe2715122cadca4dfd13d74b0f2a1bd 100644 (file)
@@ -64,7 +64,7 @@ hinted_handoff_enabled: true
 # Min unit: ms
 max_hint_window: 3h
 
-# Maximum throttle in KBs per second, per delivery thread.  This will be
+# Maximum throttle in KiBs per second, per delivery thread.  This will be
 # reduced proportionally to the number of nodes in the cluster.  (If there
 # are two nodes in the cluster, each delivery thread will use the maximum
 # rate; if there are three, each will throttle to half of the maximum,
@@ -86,7 +86,7 @@ max_hints_delivery_threads: 2
 # Min unit: ms
 hints_flush_period: 10000ms
 
-# Maximum size for a single hints file, in megabytes.
+# Maximum size for a single hints file, in mebibytes.
 # Min unit: MiB
 max_hints_file_size: 128MiB
 
@@ -121,7 +121,7 @@ auto_hints_cleanup_enabled: false
 #
 # hint_window_persistent_enabled: true
 
-# Maximum throttle in KBs per second, total. This will be
+# Maximum throttle in KiBs per second, total. This will be
 # reduced proportionally to the number of nodes in the cluster.
 # Min unit: KiB
 batchlog_replay_throttle: 1024KiB
@@ -374,7 +374,7 @@ commit_failure_policy: stop
 #
 # Default value ("auto") is 1/256th of the heap or 10MiB, whichever is greater
 # Min unit: MiB
-prepared_statements_cache_size:
+prepared_statements_cache_size:
 
 # Maximum size of the key cache in memory.
 #
@@ -389,7 +389,7 @@ commit_failure_policy: stop
 #
 # Default value is empty to make it "auto" (min(5% of Heap (in MiB), 100MiB)). Set to 0 to disable key cache.
 # Min unit: MiB
-key_cache_size:
+key_cache_size:
 
 # Duration in seconds after which Cassandra should
 # save the key cache. Caches are saved to saved_caches_directory as
@@ -457,7 +457,7 @@ row_cache_save_period: 0s
 # Default value is empty to make it "auto" (min(2.5% of Heap (in MiB), 50MiB)). Set to 0 to disable counter cache.
 # NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.
 # Min unit: MiB
-counter_cache_size:
+counter_cache_size:
 
 # Duration in seconds after which Cassandra should
 # save the counter cache (keys only). Caches are saved to saved_caches_directory as
@@ -659,7 +659,7 @@ memtable_allocation_type: heap_buffers
 # is 1/16th of the available heap. The main tradeoff is that smaller trees
 # have less resolution, which can lead to over-streaming data. If you see heap
 # pressure during repairs, consider lowering this, but you cannot go below
-# one megabyte. If you see lots of over-streaming, consider raising
+# one mebibyte. If you see lots of over-streaming, consider raising
 # this or using subrange repair.
 #
 # For more details see https://issues.apache.org/jira/browse/CASSANDRA-14096.
@@ -731,11 +731,11 @@ memtable_allocation_type: heap_buffers
 # is a best-effort process. In extreme conditions Cassandra may need to use
 # more than this amount of memory.
 # Min unit: KiB
-index_summary_capacity:
+index_summary_capacity:
 
 # How frequently index summaries should be resampled.  This is done
 # periodically to redistribute memory from the fixed-size pool to sstables
-# proportional their recent read rates.  Setting to -1 will disable this
+# proportional to their recent read rates.  Setting to a null value will disable this
 # process, leaving existing index summaries at their current sampling level.
 # Min unit: m
 index_summary_resize_interval: 60m
@@ -1004,6 +1004,8 @@ compaction_throughput: 64MiB/s
 # are completely written, and used in place of the prior sstables for
 # any range that has been written. This helps to smoothly transfer reads 
 # between the sstables, reducing page cache churn and keeping hot rows hot
+# Set sstable_preemptive_open_interval to null to disable, which is equivalent to
+# sstable_preemptive_open_interval_in_mb being negative
 # Min unit: MiB
 sstable_preemptive_open_interval: 50MiB
 
@@ -1012,7 +1014,7 @@ sstable_preemptive_open_interval: 50MiB
 # set to true, each newly created sstable will have a UUID based generation identifier and such files are
 # not readable by previous Cassandra versions. At some point, this option will become true by default
 # and eventually get removed from the configuration.
-enable_uuid_sstable_identifiers: false
+uuid_sstable_identifiers_enabled: false
 
 # When enabled, permits Cassandra to zero-copy stream entire eligible
 # SSTables between nodes, including every component.
@@ -1724,6 +1726,9 @@ drop_compact_storage_enabled: false
 # Guardrail to allow/disallow TRUNCATE and DROP TABLE statements
 # drop_truncate_table_enabled: true
 #
+# Guardrail to allow/disallow DROP KEYSPACE statements
+# drop_keyspace_enabled: true
+#
 # Guardrail to warn or fail when using a page size greater than threshold.
 # The two thresholds default to -1 to disable.
 # page_size_warn_threshold: -1
index 5fef1a9a2745b40f261bb9f40f37fcb2b964c7ef..fde597052606fc86230cc140dda1902b6b397a8b 100644 (file)
@@ -1082,10 +1082,10 @@ bc(syntax)..
 
 <selector> ::= <identifier>
              | <term>
-             | WRITETIME '(' <identifier> ')'
-             | MAXWRITETIME '(' <identifier> ')'
+             | WRITETIME '(' <selector> ')'
+             | MAXWRITETIME '(' <selector> ')'
              | COUNT '(' '*' ')'
-             | TTL '(' <identifier> ')'
+             | TTL '(' <selector> ')'
              | CAST '(' <selector> AS <type> ')'
              | <function> '(' (<selector> (',' <selector>)*)? ')'
              | <selector> '.' <identifier>
index 1f89469a328d0732d556ffb01d2db46be278d284..df99a39ef64a7ac5d86f6709378be4b2140f384b 100644 (file)
@@ -2,6 +2,16 @@
 
 The following describes the changes in each version of CQL.
 
+== 3.4.6
+
+* Add support for IF EXISTS and IF NOT EXISTS in ALTER statements  (`16916`)
+* Allow GRANT/REVOKE multiple permissions in a single statement (`17030`)
+* Pre hashed passwords in CQL (`17334`)
+* Add support for type casting in WHERE clause components and in the values of INSERT/UPDATE statements (`14337`)
+* Add support for CONTAINS and CONTAINS KEY in conditional UPDATE and DELETE statement (`10537`)
+* Allow to grant permission for all tables in a keyspace (`17027`)
+* Allow to aggregate by time intervals (`11871`)
+
 == 3.4.5
 
 * Adds support for arithmetic operators (`11935`)
index af9dbba2d90dd8a7ca3c4a245bc5da5326af6fd0..513dc1d1e5643003a7cedf578ec62353f2ed9cc7 100644 (file)
@@ -79,17 +79,17 @@ You must use the original column name instead.
 
 Selection supports three special functions that aren't allowed anywhere
 else: `WRITETIME`, `MAXWRITETIME` and `TTL`.
-All functions take only one argument, a column name.
+All functions take only one argument, a column name. If the column is a collection or UDT, it's possible to add element
+selectors, such as `WRITETIME(phones[2..4])` or `WRITETIME(user.name)`.
 These functions retrieve meta-information that is stored internally for each column:
 
-* `WRITETIME` stores the timestamp of the value of the column. Note that this function cannot be applied to non-frozen collection
-and UDT.
+* `WRITETIME` stores the timestamp of the value of the column.
 * `MAXWRITETIME` stores the largest timestamp of the value of the column. For non-collection and non-UDT columns, `MAXWRITETIME`
 is equivalent to `WRITETIME`. In the other cases, it returns the largest timestamp of the values in the column.
 * `TTL` stores the remaining time to live (in seconds) for the value of the column if it is set to expire; otherwise the value is `null`.
 
-The `WRITETIME` and `TTL` functions can't be used on multi-cell columns such as non-frozen
-collections or non-frozen user-defined types.
+The `WRITETIME` and `TTL` functions can be used on multi-cell columns such as non-frozen collections or non-frozen
+user-defined types. In that case, the functions will return the list of timestamps or TTLs for each selected cell.
 
 [[where-clause]]
 === The `WHERE` clause
index aef6575d52e5c0c52e050470e2633f79d9e08ad3..df74db96d4b10b1b00eb67353a0957fe796a9d68 100644 (file)
@@ -1,23 +1,5 @@
 = Frequently Asked Questions
 
-* `why-cant-list-all`
-* `what-ports`
-* `what-happens-on-joins`
-* `asynch-deletes`
-* `one-entry-ring`
-* `can-large-blob`
-* `nodetool-connection-refused`
-* `to-batch-or-not-to-batch`
-* `selinux`
-* `how-to-unsubscribe`
-* `cassandra-eats-all-my-memory`
-* `what-are-seeds`
-* `are-seeds-SPOF`
-* `why-message-dropped`
-* `oom-map-failed`
-* `what-on-same-timestamp-update`
-* `why-bootstrapping-stream-error`
-
 [[why-cant-list-all]]
 == Why can't I set `listen_address` to listen on 0.0.0.0 (all my addresses)?
 
index 3e612580ba6367ec977c196cf76ba74ac3211738..7a7a4befa792e6f824a600501f3c8602a939b061 100644 (file)
@@ -124,7 +124,7 @@ cqlsh> SELECT * FROM system_views.clients;
 ------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  address          | 127.0.0.1
  port             | 50687
- client_options   | {'CQL_VERSION': '3.4.5', 'DRIVER_NAME': 'DataStax Python Driver', 'DRIVER_VERSION': '3.25.0'}
+ client_options   | {'CQL_VERSION': '3.4.6', 'DRIVER_NAME': 'DataStax Python Driver', 'DRIVER_VERSION': '3.25.0'}
  connection_stage | ready
  driver_name      | DataStax Python Driver
  driver_version   | 3.25.0
@@ -140,7 +140,7 @@ cqlsh> SELECT * FROM system_views.clients;
 ------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  address          | 127.0.0.1
  port             | 50688
- client_options   | {'CQL_VERSION': '3.4.5', 'DRIVER_NAME': 'DataStax Python Driver', 'DRIVER_VERSION': '3.25.0'}
+ client_options   | {'CQL_VERSION': '3.4.6', 'DRIVER_NAME': 'DataStax Python Driver', 'DRIVER_VERSION': '3.25.0'}
  connection_stage | ready
  driver_name      | DataStax Python Driver
  driver_version   | 3.25.0
index e3c76b8b9e08e75a52c7b267ed40d6e2cd1d4023..be2065690d609b8f04ea21774112e49a6d9b5465 100644 (file)
@@ -297,8 +297,7 @@ to compaction:
 `disableautocompaction`::
   Disable compaction.
 `setcompactionthroughput`::
-  How fast compaction should run at most - defaults to 16MB/s, but note
-  that it is likely not possible to reach this throughput.
+  How fast compaction should run at most - defaults to 64MiB/s.
 `compactionstats`::
   Statistics about current and pending compactions.
 `compactionhistory`::
index 719886d76638a3ec7063fda2d5b94d2f708dab28..6ba42672e476422dd425d3a5bef396472feccf79 100644 (file)
@@ -7,7 +7,7 @@
             <properties>
                 <property name="project.dir">..</property>
                 <!-- the compile classpaths should be distinct per compilation unit… but it is kept simple and the build will catch errors -->
-                <property name="cassandra.classpath.jars">${project.dir}/build/lib/jars/HdrHistogram-2.1.9.jar:${project.dir}/build/lib/jars/ST4-4.0.8.jar:${project.dir}/build/lib/jars/airline-0.8.jar:${project.dir}/build/lib/jars/antlr-3.5.2.jar:${project.dir}/build/lib/jars/antlr-runtime-3.5.2.jar:${project.dir}/build/lib/jars/asm-7.1.jar:${project.dir}/build/lib/jars/assertj-core-3.15.0.jar:${project.dir}/build/lib/jars/byteman-4.0.6.jar:${project.dir}/build/lib/jars/byteman-bmunit-4.0.6.jar:${project.dir}/build/lib/jars/byteman-install-4.0.6.jar:${project.dir}/build/lib/jars/byteman-submit-4.0.6.jar:${project.dir}/build/lib/jars/caffeine-2.3.5.jar:${project.dir}/build/lib/jars/cassandra-driver-core-3.11.0-shaded.jar:${project.dir}/build/lib/jars/chronicle-bytes-2.20.111.jar:${project.dir}/build/lib/jars/chronicle-core-2.20.126.jar:${project.dir}/build/lib/jars/chronicle-queue-5.20.123.jar:${project.dir}/build/lib/jars/chronicle-threads-2.20.111.jar:${project.dir}/build/lib/jars/chronicle-wire-2.20.117.jar:${project.dir}/build/lib/jars/commons-beanutils-1.7.0.jar:${project.dir}/build/lib/jars/commons-beanutils-core-1.8.0.jar:${project.dir}/build/lib/jars/commons-cli-1.1.jar:${project.dir}/build/lib/jars/commons-codec-1.9.jar:${project.dir}/build/lib/jars/commons-collections-3.2.1.jar:${project.dir}/build/lib/jars/commons-configuration-1.6.jar:${project.dir}/build/lib/jars/commons-digester-1.8.jar:${project.dir}/build/lib/jars/commons-el-1.0.jar:${project.dir}/build/lib/jars/commons-httpclient-3.0.1.jar:${project.dir}/build/lib/jars/commons-lang-2.4.jar:${project.dir}/build/lib/jars/commons-lang3-3.11.jar:${project.dir}/build/lib/jars/commons-math-2.1.jar:${project.dir}/build/lib/jars/commons-math3-3.2.jar:${project.dir}/build/lib/jars/commons-net-1.4.1.jar:${project.dir}/build/lib/jars/compile-command-annotations-1.2.0.jar:${project.dir}/build/lib/jars/compress-lzf-0.8.4.jar:${project.dir}/build/lib/jars/concurrent-trees-2.4.0.jar:${project.dir}/build/lib/jars/e
cj-4.6.1.jar:${project.dir}/build/lib/jars/ftplet-api-1.0.0.jar:${project.dir}/build/lib/jars/ftpserver-core-1.0.0.jar:${project.dir}/build/lib/jars/ftpserver-deprecated-1.0.0-M2.jar:${project.dir}/build/lib/jars/guava-27.0-jre.jar:${project.dir}/build/lib/jars/hadoop-core-1.0.3.jar:${project.dir}/build/lib/jars/hadoop-minicluster-1.0.3.jar:${project.dir}/build/lib/jars/hadoop-test-1.0.3.jar:${project.dir}/build/lib/jars/high-scale-lib-1.0.6.jar:${project.dir}/build/lib/jars/hppc-0.8.1.jar:${project.dir}/build/lib/jars/hsqldb-1.8.0.10.jar:${project.dir}/build/lib/jars/j2objc-annotations-1.3.jar:${project.dir}/build/lib/jars/jackson-annotations-2.9.10.jar:${project.dir}/build/lib/jars/jackson-core-2.9.10.jar:${project.dir}/build/lib/jars/jackson-core-asl-1.0.1.jar:${project.dir}/build/lib/jars/jackson-databind-2.9.10.8.jar:${project.dir}/build/lib/jars/jackson-mapper-asl-1.0.1.jar:${project.dir}/build/lib/jars/jacocoagent.jar:${project.dir}/build/lib/jars/jamm-0.3.2.jar:${project.dir}/build/lib/jars/jasper-compiler-5.5.12.jar:${project.dir}/build/lib/jars/jasper-runtime-5.5.12.jar:${project.dir}/build/lib/jars/java-cup-runtime-11b-20160615.jar:${project.dir}/build/lib/jars/javax.inject-1.jar:${project.dir}/build/lib/jars/jbcrypt-0.3m.jar:${project.dir}/build/lib/jars/jcl-over-slf4j-1.7.25.jar:${project.dir}/build/lib/jars/jcommander-1.30.jar:${project.dir}/build/lib/jars/jctools-core-3.1.0.jar:${project.dir}/build/lib/jars/jersey-core-1.0.jar:${project.dir}/build/lib/jars/jersey-server-1.0.jar:${project.dir}/build/lib/jars/jets3t-0.7.1.jar:${project.dir}/build/lib/jars/jetty-6.1.26.jar:${project.dir}/build/lib/jars/jetty-util-6.1.26.jar:${project.dir}/build/lib/jars/jflex-1.8.2.jar:${project.dir}/build/lib/jars/jna-5.6.0.jar:${project.dir}/build/lib/jars/json-simple-1.1.jar:${project.dir}/build/lib/jars/jsp-2.1-6.1.14.jar:${project.dir}/build/lib/jars/jsp-api-2.1-6.1.14.jar:${project.dir}/build/lib/jars/jsr305-2.0.2.jar:${project.dir}/build/lib/jars/jsr311-api-1.0.ja
r:${project.dir}/build/lib/jars/jvm-attach-api-1.5.jar:${project.dir}/build/lib/jars/kfs-0.3.jar:${project.dir}/build/lib/jars/log4j-over-slf4j-1.7.25.jar:${project.dir}/build/lib/jars/logback-classic-1.2.3.jar:${project.dir}/build/lib/jars/logback-core-1.2.3.jar:${project.dir}/build/lib/jars/lz4-java-1.7.1.jar:${project.dir}/build/lib/jars/metrics-core-3.1.5.jar:${project.dir}/build/lib/jars/metrics-jvm-3.1.5.jar:${project.dir}/build/lib/jars/metrics-logback-3.1.5.jar:${project.dir}/build/lib/jars/mina-core-2.0.0-M5.jar:${project.dir}/build/lib/jars/mxdump-0.14.jar:${project.dir}/build/lib/jars/netty-all-4.1.58.Final.jar:${project.dir}/build/lib/jars/netty-tcnative-boringssl-static-2.0.36.Final.jar:${project.dir}/build/lib/jars/ohc-core-0.5.1.jar:${project.dir}/build/lib/jars/ohc-core-j8-0.5.1.jar:${project.dir}/build/lib/jars/oro-2.0.8.jar:${project.dir}/build/lib/jars/psjava-0.1.19.jar:${project.dir}/build/lib/jars/reporter-config-base-3.0.3.jar:${project.dir}/build/lib/jars/reporter-config3-3.0.3.jar:${project.dir}/build/lib/jars/servlet-api-2.5-6.1.14.jar:${project.dir}/build/lib/jars/sigar-1.6.4.jar:${project.dir}/build/lib/jars/sjk-cli-0.14.jar:${project.dir}/build/lib/jars/sjk-core-0.14.jar:${project.dir}/build/lib/jars/sjk-json-0.14.jar:${project.dir}/build/lib/jars/sjk-stacktrace-0.14.jar:${project.dir}/build/lib/jars/slf4j-api-1.7.25.jar:${project.dir}/build/lib/jars/snakeyaml-1.26.jar:${project.dir}/build/lib/jars/snappy-java-1.1.2.6.jar:${project.dir}/build/lib/jars/snowball-stemmer-1.3.0.581.1.jar:${project.dir}/build/lib/jars/stream-2.5.2.jar:${project.dir}/build/lib/jars/xmlenc-0.52.jar:${project.dir}/build/lib/jars/zstd-jni-1.3.8-5.jar:${project.dir}/build/test/lib/jars/animal-sniffer-annotations-1.14.jar:${project.dir}/build/test/lib/jars/ant-1.9.7.jar:${project.dir}/build/test/lib/jars/ant-junit-1.9.7.jar:${project.dir}/build/test/lib/jars/ant-launcher-1.9.7.jar:${project.dir}/build/test/lib/jars/asm-6.0.jar:${project.dir}/build/test/lib/jars/asm-
analysis-6.0.jar:${project.dir}/build/test/lib/jars/asm-commons-6.0.jar:${project.dir}/build/test/lib/jars/asm-tree-6.0.jar:${project.dir}/build/test/lib/jars/asm-util-6.0.jar:${project.dir}/build/test/lib/jars/asm-xml-6.0.jar:${project.dir}/build/test/lib/jars/assertj-core-3.15.0.jar:${project.dir}/build/test/lib/jars/awaitility-4.0.3.jar:${project.dir}/build/test/lib/jars/byte-buddy-1.10.5.jar:${project.dir}/build/test/lib/jars/byte-buddy-agent-1.10.5.jar:${project.dir}/build/test/lib/jars/byteman-4.0.6.jar:${project.dir}/build/test/lib/jars/byteman-bmunit-4.0.6.jar:${project.dir}/build/test/lib/jars/byteman-install-4.0.6.jar:${project.dir}/build/test/lib/jars/byteman-submit-4.0.6.jar:${project.dir}/build/test/lib/jars/checker-qual-2.0.0.jar:${project.dir}/build/test/lib/jars/commons-io-2.6.jar:${project.dir}/build/test/lib/jars/commons-math3-3.2.jar:${project.dir}/build/test/lib/jars/compile-command-annotations-1.2.0.jar:${project.dir}/build/test/lib/jars/dtest-api-0.0.7.jar:${project.dir}/build/test/lib/jars/error_prone_annotations-2.0.18.jar:${project.dir}/build/test/lib/jars/guava-23.5-android.jar:${project.dir}/build/test/lib/jars/hamcrest-2.2.jar:${project.dir}/build/test/lib/jars/j2objc-annotations-1.1.jar:${project.dir}/build/test/lib/jars/java-allocation-instrumenter-3.1.0.jar:${project.dir}/build/test/lib/jars/javassist-3.26.0-GA.jar:${project.dir}/build/test/lib/jars/jmh-core-1.21.jar:${project.dir}/build/test/lib/jars/jmh-generator-annprocess-1.21.jar:${project.dir}/build/test/lib/jars/jopt-simple-4.6.jar:${project.dir}/build/test/lib/jars/jsr305-1.3.9.jar:${project.dir}/build/test/lib/jars/junit-4.12.jar:${project.dir}/build/test/lib/jars/mockito-core-3.2.4.jar:${project.dir}/build/test/lib/jars/objenesis-2.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.agent-0.8.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.ant-0.8.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.core-0.8.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.report-0.8.6.
jar:${project.dir}/build/test/lib/jars/quicktheories-0.26.jar:${project.dir}/build/test/lib/jars/reflections-0.9.12.jar:${project.dir}/build/test/lib/jars/slf4j-api-1.7.25.jar:</property>
+                <property name="cassandra.classpath.jars">${project.dir}/build/lib/jars/HdrHistogram-2.1.9.jar:${project.dir}/build/lib/jars/ST4-4.0.8.jar:${project.dir}/build/lib/jars/airline-0.8.jar:${project.dir}/build/lib/jars/antlr-3.5.2.jar:${project.dir}/build/lib/jars/antlr-runtime-3.5.2.jar:${project.dir}/build/lib/jars/asm-7.1.jar:${project.dir}/build/lib/jars/assertj-core-3.15.0.jar:${project.dir}/build/lib/jars/byteman-4.0.6.jar:${project.dir}/build/lib/jars/byteman-bmunit-4.0.6.jar:${project.dir}/build/lib/jars/byteman-install-4.0.6.jar:${project.dir}/build/lib/jars/byteman-submit-4.0.6.jar:${project.dir}/build/lib/jars/caffeine-2.3.5.jar:${project.dir}/build/lib/jars/cassandra-driver-core-3.11.0-shaded.jar:${project.dir}/build/lib/jars/chronicle-bytes-2.20.111.jar:${project.dir}/build/lib/jars/chronicle-core-2.20.126.jar:${project.dir}/build/lib/jars/chronicle-queue-5.20.123.jar:${project.dir}/build/lib/jars/chronicle-threads-2.20.111.jar:${project.dir}/build/lib/jars/chronicle-wire-2.20.117.jar:${project.dir}/build/lib/jars/commons-beanutils-1.7.0.jar:${project.dir}/build/lib/jars/commons-beanutils-core-1.8.0.jar:${project.dir}/build/lib/jars/commons-cli-1.1.jar:${project.dir}/build/lib/jars/commons-codec-1.9.jar:${project.dir}/build/lib/jars/commons-collections-3.2.1.jar:${project.dir}/build/lib/jars/commons-configuration-1.6.jar:${project.dir}/build/lib/jars/commons-digester-1.8.jar:${project.dir}/build/lib/jars/commons-el-1.0.jar:${project.dir}/build/lib/jars/commons-httpclient-3.0.1.jar:${project.dir}/build/lib/jars/commons-lang3-3.11.jar:${project.dir}/build/lib/jars/commons-math-2.1.jar:${project.dir}/build/lib/jars/commons-math3-3.2.jar:${project.dir}/build/lib/jars/commons-net-1.4.1.jar:${project.dir}/build/lib/jars/compile-command-annotations-1.2.0.jar:${project.dir}/build/lib/jars/compress-lzf-0.8.4.jar:${project.dir}/build/lib/jars/concurrent-trees-2.4.0.jar:${project.dir}/build/lib/jars/ecj-4.6.1.jar:${project.dir}/build/lib/jars/ftplet-a
pi-1.0.0.jar:${project.dir}/build/lib/jars/ftpserver-core-1.0.0.jar:${project.dir}/build/lib/jars/ftpserver-deprecated-1.0.0-M2.jar:${project.dir}/build/lib/jars/guava-27.0-jre.jar:${project.dir}/build/lib/jars/hadoop-core-1.0.3.jar:${project.dir}/build/lib/jars/hadoop-minicluster-1.0.3.jar:${project.dir}/build/lib/jars/hadoop-test-1.0.3.jar:${project.dir}/build/lib/jars/high-scale-lib-1.0.6.jar:${project.dir}/build/lib/jars/hppc-0.8.1.jar:${project.dir}/build/lib/jars/hsqldb-1.8.0.10.jar:${project.dir}/build/lib/jars/j2objc-annotations-1.3.jar:${project.dir}/build/lib/jars/jackson-annotations-2.9.10.jar:${project.dir}/build/lib/jars/jackson-core-2.9.10.jar:${project.dir}/build/lib/jars/jackson-core-asl-1.0.1.jar:${project.dir}/build/lib/jars/jackson-databind-2.9.10.8.jar:${project.dir}/build/lib/jars/jackson-mapper-asl-1.0.1.jar:${project.dir}/build/lib/jars/jacocoagent.jar:${project.dir}/build/lib/jars/jamm-0.3.2.jar:${project.dir}/build/lib/jars/jasper-compiler-5.5.12.jar:${project.dir}/build/lib/jars/jasper-runtime-5.5.12.jar:${project.dir}/build/lib/jars/java-cup-runtime-11b-20160615.jar:${project.dir}/build/lib/jars/javax.inject-1.jar:${project.dir}/build/lib/jars/jbcrypt-0.3m.jar:${project.dir}/build/lib/jars/jcl-over-slf4j-1.7.25.jar:${project.dir}/build/lib/jars/jcommander-1.30.jar:${project.dir}/build/lib/jars/jctools-core-3.1.0.jar:${project.dir}/build/lib/jars/jersey-core-1.0.jar:${project.dir}/build/lib/jars/jersey-server-1.0.jar:${project.dir}/build/lib/jars/jets3t-0.7.1.jar:${project.dir}/build/lib/jars/jetty-6.1.26.jar:${project.dir}/build/lib/jars/jetty-util-6.1.26.jar:${project.dir}/build/lib/jars/jflex-1.8.2.jar:${project.dir}/build/lib/jars/jna-5.6.0.jar:${project.dir}/build/lib/jars/json-simple-1.1.jar:${project.dir}/build/lib/jars/jsp-2.1-6.1.14.jar:${project.dir}/build/lib/jars/jsp-api-2.1-6.1.14.jar:${project.dir}/build/lib/jars/jsr305-2.0.2.jar:${project.dir}/build/lib/jars/jsr311-api-1.0.jar:${project.dir}/build/lib/jars/jvm-attach-api-1.5.
jar:${project.dir}/build/lib/jars/kfs-0.3.jar:${project.dir}/build/lib/jars/log4j-over-slf4j-1.7.25.jar:${project.dir}/build/lib/jars/logback-classic-1.2.3.jar:${project.dir}/build/lib/jars/logback-core-1.2.3.jar:${project.dir}/build/lib/jars/lz4-java-1.7.1.jar:${project.dir}/build/lib/jars/metrics-core-3.1.5.jar:${project.dir}/build/lib/jars/metrics-jvm-3.1.5.jar:${project.dir}/build/lib/jars/metrics-logback-3.1.5.jar:${project.dir}/build/lib/jars/mina-core-2.0.0-M5.jar:${project.dir}/build/lib/jars/mxdump-0.14.jar:${project.dir}/build/lib/jars/netty-all-4.1.58.Final.jar:${project.dir}/build/lib/jars/netty-tcnative-boringssl-static-2.0.36.Final.jar:${project.dir}/build/lib/jars/ohc-core-0.5.1.jar:${project.dir}/build/lib/jars/ohc-core-j8-0.5.1.jar:${project.dir}/build/lib/jars/oro-2.0.8.jar:${project.dir}/build/lib/jars/psjava-0.1.19.jar:${project.dir}/build/lib/jars/reporter-config-base-3.0.3.jar:${project.dir}/build/lib/jars/reporter-config3-3.0.3.jar:${project.dir}/build/lib/jars/servlet-api-2.5-6.1.14.jar:${project.dir}/build/lib/jars/sigar-1.6.4.jar:${project.dir}/build/lib/jars/sjk-cli-0.14.jar:${project.dir}/build/lib/jars/sjk-core-0.14.jar:${project.dir}/build/lib/jars/sjk-json-0.14.jar:${project.dir}/build/lib/jars/sjk-stacktrace-0.14.jar:${project.dir}/build/lib/jars/slf4j-api-1.7.25.jar:${project.dir}/build/lib/jars/snakeyaml-1.26.jar:${project.dir}/build/lib/jars/snappy-java-1.1.2.6.jar:${project.dir}/build/lib/jars/snowball-stemmer-1.3.0.581.1.jar:${project.dir}/build/lib/jars/stream-2.5.2.jar:${project.dir}/build/lib/jars/xmlenc-0.52.jar:${project.dir}/build/lib/jars/zstd-jni-1.3.8-5.jar:${project.dir}/build/test/lib/jars/animal-sniffer-annotations-1.14.jar:${project.dir}/build/test/lib/jars/ant-1.9.7.jar:${project.dir}/build/test/lib/jars/ant-junit-1.9.7.jar:${project.dir}/build/test/lib/jars/ant-launcher-1.9.7.jar:${project.dir}/build/test/lib/jars/asm-6.0.jar:${project.dir}/build/test/lib/jars/asm-analysis-6.0.jar:${project.dir}/build/test/lib/jars
/asm-commons-6.0.jar:${project.dir}/build/test/lib/jars/asm-tree-6.0.jar:${project.dir}/build/test/lib/jars/asm-util-6.0.jar:${project.dir}/build/test/lib/jars/asm-xml-6.0.jar:${project.dir}/build/test/lib/jars/assertj-core-3.15.0.jar:${project.dir}/build/test/lib/jars/awaitility-4.0.3.jar:${project.dir}/build/test/lib/jars/byte-buddy-1.10.5.jar:${project.dir}/build/test/lib/jars/byte-buddy-agent-1.10.5.jar:${project.dir}/build/test/lib/jars/byteman-4.0.6.jar:${project.dir}/build/test/lib/jars/byteman-bmunit-4.0.6.jar:${project.dir}/build/test/lib/jars/byteman-install-4.0.6.jar:${project.dir}/build/test/lib/jars/byteman-submit-4.0.6.jar:${project.dir}/build/test/lib/jars/checker-qual-2.0.0.jar:${project.dir}/build/test/lib/jars/commons-io-2.6.jar:${project.dir}/build/test/lib/jars/commons-math3-3.2.jar:${project.dir}/build/test/lib/jars/compile-command-annotations-1.2.0.jar:${project.dir}/build/test/lib/jars/dtest-api-0.0.7.jar:${project.dir}/build/test/lib/jars/error_prone_annotations-2.0.18.jar:${project.dir}/build/test/lib/jars/guava-23.5-android.jar:${project.dir}/build/test/lib/jars/hamcrest-2.2.jar:${project.dir}/build/test/lib/jars/j2objc-annotations-1.1.jar:${project.dir}/build/test/lib/jars/java-allocation-instrumenter-3.1.0.jar:${project.dir}/build/test/lib/jars/javassist-3.26.0-GA.jar:${project.dir}/build/test/lib/jars/jmh-core-1.21.jar:${project.dir}/build/test/lib/jars/jmh-generator-annprocess-1.21.jar:${project.dir}/build/test/lib/jars/jopt-simple-4.6.jar:${project.dir}/build/test/lib/jars/jsr305-1.3.9.jar:${project.dir}/build/test/lib/jars/junit-4.12.jar:${project.dir}/build/test/lib/jars/mockito-core-3.2.4.jar:${project.dir}/build/test/lib/jars/objenesis-2.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.agent-0.8.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.ant-0.8.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.core-0.8.6.jar:${project.dir}/build/test/lib/jars/org.jacoco.report-0.8.6.jar:${project.dir}/build/test/lib/jars/quicktheorie
s-0.26.jar:${project.dir}/build/test/lib/jars/reflections-0.9.12.jar:${project.dir}/build/test/lib/jars/slf4j-api-1.7.25.jar:</property>
             </properties>
             <folders>
                 <source-folder>
index b49a29aebdaf2db719d66bcda1700d1320488bd4..39bc060485da5332fbd0f6e5915f7c8f3a6ef3b8 100644 (file)
@@ -326,19 +326,9 @@ def format_integer_type(val, colormap, thousands_sep=None, **_):
     return colorme(bval, colormap, 'int')
 
 
-# We can get rid of this in cassandra-2.2
-if sys.version_info >= (2, 7):
-    def format_integer_with_thousands_sep(val, thousands_sep=','):
-        return "{:,.0f}".format(val).replace(',', thousands_sep)
-else:
-    def format_integer_with_thousands_sep(val, thousands_sep=','):
-        if val < 0:
-            return '-' + format_integer_with_thousands_sep(-val, thousands_sep)
-        result = ''
-        while val >= 1000:
-            val, r = divmod(val, 1000)
-            result = "%s%03d%s" % (thousands_sep, r, result)
-        return "%d%s" % (val, result)
+def format_integer_with_thousands_sep(val, thousands_sep=','):
+    return "{:,.0f}".format(val).replace(',', thousands_sep)
+
 
 formatter_for('long')(format_integer_type)
 formatter_for('int')(format_integer_type)
index 69f31dced77092373bdb4ef9b679ce7f1c151e15..c1fd55edbfd6f9eef94933267496eaf79ec5756d 100644 (file)
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+"""Pylexotron uses Python's re.Scanner module as a simple regex-based tokenizer for BNF production rules"""
+
 import re
+import inspect
+import sys
+from typing import Union
 
 from cqlshlib.saferscanner import SaferScanner
 
@@ -56,8 +61,8 @@ class Hint:
         return '%s(%r)' % (self.__class__, self.text)
 
 
-def is_hint(x):
-    return isinstance(x, Hint)
+def is_hint(obj):
+    return isinstance(obj, Hint)
 
 
 class ParseContext:
@@ -115,7 +120,7 @@ class ParseContext:
                % (self.__class__.__name__, self.matched, self.remainder, self.productionname, self.bindings)
 
 
-class matcher:
+class Matcher:
 
     def __init__(self, arg):
         self.arg = arg
@@ -155,38 +160,38 @@ class matcher:
         return '%s(%r)' % (self.__class__.__name__, self.arg)
 
 
-class choice(matcher):
+class Choice(Matcher):
 
     def match(self, ctxt, completions):
         foundctxts = []
-        for a in self.arg:
-            subctxts = a.match(ctxt, completions)
+        for each in self.arg:
+            subctxts = each.match(ctxt, completions)
             foundctxts.extend(subctxts)
         return foundctxts
 
 
-class one_or_none(matcher):
+class OneOrNone(Matcher):
 
     def match(self, ctxt, completions):
         return [ctxt] + list(self.arg.match(ctxt, completions))
 
 
-class repeat(matcher):
+class Repeat(Matcher):
 
     def match(self, ctxt, completions):
         found = [ctxt]
         ctxts = [ctxt]
         while True:
             new_ctxts = []
-            for c in ctxts:
-                new_ctxts.extend(self.arg.match(c, completions))
+            for each in ctxts:
+                new_ctxts.extend(self.arg.match(each, completions))
             if not new_ctxts:
                 return found
             found.extend(new_ctxts)
             ctxts = new_ctxts
 
 
-class rule_reference(matcher):
+class RuleReference(Matcher):
 
     def match(self, ctxt, completions):
         prevname = ctxt.productionname
@@ -198,24 +203,24 @@ class rule_reference(matcher):
         return [c.with_production_named(prevname) for c in output]
 
 
-class rule_series(matcher):
+class RuleSeries(Matcher):
 
     def match(self, ctxt, completions):
         ctxts = [ctxt]
         for patpiece in self.arg:
             new_ctxts = []
-            for c in ctxts:
-                new_ctxts.extend(patpiece.match(c, completions))
+            for each in ctxts:
+                new_ctxts.extend(patpiece.match(each, completions))
             if not new_ctxts:
                 return ()
             ctxts = new_ctxts
         return ctxts
 
 
-class named_symbol(matcher):
+class NamedSymbol(Matcher):
 
     def __init__(self, name, arg):
-        matcher.__init__(self, arg)
+        Matcher.__init__(self, arg)
         self.name = name
 
     def match(self, ctxt, completions):
@@ -224,13 +229,14 @@ class named_symbol(matcher):
             # don't collect other completions under this; use a dummy
             pass_in_compls = set()
         results = self.arg.match_with_results(ctxt, pass_in_compls)
-        return [c.with_binding(self.name, ctxt.extract_orig(matchtoks)) for (c, matchtoks) in results]
+        return [c.with_binding(self.name, ctxt.extract_orig(matchtoks))
+                for (c, matchtoks) in results]
 
     def __repr__(self):
         return '%s(%r, %r)' % (self.__class__.__name__, self.name, self.arg)
 
 
-class named_collector(named_symbol):
+class NamedCollector(NamedSymbol):
 
     def match(self, ctxt, completions):
         pass_in_compls = completions
@@ -244,18 +250,21 @@ class named_collector(named_symbol):
         return output
 
 
-class terminal_matcher(matcher):
+class TerminalMatcher(Matcher):
+
+    def match(self, ctxt, completions):
+        raise NotImplementedError
 
     def pattern(self):
         raise NotImplementedError
 
 
-class regex_rule(terminal_matcher):
+class RegexRule(TerminalMatcher):
 
     def __init__(self, pat):
-        terminal_matcher.__init__(self, pat)
+        TerminalMatcher.__init__(self, pat)
         self.regex = pat
-        self.re = re.compile(pat + '$', re.I | re.S)
+        self.re = re.compile(pat + '$', re.IGNORECASE | re.DOTALL)
 
     def match(self, ctxt, completions):
         if ctxt.remainder:
@@ -269,12 +278,12 @@ class regex_rule(terminal_matcher):
         return self.regex
 
 
-class text_match(terminal_matcher):
+class TextMatch(TerminalMatcher):
     alpha_re = re.compile(r'[a-zA-Z]')
 
     def __init__(self, text):
         try:
-            terminal_matcher.__init__(self, eval(text))
+            TerminalMatcher.__init__(self, eval(text))
         except SyntaxError:
             print("bad syntax %r" % (text,))
 
@@ -289,12 +298,13 @@ class text_match(terminal_matcher):
     def pattern(self):
         # can't use (?i) here- Scanner component regex flags won't be applied
         def ignorecaseify(matchobj):
-            c = matchobj.group(0)
-            return '[%s%s]' % (c.upper(), c.lower())
+            val = matchobj.group(0)
+            return '[%s%s]' % (val.upper(), val.lower())
+
         return self.alpha_re.sub(ignorecaseify, re.escape(self.arg))
 
 
-class case_match(text_match):
+class CaseMatch(TextMatch):
 
     def match(self, ctxt, completions):
         if ctxt.remainder:
@@ -308,22 +318,22 @@ class case_match(text_match):
         return re.escape(self.arg)
 
 
-class word_match(text_match):
+class WordMatch(TextMatch):
 
     def pattern(self):
-        return r'\b' + text_match.pattern(self) + r'\b'
+        return r'\b' + TextMatch.pattern(self) + r'\b'
 
 
-class case_word_match(case_match):
+class CaseWordMatch(CaseMatch):
 
     def pattern(self):
-        return r'\b' + case_match.pattern(self) + r'\b'
+        return r'\b' + CaseMatch.pattern(self) + r'\b'
 
 
-class terminal_type_matcher(matcher):
+class TerminalTypeMatcher(Matcher):
 
     def __init__(self, tokentype, submatcher):
-        matcher.__init__(self, tokentype)
+        Matcher.__init__(self, tokentype)
         self.tokentype = tokentype
         self.submatcher = submatcher
 
@@ -340,18 +350,24 @@ class terminal_type_matcher(matcher):
 
 
 class ParsingRuleSet:
+    """Define the BNF tokenization rules for cql3handling.syntax_rules. Backus-Naur Form consists of
+       - Production rules in the form: Left-Hand-Side ::= Right-Hand-Side.  The LHS is a non-terminal.
+       - Productions or non-terminal symbols
+       - Terminal symbols.  Every terminal is a single token.
+    """
+
     RuleSpecScanner = SaferScanner([
-        (r'::=', lambda s, t: t),
+        (r'::=', lambda s, t: t),                   # BNF rule definition
         (r'\[[a-z0-9_]+\]=', lambda s, t: ('named_collector', t[1:-2])),
         (r'[a-z0-9_]+=', lambda s, t: ('named_symbol', t[:-1])),
         (r'/(\[\^?.[^]]*\]|[^/]|\\.)*/', lambda s, t: ('regex', t[1:-1].replace(r'\/', '/'))),
-        (r'"([^"]|\\.)*"', lambda s, t: ('litstring', t)),
+        (r'"([^"]|\\.)*"', lambda s, t: ('string_literal', t)),
         (r'<[^>]*>', lambda s, t: ('reference', t[1:-1])),
         (r'\bJUNK\b', lambda s, t: ('junk', t)),
         (r'[@()|?*;]', lambda s, t: t),
-        (r'\s+', None),
+        (r'\s+', None),                             # whitespace
         (r'#[^\n]*', None),
-    ], re.I | re.S | re.U)
+    ], re.IGNORECASE | re.DOTALL | re.UNICODE)
 
     def __init__(self):
         self.ruleset = {}
@@ -368,7 +384,7 @@ class ParsingRuleSet:
     def parse_rules(cls, rulestr):
         tokens, unmatched = cls.RuleSpecScanner.scan(rulestr)
         if unmatched:
-            raise LexingError.from_text(rulestr, unmatched, msg="Syntax rules unparseable")
+            raise LexingError.from_text(rulestr, unmatched, msg="Syntax rules are unparseable")
         rules = {}
         terminals = []
         tokeniter = iter(tokens)
@@ -379,9 +395,9 @@ class ParsingRuleSet:
                     raise ValueError('Unexpected token %r; expected "::="' % (assign,))
                 name = t[1]
                 production = cls.read_rule_tokens_until(';', tokeniter)
-                if isinstance(production, terminal_matcher):
+                if isinstance(production, TerminalMatcher):
                     terminals.append((name, production))
-                    production = terminal_type_matcher(name, production)
+                    production = TerminalTypeMatcher(name, production)
                 rules[name] = production
             else:
                 raise ValueError('Unexpected token %r; expected name' % (t,))
@@ -392,11 +408,11 @@ class ParsingRuleSet:
         if isinstance(pieces, (tuple, list)):
             if len(pieces) == 1:
                 return pieces[0]
-            return rule_series(pieces)
+            return RuleSeries(pieces)
         return pieces
 
     @classmethod
-    def read_rule_tokens_until(cls, endtoks, tokeniter):
+    def read_rule_tokens_until(cls, endtoks: Union[str, int], tokeniter):
         if isinstance(endtoks, str):
             endtoks = (endtoks,)
         counttarget = None
@@ -411,32 +427,32 @@ class ParsingRuleSet:
             if t in endtoks:
                 if len(mybranches) == 1:
                     return cls.mkrule(mybranches[0])
-                return choice(list(map(cls.mkrule, mybranches)))
+                return Choice(list(map(cls.mkrule, mybranches)))
             if isinstance(t, tuple):
                 if t[0] == 'reference':
-                    t = rule_reference(t[1])
-                elif t[0] == 'litstring':
+                    t = RuleReference(t[1])
+                elif t[0] == 'string_literal':
                     if t[1][1].isalnum() or t[1][1] == '_':
-                        t = word_match(t[1])
+                        t = WordMatch(t[1])
                     else:
-                        t = text_match(t[1])
+                        t = TextMatch(t[1])
                 elif t[0] == 'regex':
-                    t = regex_rule(t[1])
+                    t = RegexRule(t[1])
                 elif t[0] == 'named_collector':
-                    t = named_collector(t[1], cls.read_rule_tokens_until(1, tokeniter))
+                    t = NamedCollector(t[1], cls.read_rule_tokens_until(1, tokeniter))
                 elif t[0] == 'named_symbol':
-                    t = named_symbol(t[1], cls.read_rule_tokens_until(1, tokeniter))
+                    t = NamedSymbol(t[1], cls.read_rule_tokens_until(1, tokeniter))
             elif t == '(':
                 t = cls.read_rule_tokens_until(')', tokeniter)
             elif t == '?':
-                t = one_or_none(myrules.pop(-1))
+                t = OneOrNone(myrules.pop(-1))
             elif t == '*':
-                t = repeat(myrules.pop(-1))
+                t = Repeat(myrules.pop(-1))
             elif t == '@':
-                x = next(tokeniter)
-                if not isinstance(x, tuple) or x[0] != 'litstring':
-                    raise ValueError("Unexpected token %r following '@'" % (x,))
-                t = case_match(x[1])
+                val = next(tokeniter)
+                if not isinstance(val, tuple) or val[0] != 'string_literal':
+                    raise ValueError("Unexpected token %r following '@'" % (val,))
+                t = CaseMatch(val[1])
             elif t == '|':
                 myrules = []
                 mybranches.append(myrules)
@@ -447,7 +463,7 @@ class ParsingRuleSet:
             if countsofar == counttarget:
                 if len(mybranches) == 1:
                     return cls.mkrule(mybranches[0])
-                return choice(list(map(cls.mkrule, mybranches)))
+                return Choice(list(map(cls.mkrule, mybranches)))
         raise ValueError('Unexpected end of rule tokens')
 
     def append_rules(self, rulestr):
@@ -465,8 +481,9 @@ class ParsingRuleSet:
             if name == 'JUNK':
                 return None
             return lambda s, t: (name, t, s.match.span())
+
         regexes = [(p.pattern(), make_handler(name)) for (name, p) in self.terminals]
-        return SaferScanner(regexes, re.I | re.S | re.U).scan
+        return SaferScanner(regexes, re.IGNORECASE | re.DOTALL | re.UNICODE).scan
 
     def lex(self, text):
         if self.scanner is None:
@@ -487,9 +504,9 @@ class ParsingRuleSet:
         bindings = {}
         if srcstr is not None:
             bindings['*SRC*'] = srcstr
-        for c in self.parse(startsymbol, tokens, init_bindings=bindings):
-            if not c.remainder:
-                return c
+        for val in self.parse(startsymbol, tokens, init_bindings=bindings):
+            if not val.remainder:
+                return val
 
     def lex_and_parse(self, text, startsymbol='Start'):
         return self.parse(startsymbol, self.lex(text), init_bindings={'*SRC*': text})
@@ -511,9 +528,6 @@ class ParsingRuleSet:
         return completions
 
 
-import sys
-
-
 class Debugotron(set):
     depth = 10
 
@@ -525,9 +539,9 @@ class Debugotron(set):
         self._note_addition(item)
         set.add(self, item)
 
-    def _note_addition(self, foo):
-        self.stream.write("\nitem %r added by:\n" % (foo,))
-        frame = sys._getframe().f_back.f_back
+    def _note_addition(self, item):
+        self.stream.write("\nitem %r added by:\n" % (item,))
+        frame = inspect.currentframe().f_back.f_back
         for i in range(self.depth):
             name = frame.f_code.co_name
             filename = frame.f_code.co_filename
diff --git a/redhat/noboolean/README b/redhat/noboolean/README
new file mode 100644 (file)
index 0000000..33ab959
--- /dev/null
@@ -0,0 +1,23 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+These files create the 'noboolean' rpm packaging, using the same build procedure as the
+normal packages. They differ from the other packages by not using boolean dependency
+logic, and are intended for systems using rpmlib < 4.13.
+
+See CASSANDRA-17765 for more information.
diff --git a/redhat/noboolean/cassandra b/redhat/noboolean/cassandra
new file mode 120000 (symlink)
index 0000000..d9af9ad
--- /dev/null
@@ -0,0 +1 @@
+../cassandra
\ No newline at end of file
diff --git a/redhat/noboolean/cassandra.conf b/redhat/noboolean/cassandra.conf
new file mode 120000 (symlink)
index 0000000..7c12fb6
--- /dev/null
@@ -0,0 +1 @@
+../cassandra.conf
\ No newline at end of file
diff --git a/redhat/noboolean/cassandra.in.sh b/redhat/noboolean/cassandra.in.sh
new file mode 120000 (symlink)
index 0000000..115b45b
--- /dev/null
@@ -0,0 +1 @@
+../cassandra.in.sh
\ No newline at end of file
diff --git a/redhat/noboolean/cassandra.spec b/redhat/noboolean/cassandra.spec
new file mode 100644 (file)
index 0000000..8c04fdb
--- /dev/null
@@ -0,0 +1,211 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+%define __jar_repack %{nil}
+# Turn off the brp-python-bytecompile script
+%global __os_install_post %(echo '%{__os_install_post}' | sed -e 's!/usr/lib[^[:space:]]*/brp-python-bytecompile[[:space:]].*$!!g')
+
+# rpmbuild should not barf when it spots we ship
+# binary executable files in our 'noarch' package
+%define _binaries_in_noarch_packages_terminate_build   0
+
+%define __python /usr/bin/python3
+
+%global username cassandra
+
+# input of ~alphaN, ~betaN, ~rcN package versions need to retain upstream '-alphaN, etc' version for sources
+%define upstream_version %(echo %{version} | sed -r 's/~/-/g')
+%define relname apache-cassandra-%{upstream_version}
+
+Name:          cassandra
+Version:       %{version}
+Release:       %{revision}
+Summary:       Cassandra is a highly scalable, eventually consistent, distributed, structured key-value store.
+
+Group:         Development/Libraries
+License:       Apache Software License 2.0
+URL:           http://cassandra.apache.org/
+Source0:       %{relname}-src.tar.gz
+BuildRoot:     %{_tmppath}/%{relname}root-%(%{__id_u} -n)
+
+BuildRequires: ant >= 1.9
+BuildRequires: ant-junit >= 1.9
+
+Requires:      jre >= 1.8.0
+Requires:      python(abi) >= 3.6
+Requires:      procps-ng >= 3.3
+Requires(pre): user(cassandra)
+Requires(pre): group(cassandra)
+Requires(pre): shadow-utils
+Provides:      user(cassandra)
+Provides:      group(cassandra)
+
+BuildArch:     noarch
+
+# Don't examine the .so files we bundle for dependencies
+AutoReqProv:   no
+
+%description
+Cassandra is a distributed (peer-to-peer) system for the management and storage of structured data.
+
+%prep
+%setup -q -n %{relname}-src
+
+%build
+export LANG=en_US.UTF-8
+export JAVA_TOOL_OPTIONS="-Dfile.encoding=UTF-8"
+ant clean jar -Dversion=%{upstream_version}
+
+%install
+%{__rm} -rf %{buildroot}
+mkdir -p %{buildroot}/%{_sysconfdir}/%{username}
+mkdir -p %{buildroot}/usr/share/%{username}
+mkdir -p %{buildroot}/usr/share/%{username}/lib
+mkdir -p %{buildroot}/%{_sysconfdir}/%{username}/default.conf
+mkdir -p %{buildroot}/%{_sysconfdir}/rc.d/init.d
+mkdir -p %{buildroot}/%{_sysconfdir}/security/limits.d
+mkdir -p %{buildroot}/%{_sysconfdir}/default
+mkdir -p %{buildroot}/usr/sbin
+mkdir -p %{buildroot}/usr/bin
+mkdir -p %{buildroot}/var/lib/%{username}/commitlog
+mkdir -p %{buildroot}/var/lib/%{username}/data
+mkdir -p %{buildroot}/var/lib/%{username}/saved_caches
+mkdir -p %{buildroot}/var/lib/%{username}/hints
+mkdir -p %{buildroot}/var/run/%{username}
+mkdir -p %{buildroot}/var/log/%{username}
+( cd pylib && %{__python} setup.py install --no-compile --root %{buildroot}; )
+
+# patches for data and log paths
+patch -p1 < debian/patches/cassandra_yaml_dirs.diff
+patch -p1 < debian/patches/cassandra_logdir_fix.diff
+# uncomment hints_directory path
+sed -i 's/^# hints_directory:/hints_directory:/' conf/cassandra.yaml
+
+# remove other files not being installed
+rm -f bin/*.orig
+rm -f bin/cassandra.in.sh
+rm -f lib/sigar-bin/*winnt*  # strip segfaults on dll..
+rm -f tools/bin/cassandra.in.sh
+
+# copy default configs
+cp -pr conf/* %{buildroot}/%{_sysconfdir}/%{username}/default.conf/
+
+# step on default config with our redhat one
+cp -p redhat/%{username}.in.sh %{buildroot}/usr/share/%{username}/%{username}.in.sh
+cp -p redhat/%{username} %{buildroot}/%{_sysconfdir}/rc.d/init.d/%{username}
+cp -p redhat/%{username}.conf %{buildroot}/%{_sysconfdir}/security/limits.d/
+cp -p redhat/default %{buildroot}/%{_sysconfdir}/default/%{username}
+
+# copy cassandra bundled libs
+cp -pr lib/* %{buildroot}/usr/share/%{username}/lib/
+
+# copy stress jar
+cp -p build/tools/lib/stress.jar %{buildroot}/usr/share/%{username}/
+
+# copy fqltool jar
+cp -p build/tools/lib/fqltool.jar %{buildroot}/usr/share/%{username}/
+
+# copy binaries
+mv bin/cassandra %{buildroot}/usr/sbin/
+cp -p bin/* %{buildroot}/usr/bin/
+cp -p tools/bin/* %{buildroot}/usr/bin/
+
+# copy cassandra jar
+cp build/apache-cassandra-%{upstream_version}.jar %{buildroot}/usr/share/%{username}/
+
+%clean
+%{__rm} -rf %{buildroot}
+
+%pre
+getent group %{username} >/dev/null || groupadd -r %{username}
+getent passwd %{username} >/dev/null || \
+useradd -d /var/lib/%{username} -g %{username} -M -r %{username}
+exit 0
+
+%files
+%defattr(0644,root,root,0755)
+%doc CHANGES.txt LICENSE.txt README.asc NEWS.txt NOTICE.txt CASSANDRA-14092.txt
+%attr(755,root,root) %{_bindir}/auditlogviewer
+%attr(755,root,root) %{_bindir}/jmxtool
+%attr(755,root,root) %{_bindir}/cassandra-stress
+%attr(755,root,root) %{_bindir}/cqlsh
+%attr(755,root,root) %{_bindir}/cqlsh.py
+%attr(755,root,root) %{_bindir}/debug-cql
+%attr(755,root,root) %{_bindir}/fqltool
+%attr(755,root,root) %{_bindir}/generatetokens
+%attr(755,root,root) %{_bindir}/nodetool
+%attr(755,root,root) %{_bindir}/sstableloader
+%attr(755,root,root) %{_bindir}/sstablescrub
+%attr(755,root,root) %{_bindir}/sstableupgrade
+%attr(755,root,root) %{_bindir}/sstableutil
+%attr(755,root,root) %{_bindir}/sstableverify
+%attr(755,root,root) %{_bindir}/stop-server
+%attr(755,root,root) %{_sbindir}/cassandra
+%attr(755,root,root) /%{_sysconfdir}/rc.d/init.d/%{username}
+%{_sysconfdir}/default/%{username}
+%{_sysconfdir}/security/limits.d/%{username}.conf
+/usr/share/%{username}*
+%config(noreplace) /%{_sysconfdir}/%{username}
+%attr(750,%{username},%{username}) %config(noreplace) /var/lib/%{username}/*
+%attr(750,%{username},%{username}) /var/log/%{username}*
+%attr(750,%{username},%{username}) /var/run/%{username}*
+%{python_sitelib}/cqlshlib/
+%{python_sitelib}/cassandra_pylib*.egg-info
+
+%post
+alternatives --install /%{_sysconfdir}/%{username}/conf %{username} /%{_sysconfdir}/%{username}/default.conf/ 0
+exit 0
+
+%preun
+# only delete alternative on removal, not upgrade
+if [ "$1" = "0" ]; then
+    alternatives --remove %{username} /%{_sysconfdir}/%{username}/default.conf/
+fi
+exit 0
+
+
+%package tools
+Summary:       Extra tools for Cassandra. Cassandra is a highly scalable, eventually consistent, distributed, structured key-value store.
+Group:         Development/Libraries
+Requires:      cassandra = %{version}-%{revision}
+
+%description tools
+Cassandra is a distributed (peer-to-peer) system for the management and storage of structured data.
+.
+This package contains extra tools for working with Cassandra clusters.
+
+%files tools
+%attr(755,root,root) %{_bindir}/sstabledump
+%attr(755,root,root) %{_bindir}/compaction-stress
+%attr(755,root,root) %{_bindir}/sstableexpiredblockers
+%attr(755,root,root) %{_bindir}/sstablelevelreset
+%attr(755,root,root) %{_bindir}/sstablemetadata
+%attr(755,root,root) %{_bindir}/sstableofflinerelevel
+%attr(755,root,root) %{_bindir}/sstablerepairedset
+%attr(755,root,root) %{_bindir}/sstablesplit
+%attr(755,root,root) %{_bindir}/auditlogviewer
+%attr(755,root,root) %{_bindir}/jmxtool
+%attr(755,root,root) %{_bindir}/fqltool
+%attr(755,root,root) %{_bindir}/generatetokens
+%attr(755,root,root) %{_bindir}/hash_password
+
+
+%changelog
+* Mon Dec 05 2016 Michael Shuler <mshuler@apache.org>
+- 2.1.17, 2.2.9, 3.0.11, 3.10
+- Reintroduce RPM packaging
diff --git a/redhat/noboolean/default b/redhat/noboolean/default
new file mode 120000 (symlink)
index 0000000..446d58f
--- /dev/null
@@ -0,0 +1 @@
+../default
\ No newline at end of file
index 2643e0a6b56719de0725871287227766e0bdd539..b349e165275520f95b73f3ccd1e6c6bb46557639 100644 (file)
@@ -414,12 +414,12 @@ simpleUnaliasedSelector returns [Selectable.Raw s]
     ;
 
 selectionFunction returns [Selectable.Raw s]
-    : K_COUNT '(' '\*' ')'                      { $s = Selectable.WithFunction.Raw.newCountRowsFunction(); }
-    | K_MAXWRITETIME '(' c=sident ')'           { $s = new Selectable.WritetimeOrTTL.Raw(c, Selectable.WritetimeOrTTL.Kind.MAX_WRITE_TIME); }
-    | K_WRITETIME '(' c=sident ')'              { $s = new Selectable.WritetimeOrTTL.Raw(c, Selectable.WritetimeOrTTL.Kind.WRITE_TIME); }
-    | K_TTL       '(' c=sident ')'              { $s = new Selectable.WritetimeOrTTL.Raw(c, Selectable.WritetimeOrTTL.Kind.TTL); }
-    | K_CAST      '(' sn=unaliasedSelector K_AS t=native_type ')' {$s = new Selectable.WithCast.Raw(sn, t);}
-    | f=functionName args=selectionFunctionArgs { $s = new Selectable.WithFunction.Raw(f, args); }
+    : K_COUNT        '(' '\*' ')'                                    { $s = Selectable.WithFunction.Raw.newCountRowsFunction(); }
+    | K_MAXWRITETIME '(' c=sident m=selectorModifier[c] ')'          { $s = new Selectable.WritetimeOrTTL.Raw(c, m, Selectable.WritetimeOrTTL.Kind.MAX_WRITE_TIME); }
+    | K_WRITETIME    '(' c=sident m=selectorModifier[c] ')'          { $s = new Selectable.WritetimeOrTTL.Raw(c, m, Selectable.WritetimeOrTTL.Kind.WRITE_TIME); }
+    | K_TTL          '(' c=sident m=selectorModifier[c] ')'          { $s = new Selectable.WritetimeOrTTL.Raw(c, m, Selectable.WritetimeOrTTL.Kind.TTL); }
+    | K_CAST         '(' sn=unaliasedSelector K_AS t=native_type ')' { $s = new Selectable.WithCast.Raw(sn, t);}
+    | f=functionName args=selectionFunctionArgs                      { $s = new Selectable.WithFunction.Raw(f, args); }
     ;
 
 selectionLiteral returns [Term.Raw value]
index 0344de921db02cc4ab0d4e172855c9db8e38746b..c2272707ecd25dce3ebb95ca1cfe0b7a271a8254 100644 (file)
@@ -43,6 +43,7 @@ import org.apache.cassandra.db.ConsistencyLevel;
 import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.exceptions.*;
 import org.apache.cassandra.service.ClientState;
+import org.apache.cassandra.service.StorageProxy;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.transport.messages.ResultMessage;
 import org.apache.cassandra.utils.ByteBufferUtil;
@@ -386,6 +387,12 @@ public class CassandraRoleManager implements IRoleManager
     {
         // The delay is to give the node a chance to see its peers before attempting the operation
         ScheduledExecutors.optionalTasks.scheduleSelfRecurring(() -> {
+            if (!StorageProxy.isSafeToPerformRead())
+            {
+                logger.trace("Setup task may not run due to it not being safe to perform reads... rescheduling");
+                scheduleSetupTask(setupTask);
+                return;
+            }
             try
             {
                 setupTask.call();
index 02745fe925b2df665962073366c44344dd89c1e0..e5038c09447c8fbb2af919bb01ec3765a1e2b376 100644 (file)
@@ -82,11 +82,18 @@ public interface IInternodeAuthenticator
     }
 
     /**
-     * Enum that represents connection type of an internode connection.
+     * Enum that represents the connection type of an internode connection.
+     *
+     * INBOUND - called after connection established, with certificate available if present.
+     * OUTBOUND - called after connection established, with certificate available if present.
+     * OUTBOUND_PRECONNECT - called before initiating a connection, without certificate available.
+     * The outbound connection will be authenticated with the certificate once a redirected connection is established.
+     * This is an extra check that can be used to detect misconfiguration before reconnection, or ignored by returning true.
      */
     enum InternodeConnectionDirection
     {
         INBOUND,
-        OUTBOUND
+        OUTBOUND,
+        OUTBOUND_PRECONNECT
     }
 }
diff --git a/src/java/org/apache/cassandra/concurrent/DebuggableTask.java b/src/java/org/apache/cassandra/concurrent/DebuggableTask.java
new file mode 100644 (file)
index 0000000..ac04eb4
--- /dev/null
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.concurrent;
+
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Recursive.INTERFACES;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+/**
+ * Interface to include on a Runnable or Callable submitted to the {@link SharedExecutorPool} to provide more
+ * detailed diagnostics.
+ */
+@Shared(scope = SIMULATION, inner = INTERFACES)
+public interface DebuggableTask
+{
+    public long creationTimeNanos();
+
+    public long startTimeNanos();
+
+    public String description();
+
+    interface RunnableDebuggableTask extends Runnable, DebuggableTask {}
+
+    /**
+     * Wraps a {@link DebuggableTask} to include the name of the thread running it.
+     */
+    public static class RunningDebuggableTask implements DebuggableTask
+    {
+        private final DebuggableTask task;
+        private final String threadId;
+
+        public RunningDebuggableTask(String threadId, DebuggableTask task)
+        {
+            this.task = task;
+            this.threadId = threadId;
+        }
+
+        public String threadId()
+        {
+            return threadId;
+        }
+
+        public boolean hasTask()
+        {
+            return task != null;
+        }
+
+        @Override
+        public long creationTimeNanos()
+        {
+            assert hasTask();
+            return task.creationTimeNanos();
+        }
+
+        @Override
+        public long startTimeNanos()
+        {
+            assert hasTask();
+            return task.startTimeNanos();
+        }
+
+        @Override
+        public String description()
+        {
+            assert hasTask();
+            return task.description();
+        }
+    }
+}
index 7fa7dcbd5466262cf934da86c546261dcdf2ecd5..27ab885e234eddac4fae7a5f6d6d63f3e8993bca 100644 (file)
@@ -21,6 +21,7 @@ package org.apache.cassandra.concurrent;
 import java.util.concurrent.Callable;
 import java.util.concurrent.Future;
 
+import org.apache.cassandra.concurrent.DebuggableTask.RunnableDebuggableTask;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -105,6 +106,14 @@ public class ExecutionFailure
         return enforceOptions(withResources, wrap, false);
     }
 
+    /**
+     * @see #suppressing(WithResources, Runnable)
+     */
+    static RunnableDebuggableTask suppressingDebuggable(WithResources withResources, RunnableDebuggableTask debuggable)
+    {
+        return enforceOptionsDebuggable(withResources, debuggable, false);
+    }
+
     /**
      * Encapsulate the execution, propagating or suppressing any exceptions as requested.
      *
@@ -119,7 +128,7 @@ public class ExecutionFailure
             @Override
             public void run()
             {
-                try (Closeable close = withResources.get())
+                try (@SuppressWarnings("unused") Closeable close = withResources.get())
                 {
                     wrap.run();
                 }
@@ -139,6 +148,54 @@ public class ExecutionFailure
         };
     }
 
+    /**
+     * @see #enforceOptions(WithResources, Runnable, boolean)
+     */
+    private static RunnableDebuggableTask enforceOptionsDebuggable(WithResources withResources, RunnableDebuggableTask debuggable, boolean propagate)
+    {
+        return new RunnableDebuggableTask()
+        {
+            @Override
+            public void run()
+            {
+                try (@SuppressWarnings("unused") Closeable close = withResources.get())
+                {
+                    debuggable.run();
+                }
+                catch (Throwable t)
+                {
+                    handle(t);
+                    if (propagate)
+                        throw t;
+                }
+            }
+
+            @Override
+            public String toString()
+            {
+                return debuggable.toString();
+            }
+
+            @Override
+            public long creationTimeNanos()
+            {
+                return debuggable.creationTimeNanos();
+            }
+
+            @Override
+            public long startTimeNanos()
+            {
+                return debuggable.startTimeNanos();
+            }
+
+            @Override
+            public String description()
+            {
+                return debuggable.description();
+            }
+        };
+    }
+
     /**
      * See {@link #enforceOptions(WithResources, Callable)}
      */
@@ -158,7 +215,7 @@ public class ExecutionFailure
             @Override
             public V call() throws Exception
             {
-                try (Closeable close = withResources.get())
+                try (@SuppressWarnings("unused") Closeable close = withResources.get())
                 {
                     return wrap.call();
                 }
index f7d93e8379362c8ff0c54cac37a4bc60327df4ac..0a62747628e81cade1c0dde3a6509d49c5b5b7a0 100644 (file)
@@ -183,14 +183,25 @@ public interface ExecutorFactory extends ExecutorBuilderFactory.Jmxable<Executor
         // deliberately not volatile to ensure zero overhead outside of testing;
         // depend on other memory visibility primitives to ensure visibility
         private static ExecutorFactory FACTORY = new ExecutorFactory.Default(Global.class.getClassLoader(), null, JVMStabilityInspector::uncaughtException);
+        private static boolean modified;
+
         public static ExecutorFactory executorFactory()
         {
             return FACTORY;
         }
 
-        public static void unsafeSet(ExecutorFactory executorFactory)
+        public static synchronized void unsafeSet(ExecutorFactory executorFactory)
         {
             FACTORY = executorFactory;
+            modified = true;
+        }
+
+        public static synchronized boolean tryUnsafeSet(ExecutorFactory executorFactory)
+        {
+            if (modified)
+                return false;
+            unsafeSet(executorFactory);
+            return true;
         }
     }
 
index 2348ff6bf88c52bec3c07c0e1247d601361d373d..763884a2dad28f5309b4a2b7b09658e9c3fa4467 100644 (file)
@@ -20,9 +20,10 @@ package org.apache.cassandra.concurrent;
 
 import java.util.concurrent.Callable;
 
-import org.apache.cassandra.utils.concurrent.RunnableFuture;
+import javax.annotation.Nullable;
 
 import org.apache.cassandra.utils.concurrent.AsyncFuture;
+import org.apache.cassandra.utils.concurrent.RunnableFuture;
 
 /**
  * A FutureTask that utilises Cassandra's {@link AsyncFuture}, making it compatible with {@link ExecutorPlus}.
@@ -31,15 +32,28 @@ import org.apache.cassandra.utils.concurrent.AsyncFuture;
 public class FutureTask<V> extends AsyncFuture<V> implements RunnableFuture<V>
 {
     private Callable<? extends V> call;
+    private volatile DebuggableTask debuggable;
 
     public FutureTask(Callable<? extends V> call)
     {
-        this.call = call;
+        this(call, call instanceof DebuggableTask ? (DebuggableTask) call : null);
     }
 
     public FutureTask(Runnable run)
     {
-        this.call = callable(run);
+        this(callable(run), run instanceof DebuggableTask ? (DebuggableTask) run : null);
+    }
+
+    private FutureTask(Callable<? extends V> call, DebuggableTask debuggable)
+    {
+        this.call = call;
+        this.debuggable = debuggable;
+    }
+
+    @Nullable
+    DebuggableTask debuggableTask()
+    {
+        return debuggable;
     }
 
     V call() throws Exception
@@ -63,6 +77,7 @@ public class FutureTask<V> extends AsyncFuture<V> implements RunnableFuture<V>
         finally
         {
             call = null;
+            debuggable = null;
         }
     }
 
index c7b9abf719ab2064f08a0b7e1d4e9cb4aaf83e3c..fe16c950dfdad5e1527967fc60c72298c7854835 100644 (file)
@@ -48,6 +48,8 @@ final class SEPWorker extends AtomicReference<SEPWorker.Work> implements Runnabl
     long prevStopCheck = 0;
     long soleSpinnerSpinTime = 0;
 
+    private final AtomicReference<Runnable> currentTask = new AtomicReference<>();
+
     SEPWorker(ThreadGroup threadGroup, Long workerId, Work initialState, SharedExecutorPool pool)
     {
         this.pool = pool;
@@ -58,9 +60,27 @@ final class SEPWorker extends AtomicReference<SEPWorker.Work> implements Runnabl
         thread.start();
     }
 
+    /**
+     * @return the current {@link DebuggableTask}, if one exists
+     */
+    public DebuggableTask currentDebuggableTask()
+    {
+        // can change after null check so go off local reference
+        Runnable task = currentTask.get();
+
+        // Local read and mutation Runnables are themselves debuggable
+        if (task instanceof DebuggableTask)
+            return (DebuggableTask) task;
+
+        if (task instanceof FutureTask)
+            return ((FutureTask<?>) task).debuggableTask();
+
+        return null;
+    }
+
     public void run()
     {
-        /**
+        /*
          * we maintain two important invariants:
          * 1)   after exiting spinning phase, we ensure at least one more task on _each_ queue will be processed
          *      promptly after we begin, assuming any are outstanding on any pools. this is to permit producers to
@@ -101,8 +121,10 @@ final class SEPWorker extends AtomicReference<SEPWorker.Work> implements Runnabl
                 if (assigned == null)
                     continue;
                 if (SET_THREAD_NAME)
-                    Thread.currentThread().setName(assigned.name + "-" + workerId);
+                    Thread.currentThread().setName(assigned.name + '-' + workerId);
+
                 task = assigned.tasks.poll();
+                currentTask.lazySet(task);
 
                 // if we do have tasks assigned, nobody will change our state so we can simply set it to WORKING
                 // (which is also a state that will never be interrupted externally)
@@ -128,9 +150,12 @@ final class SEPWorker extends AtomicReference<SEPWorker.Work> implements Runnabl
                         break;
 
                     task = assigned.tasks.poll();
+                    currentTask.lazySet(task);
                 }
 
                 // return our work permit, and maybe signal shutdown
+                currentTask.lazySet(null);
+
                 if (status != RETURNED_WORK_PERMIT)
                     assigned.returnWorkPermit();
 
@@ -173,6 +198,11 @@ final class SEPWorker extends AtomicReference<SEPWorker.Work> implements Runnabl
                 logger.error("Unexpected exception killed worker", t);
             }
         }
+        finally
+        {
+            currentTask.lazySet(null);
+            pool.workerEnded(this);
+        }
     }
 
     // try to assign this worker the provided work
@@ -420,4 +450,22 @@ final class SEPWorker extends AtomicReference<SEPWorker.Work> implements Runnabl
             return assigned != null;
         }
     }
+
+    @Override
+    public String toString()
+    {
+        return thread.getName();
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return workerId.intValue();
+    }
+
+    @Override
+    public boolean equals(Object obj)
+    {
+        return obj == this;
+    }
 }
index f74854f9cb015f7cf61f50fe54108d85919a0dc2..0631ec61da011ad3e82b9463391c424d94f9766d 100644 (file)
  */
 package org.apache.cassandra.concurrent;
 
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.TimeUnit;
@@ -26,6 +29,9 @@ import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.LockSupport;
+import java.util.stream.Collectors;
+
+import org.apache.cassandra.concurrent.DebuggableTask.RunningDebuggableTask;
 
 import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
 import static org.apache.cassandra.concurrent.SEPWorker.Work;
@@ -77,6 +83,8 @@ public class SharedExecutorPool
     final ConcurrentSkipListMap<Long, SEPWorker> spinning = new ConcurrentSkipListMap<>();
     // the collection of threads that have been asked to stop/deschedule - new workers are scheduled from here last
     final ConcurrentSkipListMap<Long, SEPWorker> descheduled = new ConcurrentSkipListMap<>();
+    // All SEPWorkers that are currently running
+    private final Set<SEPWorker> allWorkers = Collections.newSetFromMap(new ConcurrentHashMap<>());
 
     volatile boolean shuttingDown = false;
 
@@ -102,7 +110,23 @@ public class SharedExecutorPool
                 return;
 
         if (!work.isStop())
-            new SEPWorker(threadGroup, workerId.incrementAndGet(), work, this);
+        {
+            SEPWorker worker = new SEPWorker(threadGroup, workerId.incrementAndGet(), work, this);
+            allWorkers.add(worker);
+        }
+    }
+
+    void workerEnded(SEPWorker worker)
+    {
+        allWorkers.remove(worker);
+    }
+
+    public List<RunningDebuggableTask> runningTasks()
+    {
+        return allWorkers.stream()
+                         .map(worker -> new RunningDebuggableTask(worker.toString(), worker.currentDebuggableTask()))
+                         .filter(RunningDebuggableTask::hasTask)
+                         .collect(Collectors.toList());
     }
 
     void maybeStartSpinningWorker()
index 56087d950b28b99da7efce64395812ab377cf9bf..faeabe6c4c77c67bd727d3a01b3ecf6e0bff51ea 100644 (file)
@@ -20,6 +20,7 @@ package org.apache.cassandra.concurrent;
 
 import java.util.concurrent.Callable;
 
+import org.apache.cassandra.concurrent.DebuggableTask.RunnableDebuggableTask;
 import org.apache.cassandra.utils.Shared;
 import org.apache.cassandra.utils.WithResources;
 import org.apache.cassandra.utils.concurrent.RunnableFuture;
@@ -127,6 +128,9 @@ public interface TaskFactory
         @Override
         public Runnable toExecute(Runnable runnable)
         {
+            if (runnable instanceof RunnableDebuggableTask)
+                return ExecutionFailure.suppressingDebuggable(ExecutorLocals.propagate(), (RunnableDebuggableTask) runnable);
+
             // no reason to propagate exception when it is inaccessible to caller
             return ExecutionFailure.suppressing(ExecutorLocals.propagate(), runnable);
         }
index 6eea3239765e5eebcedb764b3976de53f6c98823..00c2f4cd28bef444d83c74bda7ca0081178a1a46 100644 (file)
@@ -289,10 +289,12 @@ public enum CassandraRelevantProperties
     /** property for the interval on which the repeated client warnings and diagnostic events about disk usage are ignored */
     DISK_USAGE_NOTIFY_INTERVAL_MS("cassandra.disk_usage.notify_interval_ms", Long.toString(TimeUnit.MINUTES.toMillis(30))),
 
+    /** Controls the type of buffer (heap/direct) used for shared scratch buffers */
+    DATA_OUTPUT_BUFFER_ALLOCATE_TYPE("cassandra.dob.allocate_type"),
+
     // for specific tests
     ORG_APACHE_CASSANDRA_CONF_CASSANDRA_RELEVANT_PROPERTIES_TEST("org.apache.cassandra.conf.CassandraRelevantPropertiesTest"),
     ORG_APACHE_CASSANDRA_DB_VIRTUAL_SYSTEM_PROPERTIES_TABLE_TEST("org.apache.cassandra.db.virtual.SystemPropertiesTableTest"),
-
     ;
 
 
@@ -454,6 +456,40 @@ public enum CassandraRelevantProperties
         System.setProperty(key, Long.toString(value));
     }
 
+    /**
+     * Gets the value of a system property as an enum, calling {@link String#toUpperCase()} first.
+     *
+     * @param defaultValue to return when not defined
+     * @param <T> type
+     * @return enum value
+     */
+    public <T extends Enum<T>> T getEnum(T defaultValue) {
+        return getEnum(true, defaultValue);
+    }
+
+    /**
+     * Gets the value of a system property as an enum, optionally calling {@link String#toUpperCase()} first.
+     *
+     * @param toUppercase before converting to enum
+     * @param defaultValue to return when not defined
+     * @param <T> type
+     * @return enum value
+     */
+    public <T extends Enum<T>> T getEnum(boolean toUppercase, T defaultValue) {
+        String value = System.getProperty(key);
+        if (value == null)
+            return defaultValue;
+        return Enum.valueOf(defaultValue.getDeclaringClass(), toUppercase ? value.toUpperCase() : value);
+    }
+
+    /**
+     * Sets the value into system properties.
+     * @param value to set
+     */
+    public void setEnum(Enum<?> value) {
+        System.setProperty(key, value.name());
+    }
+
     public interface PropertyConverter<T>
     {
         T convert(String value);
index 3048a9a411dcdd868094733f855ad033d03de53e..68091ac90f27491e9ab894197167f2f5a0002b0e 100644 (file)
@@ -28,6 +28,8 @@ import java.util.Set;
 import java.util.TreeMap;
 import java.util.function.Supplier;
 
+import javax.annotation.Nullable;
+
 import com.google.common.base.Joiner;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
@@ -315,7 +317,7 @@ public class Config
     public Integer unlogged_batch_across_partitions_warn_threshold = 10;
     public volatile Integer concurrent_compactors;
     @Replaces(oldName = "compaction_throughput_mb_per_sec", converter = Converters.MEBIBYTES_PER_SECOND_DATA_RATE, deprecated = true)
-    public volatile DataRateSpec.IntMebibytesPerSecondBound compaction_throughput = new DataRateSpec.IntMebibytesPerSecondBound("16MiB/s");
+    public volatile DataRateSpec.LongBytesPerSecondBound compaction_throughput = new DataRateSpec.LongBytesPerSecondBound("64MiB/s");
     @Replaces(oldName = "compaction_large_partition_warning_threshold_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
     public volatile DataStorageSpec.IntMebibytesBound compaction_large_partition_warning_threshold = new DataStorageSpec.IntMebibytesBound("100MiB");
     @Replaces(oldName = "min_free_space_per_drive_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
@@ -325,19 +327,22 @@ public class Config
     public volatile int concurrent_materialized_view_builders = 1;
     public volatile int reject_repair_compaction_threshold = Integer.MAX_VALUE;
 
+    // The number of executors to use for building secondary indexes
+    public int concurrent_index_builders = 2;
+
     /**
      * @deprecated retry support removed on CASSANDRA-10992
      */
     @Deprecated
     public int max_streaming_retries = 3;
 
-    @Replaces(oldName = "stream_throughput_outbound_megabits_per_sec", converter = Converters.MEGABITS_TO_MEBIBYTES_PER_SECOND_DATA_RATE, deprecated = true)
-    public volatile DataRateSpec.IntMebibytesPerSecondBound stream_throughput_outbound = new DataRateSpec.IntMebibytesPerSecondBound("24MiB/s");
-    @Replaces(oldName = "inter_dc_stream_throughput_outbound_megabits_per_sec", converter = Converters.MEGABITS_TO_MEBIBYTES_PER_SECOND_DATA_RATE, deprecated = true)
-    public volatile DataRateSpec.IntMebibytesPerSecondBound inter_dc_stream_throughput_outbound = new DataRateSpec.IntMebibytesPerSecondBound("24MiB/s");
+    @Replaces(oldName = "stream_throughput_outbound_megabits_per_sec", converter = Converters.MEGABITS_TO_BYTES_PER_SECOND_DATA_RATE, deprecated = true)
+    public volatile DataRateSpec.LongBytesPerSecondBound stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound("24MiB/s");
+    @Replaces(oldName = "inter_dc_stream_throughput_outbound_megabits_per_sec", converter = Converters.MEGABITS_TO_BYTES_PER_SECOND_DATA_RATE, deprecated = true)
+    public volatile DataRateSpec.LongBytesPerSecondBound inter_dc_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound("24MiB/s");
 
-    public volatile DataRateSpec.IntMebibytesPerSecondBound entire_sstable_stream_throughput_outbound = new DataRateSpec.IntMebibytesPerSecondBound("24MiB/s");
-    public volatile DataRateSpec.IntMebibytesPerSecondBound entire_sstable_inter_dc_stream_throughput_outbound = new DataRateSpec.IntMebibytesPerSecondBound("24MiB/s");
+    public volatile DataRateSpec.LongBytesPerSecondBound entire_sstable_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound("24MiB/s");
+    public volatile DataRateSpec.LongBytesPerSecondBound entire_sstable_inter_dc_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound("24MiB/s");
 
     public String[] data_file_directories = new String[0];
 
@@ -426,7 +431,8 @@ public class Config
     @Replaces(oldName = "trickle_fsync_interval_in_kb", converter = Converters.KIBIBYTES_DATASTORAGE, deprecated = true)
     public DataStorageSpec.IntKibibytesBound trickle_fsync_interval = new DataStorageSpec.IntKibibytesBound("10240KiB");
 
-    @Replaces(oldName = "sstable_preemptive_open_interval_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
+    @Nullable
+    @Replaces(oldName = "sstable_preemptive_open_interval_in_mb", converter = Converters.NEGATIVE_MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
     public volatile DataStorageSpec.IntMebibytesBound sstable_preemptive_open_interval = new DataStorageSpec.IntMebibytesBound("50MiB");
 
     public volatile boolean key_cache_migrate_during_compaction = true;
@@ -451,7 +457,7 @@ public class Config
 
     public DataStorageSpec.LongMebibytesBound paxos_cache_size = null;
 
-    @Replaces(oldName = "cache_load_timeout_seconds", converter = Converters.SECONDS_DURATION, deprecated = true)
+    @Replaces(oldName = "cache_load_timeout_seconds", converter = Converters.NEGATIVE_SECONDS_DURATION, deprecated = true)
     public DurationSpec.IntSecondsBound cache_load_timeout = new DurationSpec.IntSecondsBound("30s");
 
     private static boolean isClientMode = false;
@@ -504,13 +510,14 @@ public class Config
 
     @Replaces(oldName = "index_summary_capacity_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_LONG, deprecated = true)
     public volatile DataStorageSpec.LongMebibytesBound index_summary_capacity;
-    @Replaces(oldName = "index_summary_resize_interval_in_minutes", converter = Converters.MINUTES_DURATION, deprecated = true)
+    @Nullable
+    @Replaces(oldName = "index_summary_resize_interval_in_minutes", converter = Converters.MINUTES_CUSTOM_DURATION, deprecated = true)
     public volatile DurationSpec.IntMinutesBound index_summary_resize_interval = new DurationSpec.IntMinutesBound("60m");
 
     @Replaces(oldName = "gc_log_threshold_in_ms", converter = Converters.MILLIS_DURATION_INT, deprecated = true)
-    public DurationSpec.IntMillisecondsBound gc_log_threshold = new DurationSpec.IntMillisecondsBound("200ms");
+    public volatile DurationSpec.IntMillisecondsBound gc_log_threshold = new DurationSpec.IntMillisecondsBound("200ms");
     @Replaces(oldName = "gc_warn_threshold_in_ms", converter = Converters.MILLIS_DURATION_INT, deprecated = true)
-    public DurationSpec.IntMillisecondsBound gc_warn_threshold = new DurationSpec.IntMillisecondsBound("1s");
+    public volatile DurationSpec.IntMillisecondsBound gc_warn_threshold = new DurationSpec.IntMillisecondsBound("1s");
 
     // TTL for different types of trace events.
     @Replaces(oldName = "tracetype_query_ttl", converter = Converters.SECONDS_DURATION, deprecated=true)
@@ -780,7 +787,7 @@ public class Config
     public volatile boolean auto_optimise_preview_repair_streams = false;
 
     // see CASSANDRA-17048 and the comment in cassandra.yaml
-    public boolean enable_uuid_sstable_identifiers = false;
+    public boolean uuid_sstable_identifiers_enabled = false;
 
     /**
      * Client mode means that the process is a pure client, that uses C* code base but does
@@ -831,6 +838,7 @@ public class Config
     public volatile boolean alter_table_enabled = true;
     public volatile boolean group_by_enabled = true;
     public volatile boolean drop_truncate_table_enabled = true;
+    public volatile boolean drop_keyspace_enabled = true;
     public volatile boolean secondary_indexes_enabled = true;
     public volatile boolean uncompressed_tables_enabled = true;
     public volatile boolean compact_tables_enabled = true;
@@ -1040,7 +1048,7 @@ public class Config
 
     public volatile int max_top_size_partition_count = 10;
     public volatile int max_top_tombstone_partition_count = 10;
-    public volatile DataStorageSpec.LongBytesBound min_tracked_partition_size_bytes = new DataStorageSpec.LongBytesBound("1MiB");
+    public volatile DataStorageSpec.LongBytesBound min_tracked_partition_size = new DataStorageSpec.LongBytesBound("1MiB");
     public volatile long min_tracked_partition_tombstone_count = 5000;
     public volatile boolean top_partitions_enabled = true;
 
index ccfc87b4d53d886b41d5e4c12efa11024885c29f..c898c08d648223836f2649114b0d0b75cf7fba75 100644 (file)
@@ -21,6 +21,8 @@ package org.apache.cassandra.config;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
 
+import static org.apache.cassandra.config.DataRateSpec.DataRateUnit.MEBIBYTES_PER_SECOND;
+
 /**
  * Converters for backward compatibility with the old cassandra.yaml where duration, data rate and
  * data storage configuration parameters were provided only by value and the expected unit was part of the configuration
@@ -40,10 +42,10 @@ public enum Converters
     IDENTITY(null, null, o -> o, o -> o),
     MILLIS_DURATION_LONG(Long.class, DurationSpec.LongMillisecondsBound.class,
                          DurationSpec.LongMillisecondsBound::new,
-                         o -> o.toMilliseconds()),
+                         o -> o == null ? null : o.toMilliseconds()),
     MILLIS_DURATION_INT(Integer.class, DurationSpec.IntMillisecondsBound.class,
                         DurationSpec.IntMillisecondsBound::new,
-                        DurationSpec.IntMillisecondsBound::toMilliseconds),
+                        o -> o == null ? null : o.toMilliseconds()),
     MILLIS_DURATION_DOUBLE(Double.class, DurationSpec.IntMillisecondsBound.class,
                            o -> Double.isNaN(o) ? new DurationSpec.IntMillisecondsBound(0) :
                                 new DurationSpec.IntMillisecondsBound(o, TimeUnit.MILLISECONDS),
@@ -57,10 +59,10 @@ public enum Converters
                            o -> o == null ? -1 : o.toMilliseconds()),
     SECONDS_DURATION(Integer.class, DurationSpec.IntSecondsBound.class,
                      DurationSpec.IntSecondsBound::new,
-                     DurationSpec.IntSecondsBound::toSeconds),
+                     o -> o == null ? null : o.toSeconds()),
     NEGATIVE_SECONDS_DURATION(Integer.class, DurationSpec.IntSecondsBound.class,
                               o -> o < 0 ? new DurationSpec.IntSecondsBound(0) : new DurationSpec.IntSecondsBound(o),
-                              DurationSpec.IntSecondsBound::toSeconds),
+                              o -> o == null ? null : o.toSeconds()),
     /**
      * This converter is used to support backward compatibility for Duration parameters where we added the opportunity
      * for the users to add a unit in the parameters' values but we didn't change the names. (key_cache_save_period,
@@ -69,22 +71,29 @@ public enum Converters
      */
     SECONDS_CUSTOM_DURATION(String.class, DurationSpec.IntSecondsBound.class,
                             DurationSpec.IntSecondsBound::inSecondsString,
-                            o -> Long.toString(o.toSeconds())),
-    MINUTES_DURATION(Integer.class, DurationSpec.IntMinutesBound.class,
-                     DurationSpec.IntMinutesBound::new,
-                     DurationSpec.IntMinutesBound::toMinutes),
+                            o -> o == null ? null : Long.toString(o.toSeconds())),
+    /**
+     * This converter is used to support backward compatibility for parameters where in the past -1 was used as a value
+     * Example:  index_summary_resize_interval_in_minutes = -1 and  index_summary_resize_interval = null are equal.
+     */
+    MINUTES_CUSTOM_DURATION(Integer.class, DurationSpec.IntMinutesBound.class,
+                            o -> o == -1 ? null : new DurationSpec.IntMinutesBound(o),
+                            o -> o == null ? -1 : o.toMinutes()),
     MEBIBYTES_DATA_STORAGE_LONG(Long.class, DataStorageSpec.LongMebibytesBound.class,
                                 DataStorageSpec.LongMebibytesBound::new,
-                                DataStorageSpec.LongMebibytesBound::toMebibytes),
+                                o -> o == null ? null : o.toMebibytes()),
     MEBIBYTES_DATA_STORAGE_INT(Integer.class, DataStorageSpec.IntMebibytesBound.class,
                                DataStorageSpec.IntMebibytesBound::new,
-                               DataStorageSpec.IntMebibytesBound::toMebibytes),
+                               o -> o == null ? null : o.toMebibytes()),
+    NEGATIVE_MEBIBYTES_DATA_STORAGE_INT(Integer.class, DataStorageSpec.IntMebibytesBound.class,
+                                        o -> o < 0 ? null : new DataStorageSpec.IntMebibytesBound(o),
+                                        o -> o == null ? -1 : o.toMebibytes()),
     KIBIBYTES_DATASTORAGE(Integer.class, DataStorageSpec.IntKibibytesBound.class,
                           DataStorageSpec.IntKibibytesBound::new,
-                          DataStorageSpec.IntKibibytesBound::toKibibytes),
+                          o -> o == null ? null : o.toKibibytes()),
     BYTES_DATASTORAGE(Integer.class, DataStorageSpec.IntBytesBound.class,
                       DataStorageSpec.IntBytesBound::new,
-                      DataStorageSpec.IntBytesBound::toBytes),
+                      o -> o == null ? null : o.toBytes()),
     /**
      * This converter is used to support backward compatibility for parameters where in the past negative number was used as a value
      * Example: native_transport_max_concurrent_requests_in_bytes_per_ip = -1 and native_transport_max_request_data_in_flight_per_ip = null
@@ -92,17 +101,17 @@ public enum Converters
      */
     BYTES_CUSTOM_DATASTORAGE(Long.class, DataStorageSpec.LongBytesBound.class,
                              o -> o == -1 ? null : new DataStorageSpec.LongBytesBound(o),
-                             DataStorageSpec.LongBytesBound::toBytes),
-    MEBIBYTES_PER_SECOND_DATA_RATE(Integer.class, DataRateSpec.IntMebibytesPerSecondBound.class,
-                                   DataRateSpec.IntMebibytesPerSecondBound::new,
-                                   DataRateSpec.IntMebibytesPerSecondBound::toMebibytesPerSecondAsInt),
+                             o -> o == null ? null : o.toBytes()),
+    MEBIBYTES_PER_SECOND_DATA_RATE(Integer.class, DataRateSpec.LongBytesPerSecondBound.class,
+                                   i -> new DataRateSpec.LongBytesPerSecondBound(i, MEBIBYTES_PER_SECOND),
+                                   o -> o == null ? null : o.toMebibytesPerSecondAsInt()),
     /**
      * This converter is a custom one to support backward compatibility for stream_throughput_outbound and
-     * inter_dc_stream_throughput_outbound which were provided in megatibs per second prior CASSANDRA-15234.
+     * inter_dc_stream_throughput_outbound which were provided in megabits per second prior to CASSANDRA-15234.
      */
-    MEGABITS_TO_MEBIBYTES_PER_SECOND_DATA_RATE(Integer.class, DataRateSpec.IntMebibytesPerSecondBound.class,
-                                               i -> DataRateSpec.IntMebibytesPerSecondBound.megabitsPerSecondInMebibytesPerSecond(i),
-                                               DataRateSpec.IntMebibytesPerSecondBound::toMegabitsPerSecondAsInt);
+    MEGABITS_TO_BYTES_PER_SECOND_DATA_RATE(Integer.class, DataRateSpec.LongBytesPerSecondBound.class,
+                                           i -> DataRateSpec.LongBytesPerSecondBound.megabitsPerSecondInBytesPerSecond(i),
+                                           o -> o == null ? null : o.toMegabitsPerSecondAsInt());
     private final Class<?> oldType;
     private final Class<?> newType;
     private final Function<Object, Object> convert;
@@ -160,7 +169,6 @@ public enum Converters
      */
     public Object unconvert(Object value)
     {
-        if (value == null) return null;
         return reverseConvert.apply(value);
     }
 }
index 34eac7465a6061a0a7c01e77a03a9983e4278b74..1ec2d1e77420d0bd31b56ab6578d97875fc71a84 100644 (file)
@@ -23,10 +23,10 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 
+import com.google.common.math.DoubleMath;
 import com.google.common.primitives.Ints;
 
 import static org.apache.cassandra.config.DataRateSpec.DataRateUnit.BYTES_PER_SECOND;
-import static org.apache.cassandra.config.DataRateSpec.DataRateUnit.MEBIBYTES_PER_SECOND;
 
 /**
  * Represents a data rate type used for cassandra configuration. It supports the opportunity for the users to be able to
@@ -39,7 +39,7 @@ public abstract class DataRateSpec
      */
     private static final Pattern UNITS_PATTERN = Pattern.compile("^(\\d+)(MiB/s|KiB/s|B/s)$");
 
-    private final double quantity;
+    private final long quantity;
 
     private final DataRateUnit unit;
 
@@ -52,7 +52,7 @@ public abstract class DataRateSpec
             throw new IllegalArgumentException("Invalid data rate: " + value + " Accepted units: MiB/s, KiB/s, B/s where " +
                                                 "case matters and " + "only non-negative values are valid");
 
-        quantity = (double) Long.parseLong(matcher.group(1));
+        quantity = Long.parseLong(matcher.group(1));
         unit = DataRateUnit.fromSymbol(matcher.group(2));
     }
 
@@ -63,7 +63,7 @@ public abstract class DataRateSpec
         validateQuantity(value, quantity(), unit(), minUnit, max);
     }
 
-    private DataRateSpec(double quantity, DataRateUnit unit, DataRateUnit minUnit, long max)
+    private DataRateSpec(long quantity, DataRateUnit unit, DataRateUnit minUnit, long max)
     {
         this.quantity = quantity;
         this.unit = unit;
@@ -212,7 +212,7 @@ public abstract class DataRateSpec
     @Override
     public String toString()
     {
-        return Math.round(quantity) + unit.symbol;
+        return (DoubleMath.isMathematicalInteger(quantity) ? (long) quantity : quantity) + unit.symbol;
     }
 
     /**
@@ -238,7 +238,7 @@ public abstract class DataRateSpec
          * @param quantity where quantity shouldn't be bigger than Long.MAX_VALUE - 1 in bytes per second
          * @param unit     in which the provided quantity is
          */
-        public LongBytesPerSecondBound(double quantity, DataRateUnit unit)
+        public LongBytesPerSecondBound(long quantity, DataRateUnit unit)
         {
             super(quantity, unit, BYTES_PER_SECOND, Long.MAX_VALUE);
         }
@@ -252,59 +252,21 @@ public abstract class DataRateSpec
         {
             this(bytesPerSecond, BYTES_PER_SECOND);
         }
-    }
-
-    /**
-     * Represents a data rate used for Cassandra configuration. The bound is [0, Integer.MAX_VALUE) in mebibytes per second.
-     * If the user sets a different unit - we still validate that converted to mebibytes per second the quantity will not exceed
-     * that upper bound. (CASSANDRA-17571)
-     */
-    public final static class IntMebibytesPerSecondBound extends DataRateSpec
-    {
-        /**
-         * Creates a {@code DataRateSpec.IntMebibytesPerSecondBound} of the specified amount with bound [0, Integer.MAX_VALUE) mebibytes per second.
-         *
-         * @param value the data rate
-         */
-        public IntMebibytesPerSecondBound(String value)
-        {
-            super(value, MEBIBYTES_PER_SECOND, Integer.MAX_VALUE);
-        }
-
-        /**
-         * Creates a {@code DataRateSpec.IntMebibytesPerSecondBound} of the specified amount in the specified unit.
-         *
-         * @param quantity where quantity shouldn't be bigger than Integer.MAX_VALUE - 1 in mebibytes per second
-         * @param unit     in which the provided quantity is
-         */
-        public IntMebibytesPerSecondBound(double quantity, DataRateUnit unit)
-        {
-            super(quantity, unit, MEBIBYTES_PER_SECOND, Integer.MAX_VALUE);
-        }
-
-        /**
-         * Creates a {@code DataRateSpec.IntMebibytesPerSecondBound} of the specified amount in mebibytes per second.
-         *
-         * @param mebibytesPerSecond where mebibytesPerSecond shouldn't be bigger than Long.MAX_VALUE-1
-         */
-        public IntMebibytesPerSecondBound(long mebibytesPerSecond)
-        {
-            this (mebibytesPerSecond, MEBIBYTES_PER_SECOND);
-        }
 
         // this one should be used only for backward compatibility for stream_throughput_outbound and inter_dc_stream_throughput_outbound
         // which were in megabits per second in 4.0. Do not start using it for any new properties
-        public static IntMebibytesPerSecondBound megabitsPerSecondInMebibytesPerSecond(long megabitsPerSecond)
+        @Deprecated
+        public static LongBytesPerSecondBound megabitsPerSecondInBytesPerSecond(long megabitsPerSecond)
         {
-            final double MEBIBYTES_PER_MEGABIT = 0.119209289550781;
-            double mebibytesPerSecond = (double) megabitsPerSecond * MEBIBYTES_PER_MEGABIT;
+            final long BYTES_PER_MEGABIT = 125_000;
+            long bytesPerSecond = megabitsPerSecond * BYTES_PER_MEGABIT;
 
             if (megabitsPerSecond >= Integer.MAX_VALUE)
                 throw new IllegalArgumentException("Invalid data rate: " + megabitsPerSecond + " megabits per second; " +
-                                                 "stream_throughput_outbound and inter_dc_stream_throughput_outbound" +
-                                                 " should be between 0 and " + Integer.MAX_VALUE + " in megabits per second");
+                                                   "stream_throughput_outbound and inter_dc_stream_throughput_outbound" +
+                                                   " should be between 0 and " + (Integer.MAX_VALUE - 1) + " in megabits per second");
 
-            return new IntMebibytesPerSecondBound(mebibytesPerSecond, MEBIBYTES_PER_SECOND);
+            return new LongBytesPerSecondBound(bytesPerSecond, BYTES_PER_SECOND);
         }
     }
 
@@ -385,7 +347,7 @@ public abstract class DataRateSpec
             {
                 if (d > MAX / (MEGABITS_PER_MEBIBYTE))
                     return MAX;
-                return Math.round(d * MEGABITS_PER_MEBIBYTE);
+                return d * MEGABITS_PER_MEBIBYTE;
             }
 
             public double convert(double source, DataRateUnit sourceUnit)
index 9a4348d6338c51b0b5e70ea2aa8689ef33fec45f..f0d3acaa61cd7315e3861155630ddb0a10ae2be3 100644 (file)
@@ -315,6 +315,14 @@ public abstract class DataStorageSpec
         {
             return Ints.saturatedCast(unit().toKibibytes(quantity()));
         }
+
+        /**
+         * @return the amount of data storage in bytes.
+         */
+        public long toBytesInLong()
+        {
+           return unit().toBytes(quantity());
+        }
     }
 
     /**
@@ -447,6 +455,16 @@ public abstract class DataStorageSpec
         {
             return Ints.saturatedCast(unit().toMebibytes(quantity()));
         }
+
+        /**
+         * Returns the amount of data storage in bytes as {@code long}
+         *
+         * @return the amount of data storage in bytes.
+         */
+        public long toBytesInLong()
+        {
+            return unit().toBytes(quantity());
+        }
     }
 
     public enum DataStorageUnit
index 2bd1aa8400558818b7a9ea94c13afb2dcdc8e579..1ce16052feae79950e0a841f92e7bd398e9a26cf 100644 (file)
@@ -93,6 +93,8 @@ import org.apache.cassandra.utils.FBUtilities;
 import static org.apache.cassandra.config.CassandraRelevantProperties.OS_ARCH;
 import static org.apache.cassandra.config.CassandraRelevantProperties.SUN_ARCH_DATA_MODEL;
 import static org.apache.cassandra.config.CassandraRelevantProperties.TEST_JVM_DTEST_DISABLE_SSL;
+import static org.apache.cassandra.config.DataRateSpec.DataRateUnit.BYTES_PER_SECOND;
+import static org.apache.cassandra.config.DataRateSpec.DataRateUnit.MEBIBYTES_PER_SECOND;
 import static org.apache.cassandra.config.DataStorageSpec.DataStorageUnit.MEBIBYTES;
 import static org.apache.cassandra.io.util.FileUtils.ONE_GIB;
 import static org.apache.cassandra.io.util.FileUtils.ONE_MIB;
@@ -281,7 +283,7 @@ public class DatabaseDescriptor
         if (clientInitialized)
             return;
         clientInitialized = true;
-
+        setDefaultFailureDetector();
         Config.setClientMode(true);
         conf = new Config();
         diskOptimizationStrategy = new SpinningDiskOptimizationStrategy();
@@ -398,16 +400,7 @@ public class DatabaseDescriptor
         //InetAddressAndPort and get the right defaults
         InetAddressAndPort.initializeDefaultPort(getStoragePort());
 
-        // below 2 checks are needed in order to match the pre-CASSANDRA-15234 upper bound for those parameters which were still in megabits per second
-        if (conf.stream_throughput_outbound.toMegabitsPerSecond() >= Integer.MAX_VALUE)
-        {
-            throw new ConfigurationException("Invalid value of stream_throughput_outbound: " + conf.stream_throughput_outbound.toString(), false);
-        }
-
-        if (conf.inter_dc_stream_throughput_outbound.toMegabitsPerSecond() >= Integer.MAX_VALUE)
-        {
-            throw new ConfigurationException("Invalid value of inter_dc_stream_throughput_outbound: " + conf.inter_dc_stream_throughput_outbound.toString(), false);
-        }
+        validateUpperBoundStreamingConfig();
 
         if (conf.auto_snapshot_ttl != null)
         {
@@ -729,6 +722,9 @@ public class DatabaseDescriptor
 
             if (preparedStatementsCacheSizeInMiB == 0)
                 throw new NumberFormatException(); // to escape duplicating error message
+
+            // we need this assignment for the Settings virtual table - CASSANDRA-17734
+            conf.prepared_statements_cache_size = new DataStorageSpec.LongMebibytesBound(preparedStatementsCacheSizeInMiB);
         }
         catch (NumberFormatException e)
         {
@@ -745,6 +741,9 @@ public class DatabaseDescriptor
 
             if (keyCacheSizeInMiB < 0)
                 throw new NumberFormatException(); // to escape duplicating error message
+
+            // we need this assignment for the Settings Virtual Table - CASSANDRA-17734
+            conf.key_cache_size = new DataStorageSpec.LongMebibytesBound(keyCacheSizeInMiB);
         }
         catch (NumberFormatException e)
         {
@@ -784,6 +783,9 @@ public class DatabaseDescriptor
                     + conf.paxos_cache_size + "', supported values are <integer> >= 0.", false);
         }
 
+        // we need this assignment for the Settings virtual table - CASSANDRA-17735
+        conf.counter_cache_size = new DataStorageSpec.LongMebibytesBound(counterCacheSizeInMiB);
+
         // if set to empty/"auto" then use 5% of Heap size
         indexSummaryCapacityInMiB = (conf.index_summary_capacity == null)
                                    ? Math.max(1, (int) (Runtime.getRuntime().totalMemory() * 0.05 / 1024 / 1024))
@@ -793,6 +795,9 @@ public class DatabaseDescriptor
             throw new ConfigurationException("index_summary_capacity option was set incorrectly to '"
                                              + conf.index_summary_capacity.toString() + "', it should be a non-negative integer.", false);
 
+        // we need this assignment for the Settings virtual table - CASSANDRA-17735
+        conf.index_summary_capacity = new DataStorageSpec.LongMebibytesBound(indexSummaryCapacityInMiB);
+
         if (conf.user_defined_functions_fail_timeout.toMilliseconds() < conf.user_defined_functions_warn_timeout.toMilliseconds())
             throw new ConfigurationException("user_defined_functions_warn_timeout must less than user_defined_function_fail_timeout", false);
 
@@ -903,6 +908,36 @@ public class DatabaseDescriptor
         logInitializationOutcome(logger);
     }
 
+    @VisibleForTesting
+    static void validateUpperBoundStreamingConfig() throws ConfigurationException
+    {
+        // below 2 checks are needed in order to match the pre-CASSANDRA-15234 upper bound for those parameters which were still in megabits per second
+        if (conf.stream_throughput_outbound.toMegabitsPerSecond() >= Integer.MAX_VALUE)
+        {
+            throw new ConfigurationException("Invalid value of stream_throughput_outbound: " + conf.stream_throughput_outbound.toString(), false);
+        }
+
+        if (conf.inter_dc_stream_throughput_outbound.toMegabitsPerSecond() >= Integer.MAX_VALUE)
+        {
+            throw new ConfigurationException("Invalid value of inter_dc_stream_throughput_outbound: " + conf.inter_dc_stream_throughput_outbound.toString(), false);
+        }
+
+        if (conf.entire_sstable_stream_throughput_outbound.toMebibytesPerSecond() >= Integer.MAX_VALUE)
+        {
+            throw new ConfigurationException("Invalid value of entire_sstable_stream_throughput_outbound: " + conf.entire_sstable_stream_throughput_outbound.toString(), false);
+        }
+
+        if (conf.entire_sstable_inter_dc_stream_throughput_outbound.toMebibytesPerSecond() >= Integer.MAX_VALUE)
+        {
+            throw new ConfigurationException("Invalid value of entire_sstable_inter_dc_stream_throughput_outbound: " + conf.entire_sstable_inter_dc_stream_throughput_outbound.toString(), false);
+        }
+
+        if (conf.compaction_throughput.toMebibytesPerSecond() >= Integer.MAX_VALUE)
+        {
+            throw new ConfigurationException("Invalid value of compaction_throughput: " + conf.compaction_throughput.toString(), false);
+        }
+    }
+
     @VisibleForTesting
     static void applyConcurrentValidations(Config config)
     {
@@ -1607,9 +1642,7 @@ public class DatabaseDescriptor
 
     public static void setColumnIndexSize(int val)
     {
-        DataStorageSpec.IntKibibytesBound memory = new DataStorageSpec.IntKibibytesBound(val);
-        checkValidForByteConversion(memory, "column_index_size");
-        conf.column_index_size = new DataStorageSpec.IntKibibytesBound(val);
+        conf.column_index_size =  createIntKibibyteBoundAndEnsureItIsValidForByteConversion(val,"column_index_size");
     }
 
     public static int getColumnIndexCacheSize()
@@ -1624,9 +1657,7 @@ public class DatabaseDescriptor
 
     public static void setColumnIndexCacheSize(int val)
     {
-        DataStorageSpec.IntKibibytesBound memory = new DataStorageSpec.IntKibibytesBound(val);
-        checkValidForByteConversion(memory, "column_index_cache_size");
-        conf.column_index_cache_size = new DataStorageSpec.IntKibibytesBound(val);
+        conf.column_index_cache_size = createIntKibibyteBoundAndEnsureItIsValidForByteConversion(val,"column_index_cache_size");
     }
 
     public static int getBatchSizeWarnThreshold()
@@ -1641,7 +1672,7 @@ public class DatabaseDescriptor
 
     public static long getBatchSizeFailThreshold()
     {
-        return conf.batch_size_fail_threshold.toBytes();
+        return conf.batch_size_fail_threshold.toBytesInLong();
     }
 
     public static int getBatchSizeFailThresholdInKiB()
@@ -1656,9 +1687,7 @@ public class DatabaseDescriptor
 
     public static void setBatchSizeWarnThresholdInKiB(int threshold)
     {
-        DataStorageSpec.IntKibibytesBound storage = new DataStorageSpec.IntKibibytesBound(threshold);
-        checkValidForByteConversion(storage, "batch_size_warn_threshold");
-        conf.batch_size_warn_threshold = new DataStorageSpec.IntKibibytesBound(threshold);
+        conf.batch_size_warn_threshold = createIntKibibyteBoundAndEnsureItIsValidForByteConversion(threshold,"batch_size_warn_threshold");
     }
 
     public static void setBatchSizeFailThresholdInKiB(int threshold)
@@ -1956,17 +1985,38 @@ public class DatabaseDescriptor
         return conf.compaction_throughput.toMebibytesPerSecondAsInt();
     }
 
+    public static double getCompactionThroughputBytesPerSec()
+    {
+        return conf.compaction_throughput.toBytesPerSecond();
+    }
+
     public static double getCompactionThroughputMebibytesPerSec()
     {
         return conf.compaction_throughput.toMebibytesPerSecond();
     }
 
+    @VisibleForTesting // only for testing!
+    public static void setCompactionThroughputBytesPerSec(int value)
+    {
+        if (BYTES_PER_SECOND.toMebibytesPerSecond(value) >= Integer.MAX_VALUE)
+            throw new IllegalArgumentException("compaction_throughput: " + value +
+                                               " is too large; it should be less than " +
+                                               Integer.MAX_VALUE + " in MiB/s");
+
+        conf.compaction_throughput = new DataRateSpec.LongBytesPerSecondBound(value);
+    }
+
     public static void setCompactionThroughputMebibytesPerSec(int value)
     {
-        conf.compaction_throughput = new DataRateSpec.IntMebibytesPerSecondBound(value);
+        if (value == Integer.MAX_VALUE)
+            throw new IllegalArgumentException("compaction_throughput: " + value +
+                                               " is too large; it should be less than " +
+                                               Integer.MAX_VALUE + " in MiB/s");
+
+        conf.compaction_throughput = new DataRateSpec.LongBytesPerSecondBound(value, MEBIBYTES_PER_SECOND);
     }
 
-    public static long getCompactionLargePartitionWarningThreshold() { return conf.compaction_large_partition_warning_threshold.toBytes(); }
+    public static long getCompactionLargePartitionWarningThreshold() { return conf.compaction_large_partition_warning_threshold.toBytesInLong(); }
 
     public static int getCompactionTombstoneWarningThreshold()
     {
@@ -1983,6 +2033,11 @@ public class DatabaseDescriptor
         return conf.concurrent_validations;
     }
 
+    public static int getConcurrentIndexBuilders()
+    {
+        return conf.concurrent_index_builders;
+    }
+
     public static void setConcurrentValidations(int value)
     {
         value = value > 0 ? value : Integer.MAX_VALUE;
@@ -2001,7 +2056,7 @@ public class DatabaseDescriptor
 
     public static long getMinFreeSpacePerDriveInBytes()
     {
-        return conf.min_free_space_per_drive.toBytes();
+        return conf.min_free_space_per_drive.toBytesInLong();
     }
 
     public static boolean getDisableSTCSInL0()
@@ -2019,19 +2074,39 @@ public class DatabaseDescriptor
         return conf.stream_throughput_outbound.toMegabitsPerSecondAsInt();
     }
 
+    public static double getStreamThroughputOutboundMegabitsPerSecAsDouble()
+    {
+        return conf.stream_throughput_outbound.toMegabitsPerSecond();
+    }
+
     public static double getStreamThroughputOutboundMebibytesPerSec()
     {
         return conf.stream_throughput_outbound.toMebibytesPerSecond();
     }
 
-    public static void setStreamThroughputOutboundMegabitsPerSec(int value)
+    public static double getStreamThroughputOutboundBytesPerSec()
     {
-        conf.stream_throughput_outbound = DataRateSpec.IntMebibytesPerSecondBound.megabitsPerSecondInMebibytesPerSecond(value);
+        return conf.stream_throughput_outbound.toBytesPerSecond();
     }
 
-    public static int getEntireSSTableStreamThroughputOutboundMebibytesPerSecAsInt()
+    public static int getStreamThroughputOutboundMebibytesPerSecAsInt()
     {
-        return conf.entire_sstable_stream_throughput_outbound.toMebibytesPerSecondAsInt();
+        return conf.stream_throughput_outbound.toMebibytesPerSecondAsInt();
+    }
+
+    public static void setStreamThroughputOutboundMebibytesPerSecAsInt(int value)
+    {
+        if (MEBIBYTES_PER_SECOND.toMegabitsPerSecond(value) >= Integer.MAX_VALUE)
+            throw new IllegalArgumentException("stream_throughput_outbound: " + value  +
+                                               " is too large; it should be less than " +
+                                               Integer.MAX_VALUE + " in megabits/s");
+
+        conf.stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound(value, MEBIBYTES_PER_SECOND);
+    }
+
+    public static void setStreamThroughputOutboundMegabitsPerSec(int value)
+    {
+        conf.stream_throughput_outbound = DataRateSpec.LongBytesPerSecondBound.megabitsPerSecondInBytesPerSecond(value);
     }
 
     public static double getEntireSSTableStreamThroughputOutboundMebibytesPerSec()
@@ -2039,9 +2114,19 @@ public class DatabaseDescriptor
         return conf.entire_sstable_stream_throughput_outbound.toMebibytesPerSecond();
     }
 
+    public static double getEntireSSTableStreamThroughputOutboundBytesPerSec()
+    {
+        return conf.entire_sstable_stream_throughput_outbound.toBytesPerSecond();
+    }
+
     public static void setEntireSSTableStreamThroughputOutboundMebibytesPerSec(int value)
     {
-        conf.entire_sstable_stream_throughput_outbound = new DataRateSpec.IntMebibytesPerSecondBound(value);
+        if (value == Integer.MAX_VALUE)
+            throw new IllegalArgumentException("entire_sstable_stream_throughput_outbound: " + value +
+                                               " is too large; it should be less than " +
+                                               Integer.MAX_VALUE + " in MiB/s");
+
+        conf.entire_sstable_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound(value, MEBIBYTES_PER_SECOND);
     }
 
     public static int getInterDCStreamThroughputOutboundMegabitsPerSec()
@@ -2049,29 +2134,59 @@ public class DatabaseDescriptor
         return conf.inter_dc_stream_throughput_outbound.toMegabitsPerSecondAsInt();
     }
 
+    public static double getInterDCStreamThroughputOutboundMegabitsPerSecAsDouble()
+    {
+        return conf.inter_dc_stream_throughput_outbound.toMegabitsPerSecond();
+    }
+
     public static double getInterDCStreamThroughputOutboundMebibytesPerSec()
     {
         return conf.inter_dc_stream_throughput_outbound.toMebibytesPerSecond();
     }
 
+    public static double getInterDCStreamThroughputOutboundBytesPerSec()
+    {
+        return conf.inter_dc_stream_throughput_outbound.toBytesPerSecond();
+    }
+
+    public static int getInterDCStreamThroughputOutboundMebibytesPerSecAsInt()
+    {
+        return conf.inter_dc_stream_throughput_outbound.toMebibytesPerSecondAsInt();
+    }
+
+    public static void setInterDCStreamThroughputOutboundMebibytesPerSecAsInt(int value)
+    {
+        if (MEBIBYTES_PER_SECOND.toMegabitsPerSecond(value) >= Integer.MAX_VALUE)
+            throw new IllegalArgumentException("inter_dc_stream_throughput_outbound: " + value +
+                                               " is too large; it should be less than " +
+                                               Integer.MAX_VALUE + " in megabits/s");
+
+        conf.inter_dc_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound(value, MEBIBYTES_PER_SECOND);
+    }
+
     public static void setInterDCStreamThroughputOutboundMegabitsPerSec(int value)
     {
-        conf.inter_dc_stream_throughput_outbound = DataRateSpec.IntMebibytesPerSecondBound.megabitsPerSecondInMebibytesPerSecond(value);
+        conf.inter_dc_stream_throughput_outbound = DataRateSpec.LongBytesPerSecondBound.megabitsPerSecondInBytesPerSecond(value);
     }
 
-    public static double getEntireSSTableInterDCStreamThroughputOutboundMebibytesPerSec()
+    public static double getEntireSSTableInterDCStreamThroughputOutboundBytesPerSec()
     {
-        return conf.entire_sstable_inter_dc_stream_throughput_outbound.toMebibytesPerSecond();
+        return conf.entire_sstable_inter_dc_stream_throughput_outbound.toBytesPerSecond();
     }
 
-    public static int getEntireSSTableInterDCStreamThroughputOutboundMebibytesPerSecAsInt()
+    public static double getEntireSSTableInterDCStreamThroughputOutboundMebibytesPerSec()
     {
-        return conf.entire_sstable_inter_dc_stream_throughput_outbound.toMebibytesPerSecondAsInt();
+        return conf.entire_sstable_inter_dc_stream_throughput_outbound.toMebibytesPerSecond();
     }
 
     public static void setEntireSSTableInterDCStreamThroughputOutboundMebibytesPerSec(int value)
     {
-        conf.entire_sstable_inter_dc_stream_throughput_outbound = new DataRateSpec.IntMebibytesPerSecondBound(value);
+        if (value == Integer.MAX_VALUE)
+            throw new IllegalArgumentException("entire_sstable_inter_dc_stream_throughput_outbound: " + value +
+                                               " is too large; it should be less than " +
+                                               Integer.MAX_VALUE + " in MiB/s");
+
+        conf.entire_sstable_inter_dc_stream_throughput_outbound = new DataRateSpec.LongBytesPerSecondBound(value, MEBIBYTES_PER_SECOND);
     }
 
     /**
@@ -2977,7 +3092,7 @@ public class DatabaseDescriptor
 
     public static long getMaxHintsFileSize()
     {
-        return  conf.max_hints_file_size.toBytes();
+        return  conf.max_hints_file_size.toBytesInLong();
     }
 
     public static ParameterizedClass getHintsCompression()
@@ -3075,14 +3190,21 @@ public class DatabaseDescriptor
         conf.key_cache_migrate_during_compaction = migrateCacheEntry;
     }
 
+    /** This method can return a negative number, which means the feature is disabled */
     public static int getSSTablePreemptiveOpenIntervalInMiB()
     {
+        if (conf.sstable_preemptive_open_interval == null)
+            return -1;
         return conf.sstable_preemptive_open_interval.toMebibytes();
     }
 
+    /** A negative number disables the feature */
     public static void setSSTablePreemptiveOpenIntervalInMiB(int mib)
     {
-        conf.sstable_preemptive_open_interval = new DataStorageSpec.IntMebibytesBound(mib);
+        if (mib < 0)
+            conf.sstable_preemptive_open_interval = null;
+        else
+            conf.sstable_preemptive_open_interval = new DataStorageSpec.IntMebibytesBound(mib);
     }
 
     public static boolean getTrickleFsync()
@@ -3315,9 +3437,20 @@ public class DatabaseDescriptor
 
     public static int getIndexSummaryResizeIntervalInMinutes()
     {
+        if (conf.index_summary_resize_interval == null)
+            return -1;
+
         return conf.index_summary_resize_interval.toMinutes();
     }
 
+    public static void setIndexSummaryResizeIntervalInMinutes(int value)
+    {
+        if (value == -1)
+            conf.index_summary_resize_interval = null;
+        else
+            conf.index_summary_resize_interval = new DurationSpec.IntMinutesBound(value);
+    }
+
     public static boolean hasLargeAddressSpace()
     {
         // currently we just check if it's a 64bit arch, but any we only really care if the address space is large
@@ -3455,6 +3588,11 @@ public class DatabaseDescriptor
         return conf.gc_log_threshold.toMilliseconds();
     }
 
+    public static void setGCLogThreshold(int gcLogThreshold)
+    {
+        conf.gc_log_threshold = new DurationSpec.IntMillisecondsBound(gcLogThreshold);
+    }
+
     public static EncryptionContext getEncryptionContext()
     {
         return encryptionContext;
@@ -3465,6 +3603,11 @@ public class DatabaseDescriptor
         return conf.gc_warn_threshold.toMilliseconds();
     }
 
+    public static void setGCWarnThreshold(int threshold)
+    {
+        conf.gc_warn_threshold = new DurationSpec.IntMillisecondsBound(threshold);
+    }
+
     public static boolean isCDCEnabled()
     {
         return conf.cdc_enabled;
@@ -3707,12 +3850,19 @@ public class DatabaseDescriptor
         commitLogSegmentMgrProvider = provider;
     }
 
+    private static DataStorageSpec.IntKibibytesBound createIntKibibyteBoundAndEnsureItIsValidForByteConversion(int kibibytes, String propertyName)
+    {
+        DataStorageSpec.IntKibibytesBound intKibibytesBound = new DataStorageSpec.IntKibibytesBound(kibibytes);
+        checkValidForByteConversion(intKibibytesBound, propertyName);
+        return intKibibytesBound;
+    }
+
     /**
      * Ensures passed in configuration value is positive and will not overflow when converted to Bytes
      */
     private static void checkValidForByteConversion(final DataStorageSpec.IntKibibytesBound value, String name)
     {
-        long valueInBytes = value.toBytes();
+        long valueInBytes = value.toBytesInLong();
         if (valueInBytes < 0 || valueInBytes > Integer.MAX_VALUE - 1)
         {
             throw new ConfigurationException(String.format("%s must be positive value <= %dB, but was %dB",
@@ -4156,7 +4306,7 @@ public class DatabaseDescriptor
 
     public static boolean isUUIDSSTableIdentifiersEnabled()
     {
-        return conf.enable_uuid_sstable_identifiers;
+        return conf.uuid_sstable_identifiers_enabled;
     }
 
     public static DurationSpec.LongNanosecondsBound getRepairStateExpires()
@@ -4212,14 +4362,14 @@ public class DatabaseDescriptor
         conf.max_top_tombstone_partition_count = value;
     }
 
-    public static DataStorageSpec.LongBytesBound getMinTrackedPartitionSize()
+    public static DataStorageSpec.LongBytesBound getMinTrackedPartitionSizeInBytes()
     {
-        return conf.min_tracked_partition_size_bytes;
+        return conf.min_tracked_partition_size;
     }
 
-    public static void setMinTrackedPartitionSize(DataStorageSpec.LongBytesBound spec)
+    public static void setMinTrackedPartitionSizeInBytes(DataStorageSpec.LongBytesBound spec)
     {
-        conf.min_tracked_partition_size_bytes = spec;
+        conf.min_tracked_partition_size = spec;
     }
 
     public static long getMinTrackedPartitionTombstoneCount()
index 98d14a1d326610d38dde2e77dbbe9f3acc1f2370..e84e0e2a9f94d5c7a9b50649f6bba76e45e4724c 100644 (file)
@@ -344,6 +344,20 @@ public class GuardrailsOptions implements GuardrailsConfig
                                   x -> config.drop_truncate_table_enabled = x);
     }
 
+    @Override
+    public boolean getDropKeyspaceEnabled()
+    {
+        return config.drop_keyspace_enabled;
+    }
+
+    public void setDropKeyspaceEnabled(boolean enabled)
+    {
+        updatePropertyWithLogging("drop_keyspace_enabled",
+                                  enabled,
+                                  () -> config.drop_keyspace_enabled,
+                                  x -> config.drop_keyspace_enabled = x);
+    }
+
     @Override
     public boolean getSecondaryIndexesEnabled()
     {
index 09f3f1ecfb749cad3d6075e4bee09f35b906e778..1dcd595080c111bac2e01b501d1c83cdbb8418e8 100644 (file)
@@ -30,6 +30,8 @@ import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
 
+import javax.annotation.Nullable;
+
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -363,10 +365,13 @@ public class YamlConfigurationLoader implements ConfigurationLoader
 
             return new ForwardingProperty(result.getName(), result)
             {
+                boolean allowsNull = result.getAnnotation(Nullable.class) != null;
+
                 @Override
                 public void set(Object object, Object value) throws Exception
                 {
-                    if (value == null && get(object) != null)
+                    // TODO: CASSANDRA-17785, add @Nullable to all nullable Config properties and remove value == null
+                    if (value == null && get(object) != null && !allowsNull)
                         nullProperties.add(getName());
 
                     result.set(object, value);
index 1d792b24531b5ce73c6f3e847cbd783a180d45cd..1c20e6b0ff1ea2d46f39118cabb713435b6fb166 100644 (file)
@@ -201,7 +201,7 @@ public interface CQL3Type
 
             StringBuilder target = new StringBuilder();
             buffer = buffer.duplicate();
-            int size = CollectionSerializer.readCollectionSize(buffer, version);
+            int size = CollectionSerializer.readCollectionSize(buffer, ByteBufferAccessor.instance, version);
             buffer.position(buffer.position() + CollectionSerializer.sizeOfCollectionSize(size, version));
 
             switch (type.kind)
index e7bf7b99e67cdeb9bee68013a69b7d3ec07d3e8c..dc0645fe60233e4abe680bf64cd9a2ef30e783a4 100644 (file)
@@ -30,7 +30,7 @@ import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.ObjectSizes;
-import org.apache.cassandra.utils.memory.AbstractAllocator;
+import org.apache.cassandra.utils.memory.ByteBufferCloner;
 
 /**
  * Represents an identifer for a CQL column definition.
@@ -209,9 +209,9 @@ public class ColumnIdentifier implements IMeasurableMemory, Comparable<ColumnIde
              + ObjectSizes.sizeOf(text);
     }
 
-    public ColumnIdentifier clone(AbstractAllocator allocator)
+    public ColumnIdentifier clone(ByteBufferCloner cloner)
     {
-        return interned ? this : new ColumnIdentifier(allocator.clone(bytes), text, false);
+        return interned ? this : new ColumnIdentifier(cloner.clone(bytes), text, false);
     }
 
     public int compareTo(ColumnIdentifier that)
index f0a0a7425f23ab65bfdaf158be62cd7a88892cbb..ae01551b69317b985429fbd59b59ae3c277554dd 100644 (file)
@@ -79,7 +79,7 @@ import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class QueryProcessor implements QueryHandler
 {
-    public static final CassandraVersion CQL_VERSION = new CassandraVersion("3.4.5");
+    public static final CassandraVersion CQL_VERSION = new CassandraVersion("3.4.6");
 
     // See comments on QueryProcessor #prepare
     public static final CassandraVersion NEW_PREPARED_STATEMENT_BEHAVIOUR_SINCE_30 = new CassandraVersion("3.0.26");
@@ -788,7 +788,6 @@ public class QueryProcessor implements QueryHandler
         if (previous == prepared)
             SystemKeyspace.writePreparedStatement(keyspace, statementId, queryString);
 
-        SystemKeyspace.writePreparedStatement(keyspace, statementId, queryString);
         ResultSet.PreparedMetadata preparedMetadata = ResultSet.PreparedMetadata.fromPrepared(prepared.statement);
         ResultSet.ResultMetadata resultMetadata = ResultSet.ResultMetadata.fromPrepared(prepared.statement);
         return new ResultMessage.Prepared(statementId, resultMetadata.getResultMetadataId(), preparedMetadata, resultMetadata);
index b8acd5954af4772598fb54941ae0837983dcea3e..6e028c274d31f94e4f3731e29d9d821472201eb7 100644 (file)
@@ -154,14 +154,14 @@ public class Tuples
 
         public static Value fromSerialized(ByteBuffer bytes, TupleType type)
         {
-            ByteBuffer[] values = type.split(bytes);
+            ByteBuffer[] values = type.split(ByteBufferAccessor.instance, bytes);
             if (values.length > type.size())
             {
                 throw new InvalidRequestException(String.format(
                         "Tuple value contained too many fields (expected %s, got %s)", type.size(), values.length));
             }
 
-            return new Value(type.split(bytes));
+            return new Value(type.split(ByteBufferAccessor.instance, bytes));
         }
 
         public ByteBuffer get(ProtocolVersion protocolVersion)
@@ -272,7 +272,8 @@ public class Tuples
                 // type.split(bytes)
                 List<List<ByteBuffer>> elements = new ArrayList<>(l.size());
                 for (Object element : l)
-                    elements.add(Arrays.asList(tupleType.split(type.getElementsType().decompose(element))));
+                    elements.add(Arrays.asList(tupleType.split(ByteBufferAccessor.instance,
+                                                               type.getElementsType().decompose(element))));
                 return new InValue(elements);
             }
             catch (MarshalException e)
index b023a8a0b8f6e37d521d9adf6f0088c3773e2c26..a63420fca3cd3a4d14c4993286bcf303eae51d20 100644 (file)
@@ -217,7 +217,7 @@ public abstract class UserTypes
         public static Value fromSerialized(ByteBuffer bytes, UserType type)
         {
             type.validate(bytes);
-            return new Value(type, type.split(bytes));
+            return new Value(type, type.split(ByteBufferAccessor.instance, bytes));
         }
 
         public ByteBuffer get(ProtocolVersion protocolVersion)
index e3f463a25521fbf5a2bd20125dbe892d9084c56b..68cf2d3782b0f7c505c14ec1b4fbf4c5ac14b239 100644 (file)
@@ -650,8 +650,8 @@ public abstract class ColumnCondition
 
             Cell<?> cell = getCell(row, column);
             return cell == null
-                      ? null
-                      : userType.split(cell.buffer())[userType.fieldPosition(field)];
+                   ? null
+                   : userType.split(ByteBufferAccessor.instance, cell.buffer())[userType.fieldPosition(field)];
         }
 
         private boolean isSatisfiedBy(ByteBuffer rowValue)
index 8c4f74567b791d2db1fe53f93d2539ddce5ee6e6..8d21c1e9ef0941e49884527cedf05629efe5bea4 100644 (file)
@@ -43,13 +43,15 @@ final class AggregateFunctionSelector extends AbstractFunctionSelector<Aggregate
         return true;
     }
 
-    public void addInput(ProtocolVersion protocolVersion, InputRow input)
+    public void addInput(InputRow input)
     {
+        ProtocolVersion protocolVersion = input.getProtocolVersion();
+
         // Aggregation of aggregation is not supported
         for (int i = 0, m = argSelectors.size(); i < m; i++)
         {
             Selector s = argSelectors.get(i);
-            s.addInput(protocolVersion, input);
+            s.addInput(input);
             setArg(i, s.getOutput(protocolVersion));
             s.reset();
         }
diff --git a/src/java/org/apache/cassandra/cql3/selection/ColumnTimestamps.java b/src/java/org/apache/cassandra/cql3/selection/ColumnTimestamps.java
new file mode 100644 (file)
index 0000000..6a08076
--- /dev/null
@@ -0,0 +1,394 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.cql3.selection;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import com.google.common.collect.BoundType;
+import com.google.common.collect.Range;
+
+import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.marshal.UserType;
+import org.apache.cassandra.db.rows.Cell;
+import org.apache.cassandra.serializers.CollectionSerializer;
+import org.apache.cassandra.transport.ProtocolVersion;
+import org.apache.cassandra.utils.ByteBufferUtil;
+
+/**
+ * Represents a list of timestamps associated to a CQL column. Those timestamps can either be writetimes or TTLs,
+ * according to {@link TimestampsType}.
+ */
+abstract class ColumnTimestamps
+{
+    /**
+     * The timestamps type.
+     */
+    protected final TimestampsType type;
+
+    protected ColumnTimestamps(TimestampsType type)
+    {
+        this.type = type;
+    }
+
+    /**
+     * @return the timestamps type
+     */
+    public TimestampsType type()
+    {
+        return type;
+    }
+
+    /**
+     * Retrieves the timestamps at the specified position.
+     *
+     * @param index the timestamps position
+     * @return the timestamps at the specified position or a {@link #NO_TIMESTAMP}
+     */
+    public abstract ColumnTimestamps get(int index);
+
+    public abstract ColumnTimestamps max();
+
+    /**
+     * Returns a view of the portion of the timestamps within the specified range.
+     *
+     * @param range the indexes range
+     * @return a view of the specified range within this {@link ColumnTimestamps}
+     */
+    public abstract ColumnTimestamps slice(Range<Integer> range);
+
+    /**
+     * Converts the timestamps into their serialized form.
+     *
+     * @param protocolVersion the protocol version to use for the serialization
+     * @return the serialized timestamps
+     */
+    public abstract ByteBuffer toByteBuffer(ProtocolVersion protocolVersion);
+
+    /**
+     * Appends an empty timestamp at the end of this list.
+     */
+    public abstract void addNoTimestamp();
+
+    /**
+     * Appends the timestamp of the specified cell at the end of this list.
+     */
+    public abstract void addTimestampFrom(Cell<?> cell, int nowInSecond);
+
+    /**
+     * Creates a new {@link ColumnTimestamps} instance for the specified column type.
+     *
+     * @param timestampType the timestamps type
+     * @param columnType    the column type
+     * @return a {@link ColumnTimestamps} instance for the specified column type
+     */
+    static ColumnTimestamps newTimestamps(TimestampsType timestampType, AbstractType<?> columnType)
+    {
+        if (!columnType.isMultiCell())
+            return new SingleTimestamps(timestampType);
+
+        // For UserType we know that the size will not change, so we can initialize the array with the proper capacity.
+        if (columnType instanceof UserType)
+            return new MultipleTimestamps(timestampType, ((UserType) columnType).size());
+
+        return new MultipleTimestamps(timestampType, 0);
+    }
+
+    /**
+     * The type of represented timestamps.
+     */
+    public enum TimestampsType
+    {
+        WRITETIMES
+        {
+            @Override
+            long getTimestamp(Cell<?> cell, int nowInSecond)
+            {
+                return cell.timestamp();
+            }
+
+            @Override
+            long defaultValue()
+            {
+                return Long.MIN_VALUE;
+            }
+
+            @Override
+            ByteBuffer toByteBuffer(long timestamp)
+            {
+                return timestamp == defaultValue() ? null : ByteBufferUtil.bytes(timestamp);
+            }
+        },
+        TTLS
+        {
+            @Override
+            long getTimestamp(Cell<?> cell, int nowInSecond)
+            {
+                if (!cell.isExpiring())
+                    return defaultValue();
+
+                int remaining = cell.localDeletionTime() - nowInSecond;
+                return remaining >= 0 ? remaining : defaultValue();
+            }
+
+            @Override
+            long defaultValue()
+            {
+                return -1;
+            }
+
+            @Override
+            ByteBuffer toByteBuffer(long timestamp)
+            {
+                return timestamp == defaultValue() ? null : ByteBufferUtil.bytes((int) timestamp);
+            }
+        };
+
+        /**
+         * Extracts the timestamp from the specified cell.
+         *
+         * @param cell        the cell
+         * @param nowInSecond the query timestamp in seconds
+         * @return the timestamp corresponding to this type
+         */
+        abstract long getTimestamp(Cell<?> cell, int nowInSecond);
+
+        /**
+         * Returns the value to use when there is no timestamp.
+         *
+         * @return the value to use when there is no timestamp
+         */
+        abstract long defaultValue();
+
+        /**
+         * Serializes the specified timestamp.
+         *
+         * @param timestamp the timestamp to serialize
+         * @return the bytes corresponding to the specified timestamp
+         */
+        abstract ByteBuffer toByteBuffer(long timestamp);
+    }
+
+    /**
+     * A {@link ColumnTimestamps} that doesn't contain any timestamps.
+     */
+    static final ColumnTimestamps NO_TIMESTAMP = new ColumnTimestamps(null)
+    {
+        @Override
+        public ColumnTimestamps get(int index)
+        {
+            return this;
+        }
+
+        @Override
+        public ColumnTimestamps max()
+        {
+            return this;
+        }
+
+        @Override
+        public ColumnTimestamps slice(Range<Integer> range)
+        {
+            return this;
+        }
+
+        @Override
+        public ByteBuffer toByteBuffer(ProtocolVersion protocolVersion)
+        {
+            return null;
+        }
+
+        @Override
+        public void addNoTimestamp()
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public void addTimestampFrom(Cell<?> cell, int nowInSecond)
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public String toString()
+        {
+            return "no timestamp";
+        }
+    };
+
+    /**
+     * A {@link ColumnTimestamps} that can contain a single timestamp (for columns that aren't multicell).
+     */
+    private static class SingleTimestamps extends ColumnTimestamps
+    {
+        protected long timestamp;
+
+        public SingleTimestamps(TimestampsType type)
+        {
+            this(type, type.defaultValue());
+        }
+
+        public SingleTimestamps(TimestampsType type, long timestamp)
+        {
+            super(type);
+            this.timestamp = timestamp;
+        }
+
+        @Override
+        public void addNoTimestamp()
+        {
+            timestamp = type.defaultValue();
+        }
+
+        @Override
+        public void addTimestampFrom(Cell<?> cell, int nowInSecond)
+        {
+            timestamp = type.getTimestamp(cell, nowInSecond);
+        }
+
+        @Override
+        public ColumnTimestamps get(int index)
+        {
+            // If this method is called it means that it is an element selection on a frozen collection/UDT,
+            // so we can safely return this Timestamps as all the elements also share that timestamp
+            return this;
+        }
+
+        @Override
+        public ColumnTimestamps max()
+        {
+            return this;
+        }
+
+        @Override
+        public ColumnTimestamps slice(Range<Integer> range)
+        {
+            return range.isEmpty() ? NO_TIMESTAMP : this;
+        }
+
+        @Override
+        public ByteBuffer toByteBuffer(ProtocolVersion protocolVersion)
+        {
+            return timestamp == type.defaultValue() ? null : type.toByteBuffer(timestamp);
+        }
+
+        @Override
+        public String toString()
+        {
+            return type + ": " + timestamp;
+        }
+    }
+
+    /**
+     * A {@link ColumnTimestamps} that can contain multiple timestamps (for unfrozen collections or UDTs).
+     */
+    private static final class MultipleTimestamps extends ColumnTimestamps
+    {
+        private final List<Long> timestamps;
+
+        public MultipleTimestamps(TimestampsType type, int initialCapacity)
+        {
+            this(type, new ArrayList<>(initialCapacity));
+        }
+
+        public MultipleTimestamps(TimestampsType type, List<Long> timestamps)
+        {
+            super(type);
+            this.timestamps = timestamps;
+        }
+
+        @Override
+        public void addNoTimestamp()
+        {
+            timestamps.add(type.defaultValue());
+        }
+
+        @Override
+        public void addTimestampFrom(Cell<?> cell, int nowInSecond)
+        {
+            timestamps.add(type.getTimestamp(cell, nowInSecond));
+        }
+
+        @Override
+        public ColumnTimestamps get(int index)
+        {
+            return timestamps.isEmpty()
+                   ? NO_TIMESTAMP
+                   : new SingleTimestamps(type, timestamps.get(index));
+        }
+
+        @Override
+        public ColumnTimestamps max()
+        {
+            return timestamps.isEmpty()
+                   ? NO_TIMESTAMP
+                   : new SingleTimestamps(type, Collections.max(timestamps));
+        }
+
+        @Override
+        public ColumnTimestamps slice(Range<Integer> range)
+        {
+            if (range.isEmpty())
+                return NO_TIMESTAMP;
+
+            // Prepare the "from" argument for the call to List#sublist below. That argument is always specified and
+            // inclusive, whereas the range lower bound can be open, closed or not specified.
+            int from = 0;
+            if (range.hasLowerBound())
+            {
+                from = range.lowerBoundType() == BoundType.CLOSED
+                       ? range.lowerEndpoint() // inclusive range lower bound, inclusive "from" is the same list position
+                       : range.lowerEndpoint() + 1; // exclusive range lower bound, inclusive "from" is the next list position
+            }
+
+            // Prepare the "to" argument for the call to List#sublist below. That argument is always specified and
+            // exclusive, whereas the range upper bound can be open, closed or not specified.
+            int to = timestamps.size();
+            if (range.hasUpperBound())
+            {
+                to = range.upperBoundType() == BoundType.CLOSED
+                     ? range.upperEndpoint() + 1 // inclusive range upper bound, exclusive "to" is the next list position
+                     : range.upperEndpoint(); // exclusive range upper bound, exclusive "to" is the same list position
+            }
+
+            return new MultipleTimestamps(type, timestamps.subList(from, to));
+        }
+
+        @Override
+        public ByteBuffer toByteBuffer(ProtocolVersion protocolVersion)
+        {
+            if (timestamps.isEmpty())
+                return null;
+
+            List<ByteBuffer> buffers = new ArrayList<>(timestamps.size());
+            timestamps.forEach(timestamp -> buffers.add(type.toByteBuffer(timestamp)));
+
+            return CollectionSerializer.pack(buffers, timestamps.size(), protocolVersion);
+        }
+
+        @Override
+        public String toString()
+        {
+            return type + ": " + timestamps;
+        }
+    }
+}
index 851d78574c8c0bd802be25bd529065d73dda6857..930fd83f9451c90fcccee402281e8ddee2623096 100644 (file)
@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 
 import com.google.common.base.Objects;
+import com.google.common.collect.Range;
 
 import org.apache.cassandra.cql3.ColumnSpecification;
 import org.apache.cassandra.cql3.QueryOptions;
@@ -43,12 +44,19 @@ import org.apache.cassandra.utils.ByteBufferUtil;
  */
 abstract class ElementsSelector extends Selector
 {
+    /**
+     * An empty collection is composed of an int size of zero.
+     */
+    private static final ByteBuffer EMPTY_FROZEN_COLLECTION = ByteBufferUtil.bytes(0);
+
     protected final Selector selected;
+    protected final CollectionType<?> type;
 
     protected ElementsSelector(Kind kind,Selector selected)
     {
         super(kind);
         this.selected = selected;
+        this.type = (CollectionType<?>) selected.getType();
     }
 
     private static boolean isUnset(ByteBuffer bb)
@@ -226,9 +234,14 @@ abstract class ElementsSelector extends Selector
 
     protected abstract ByteBuffer extractSelection(ByteBuffer collection);
 
-    public void addInput(ProtocolVersion protocolVersion, InputRow input)
+    public void addInput(InputRow input)
     {
-        selected.addInput(protocolVersion, input);
+        selected.addInput(input);
+    }
+
+    protected Range<Integer> getIndexRange(ByteBuffer output, ByteBuffer fromKey, ByteBuffer toKey)
+    {
+        return type.getSerializer().getIndexesRangeFromSerialized(output, fromKey, toKey, keyType(type));
     }
 
     public void reset()
@@ -255,14 +268,12 @@ abstract class ElementsSelector extends Selector
             }
         };
 
-        private final CollectionType<?> type;
         private final ByteBuffer key;
 
         private ElementSelector(Selector selected, ByteBuffer key)
         {
             super(Kind.ELEMENT_SELECTOR, selected);
             assert selected.getType() instanceof MapType || selected.getType() instanceof SetType : "this shouldn't have passed validation in Selectable";
-            this.type = (CollectionType<?>) selected.getType();
             this.key = key;
         }
 
@@ -284,6 +295,31 @@ abstract class ElementsSelector extends Selector
             return type.getSerializer().getSerializedValue(collection, key, keyType(type));
         }
 
+        protected int getElementIndex(ProtocolVersion protocolVersion, ByteBuffer key)
+        {
+            ByteBuffer output = selected.getOutput(protocolVersion);
+            return output == null ? -1 : type.getSerializer().getIndexFromSerialized(output, key, keyType(type));
+        }
+
+        @Override
+        protected ColumnTimestamps getWritetimes(ProtocolVersion protocolVersion)
+        {
+            return getElementTimestamps(protocolVersion, selected.getWritetimes(protocolVersion));
+        }
+
+        @Override
+        protected ColumnTimestamps getTTLs(ProtocolVersion protocolVersion)
+        {
+            return getElementTimestamps(protocolVersion, selected.getTTLs(protocolVersion));
+        }
+
+        private ColumnTimestamps getElementTimestamps(ProtocolVersion protocolVersion,
+                                                      ColumnTimestamps timestamps)
+        {
+            int index = getElementIndex(protocolVersion, key);
+            return index == -1 ? ColumnTimestamps.NO_TIMESTAMP : timestamps.get(index);
+        }
+
         public AbstractType<?> getType()
         {
             return valueType(type);
@@ -348,8 +384,6 @@ abstract class ElementsSelector extends Selector
             }
         };
 
-        private final CollectionType<?> type;
-
         // Note that neither from nor to can be null, but they can both be ByteBufferUtil.UNSET_BYTE_BUFFER to represent no particular bound
         private final ByteBuffer from;
         private final ByteBuffer to;
@@ -359,7 +393,6 @@ abstract class ElementsSelector extends Selector
             super(Kind.SLICE_SELECTOR, selected);
             assert selected.getType() instanceof MapType || selected.getType() instanceof SetType : "this shouldn't have passed validation in Selectable";
             assert from != null && to != null : "We can have unset buffers, but not nulls";
-            this.type = (CollectionType<?>) selected.getType();
             this.from = from;
             this.to = to;
         }
@@ -382,6 +415,36 @@ abstract class ElementsSelector extends Selector
             return type.getSerializer().getSliceFromSerialized(collection, from, to, type.nameComparator(), type.isFrozenCollection());
         }
 
+        @Override
+        protected ColumnTimestamps getWritetimes(ProtocolVersion protocolVersion)
+        {
+            return getTimestampsSlice(protocolVersion, selected.getWritetimes(protocolVersion));
+        }
+
+        @Override
+        protected ColumnTimestamps getTTLs(ProtocolVersion protocolVersion)
+        {
+            return getTimestampsSlice(protocolVersion, selected.getTTLs(protocolVersion));
+        }
+
+        protected ColumnTimestamps getTimestampsSlice(ProtocolVersion protocolVersion, ColumnTimestamps timestamps)
+        {
+            ByteBuffer output = selected.getOutput(protocolVersion);
+            return (output == null || isCollectionEmpty(output))
+                   ? ColumnTimestamps.NO_TIMESTAMP
+                   : timestamps.slice(getIndexRange(output, from, to) );
+        }
+
+        /**
+         * Checks if the collection is empty. Only frozen collections can be empty.
+         * @param output the serialized collection
+         * @return {@code true} if the collection is empty, {@code false} otherwise.
+         */
+        private boolean isCollectionEmpty(ByteBuffer output)
+        {
+            return EMPTY_FROZEN_COLLECTION.equals(output);
+        }
+
         public AbstractType<?> getType()
         {
             return type;
index 0c623976810ed3e112b066dc84c3e74dc34c3fa3..043c3ee0c9d24f7b918f50f4071df971c4929f54 100644 (file)
@@ -27,6 +27,7 @@ import org.apache.cassandra.cql3.QueryOptions;
 import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.db.filter.ColumnFilter;
 import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.marshal.ByteBufferAccessor;
 import org.apache.cassandra.db.marshal.UserType;
 import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.io.util.DataInputPlus;
@@ -98,9 +99,9 @@ final class FieldSelector extends Selector
         selected.addFetchedColumns(builder);
     }
 
-    public void addInput(ProtocolVersion protocolVersion, InputRow input)
+    public void addInput(InputRow input)
     {
-        selected.addInput(protocolVersion, input);
+        selected.addInput(input);
     }
 
     public ByteBuffer getOutput(ProtocolVersion protocolVersion)
@@ -108,10 +109,26 @@ final class FieldSelector extends Selector
         ByteBuffer value = selected.getOutput(protocolVersion);
         if (value == null)
             return null;
-        ByteBuffer[] buffers = type.split(value);
+        ByteBuffer[] buffers = type.split(ByteBufferAccessor.instance, value);
         return field < buffers.length ? buffers[field] : null;
     }
 
+    @Override
+    protected ColumnTimestamps getWritetimes(ProtocolVersion protocolVersion)
+    {
+        return getOutput(protocolVersion) == null
+               ? ColumnTimestamps.NO_TIMESTAMP
+               : selected.getWritetimes(protocolVersion).get(field);
+    }
+
+    @Override
+    protected ColumnTimestamps getTTLs(ProtocolVersion protocolVersion)
+    {
+        return getOutput(protocolVersion) == null
+               ? ColumnTimestamps.NO_TIMESTAMP
+               : selected.getTTLs(protocolVersion).get(field);
+    }
+
     public AbstractType<?> getType()
     {
         return type.fieldType(field);
index 9136ab2c5b2f59f94601caaf3cdd4e58717c75c3..a99822ec0c52dbc424396965268f82cb7c4b0f3e 100644 (file)
@@ -89,10 +89,10 @@ final class ListSelector extends Selector
             elements.get(i).addFetchedColumns(builder);
     }
 
-    public void addInput(ProtocolVersion protocolVersion, InputRow input)
+    public void addInput(InputRow input)
     {
         for (int i = 0, m = elements.size(); i < m; i++)
-            elements.get(i).addInput(protocolVersion, input);
+            elements.get(i).addInput(input);
     }
 
     public ByteBuffer getOutput(ProtocolVersion protocolVersion)
index a43d7bfccbf95901f89a31e801c1b293dbf05a1a..41344a6eae7b94096e492cee7a87870a0dc0f79b 100644 (file)
@@ -35,7 +35,6 @@ import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.db.filter.ColumnFilter.Builder;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.MapType;
-import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.schema.ColumnMetadata;
@@ -194,13 +193,13 @@ final class MapSelector extends Selector
         }
     }
 
-    public void addInput(ProtocolVersion protocolVersion, InputRow input)
+    public void addInput(InputRow input)
     {
         for (int i = 0, m = elements.size(); i < m; i++)
         {
             Pair<Selector, Selector> pair = elements.get(i);
-            pair.left.addInput(protocolVersion, input);
-            pair.right.addInput(protocolVersion, input);
+            pair.left.addInput(input);
+            pair.right.addInput(input);
         }
     }
 
index 3e652dfeb4ec138c5d1ef6e83f936725c13c1acc..37e877261619485c9aebcfa06b81cd7143061705 100644 (file)
@@ -31,7 +31,10 @@ import org.apache.cassandra.db.Clustering;
 import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.db.aggregation.GroupMaker;
 import org.apache.cassandra.db.rows.Cell;
+import org.apache.cassandra.db.rows.ColumnData;
 import org.apache.cassandra.db.rows.ComplexColumnData;
+import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.transport.ProtocolVersion;
 
 public final class ResultSetBuilder
 {
@@ -102,30 +105,14 @@ public final class ResultSetBuilder
         inputRow.add(v);
     }
 
-    public void add(ComplexColumnData complexColumnData, Function<Iterator<Cell<?>>, ByteBuffer> serializer)
+    public void add(Cell<?> c, int nowInSec)
     {
-        if (complexColumnData == null)
-        {
-            inputRow.add(null);
-            return;
-        }
-
-        long timestamp = -1L;
-        if (selectors.collectMaxTimestamps())
-        {
-            Iterator<Cell<?>> cells = complexColumnData.iterator();
-            while (cells.hasNext())
-            {
-                timestamp = Math.max(timestamp, cells.next().timestamp());
-            }
-        }
-
-        inputRow.add(serializer.apply(complexColumnData.iterator()), timestamp, -1);
+        inputRow.add(c, nowInSec);
     }
 
-    public void add(Cell<?> c, int nowInSec)
+    public void add(ColumnData columnData, int nowInSec)
     {
-        inputRow.add(c, nowInSec);
+        inputRow.add(columnData, nowInSec);
     }
 
     /**
@@ -134,7 +121,7 @@ public final class ResultSetBuilder
      * @param partitionKey the partition key of the new row
      * @param clustering the clustering of the new row
      */
-    public void newRow(DecoratedKey partitionKey, Clustering<?> clustering)
+    public void newRow(ProtocolVersion protocolVersion, DecoratedKey partitionKey, Clustering<?> clustering, List<ColumnMetadata> columns)
     {
         // The groupMaker needs to be called for each row
         boolean isNewAggregate = groupMaker == null || groupMaker.isNewGroup(partitionKey, clustering);
@@ -154,7 +141,10 @@ public final class ResultSetBuilder
         }
         else
         {
-            inputRow = new Selector.InputRow(selectors.numberOfFetchedColumns(), selectors.collectTimestamps(), selectors.collectTTLs());
+            inputRow = new Selector.InputRow(protocolVersion,
+                                             columns,
+                                             selectors.collectWritetimes(),
+                                             selectors.collectTTLs());
         }
     }
 
diff --git a/src/java/org/apache/cassandra/cql3/selection/RowTimestamps.java b/src/java/org/apache/cassandra/cql3/selection/RowTimestamps.java
new file mode 100644 (file)
index 0000000..24d23ee
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.cql3.selection;
+
+import java.util.List;
+
+import org.apache.cassandra.db.rows.Cell;
+import org.apache.cassandra.schema.ColumnMetadata;
+
+/**
+ * The {@link ColumnTimestamps} associated to the given set of columns of a row.
+ */
+interface RowTimestamps
+{
+    /**
+     * Adds an empty timestamp for the specified column.
+     *
+     * @param index the column index
+     */
+    void addNoTimestamp(int index);
+
+    /**
+     * Adds the timestamp of the specified cell.
+     *
+     * @param index the column index
+     * @param cell the cell to get the timestamp from
+     * @param nowInSec the query timestamp in seconds
+     */
+    void addTimestamp(int index, Cell<?> cell, int nowInSec);
+
+    /**
+     * Returns the timestamp of the specified column.
+     *
+     * @param index the column index
+     * @return the timestamp of the specified column
+     */
+    ColumnTimestamps get(int index);
+
+    /**
+     * A {@code RowTimestamps} that does nothing.
+     */
+    RowTimestamps NOOP_ROW_TIMESTAMPS = new RowTimestamps()
+    {
+        @Override
+        public void addNoTimestamp(int index)
+        {
+        }
+
+        @Override
+        public void addTimestamp(int index, Cell<?> cell, int nowInSec)
+        {
+        }
+
+        @Override
+        public ColumnTimestamps get(int index)
+        {
+            return ColumnTimestamps.NO_TIMESTAMP;
+        }
+    };
+
+    static RowTimestamps newInstance(ColumnTimestamps.TimestampsType type, List<ColumnMetadata> columns)
+    {
+        final ColumnTimestamps[] array = new ColumnTimestamps[columns.size()];
+
+        for (int i = 0, m = columns.size(); i < m; i++)
+            array[i] = ColumnTimestamps.newTimestamps(type, columns.get(i).type);
+
+        return new RowTimestamps()
+        {
+            @Override
+            public void addNoTimestamp(int index)
+            {
+                array[index].addNoTimestamp();
+            }
+
+            @Override
+            public void addTimestamp(int index, Cell<?> cell, int nowInSec)
+            {
+                array[index].addTimestampFrom(cell, nowInSec);
+            }
+
+            @Override
+            public ColumnTimestamps get(int index)
+            {
+                return array[index];
+            }
+        };
+    }
+}
index ed2a1406be59e5045c7cc55ad7318fabe78334c9..5e9711a9e7f93bf4006a94aee40a6c9ed9e32010 100644 (file)
@@ -37,12 +37,12 @@ final class ScalarFunctionSelector extends AbstractFunctionSelector<ScalarFuncti
         }
     };
 
-    public void addInput(ProtocolVersion protocolVersion, InputRow input)
+    public void addInput(InputRow input)
     {
         for (int i = 0, m = argSelectors.size(); i < m; i++)
         {
             Selector s = argSelectors.get(i);
-            s.addInput(protocolVersion, input);
+            s.addInput(input);
         }
     }
 
index 4210f9cf3cf1bec2aa56545ac46ddc0150c3b4ce..88e70f8ce3ac8f6aaaf97d69bf1f254f01d0fa2e 100644 (file)
@@ -243,25 +243,27 @@ public interface Selectable extends AssignmentTestable
                 this.returnType = returnType;
             }
 
-            public boolean allowedForMultiCell()
+            public boolean aggregatesMultiCell()
             {
                 return this == MAX_WRITE_TIME;
             }
         }
 
         public final ColumnMetadata column;
+        public final Selectable selectable;
         public final Kind kind;
 
-        public WritetimeOrTTL(ColumnMetadata column, Kind kind)
+        public WritetimeOrTTL(ColumnMetadata column, Selectable selectable, Kind kind)
         {
             this.column = column;
+            this.selectable = selectable;
             this.kind = kind;
         }
 
         @Override
         public String toString()
         {
-            return kind.name + "(" + column.name + ")";
+            return kind.name + "(" + selectable + ")";
         }
 
         public Selector.Factory newSelectorFactory(TableMetadata table,
@@ -275,42 +277,42 @@ public interface Selectable extends AssignmentTestable
                                       kind.name,
                                       column.name));
 
-            // only maxwritetime is allowed for multicell types
-            if (column.type.isMultiCell() && !kind.allowedForMultiCell())
-                throw new InvalidRequestException(String.format("Cannot use selection function %s on non-frozen %s %s",
-                                                                kind.name,
-                                                                column.type.isCollection() ? "collection" : "UDT",
-                                                                column.name));
+            Selector.Factory factory = selectable.newSelectorFactory(table, expectedType, defs, boundNames);
+            boolean isMultiCell = factory.getColumnSpecification(table).type.isMultiCell();
 
-            return WritetimeOrTTLSelector.newFactory(column, addAndGetIndex(column, defs), kind);
+            return WritetimeOrTTLSelector.newFactory(factory, addAndGetIndex(column, defs), kind, isMultiCell);
         }
 
+        @Override
         public AbstractType<?> getExactTypeIfKnown(String keyspace)
         {
-            return kind.returnType;
+            AbstractType<?> type = kind.returnType;
+            return column.type.isMultiCell() && !kind.aggregatesMultiCell() ? ListType.getInstance(type, false) : type;
         }
 
         @Override
         public boolean selectColumns(Predicate<ColumnMetadata> predicate)
         {
-            return predicate.test(column);
+            return selectable.selectColumns(predicate);
         }
 
         public static class Raw implements Selectable.Raw
         {
-            private final Selectable.RawIdentifier id;
+            private final Selectable.RawIdentifier column;
+            private final Selectable.Raw selected;
             private final Kind kind;
 
-            public Raw(Selectable.RawIdentifier id, Kind kind)
+            public Raw(Selectable.RawIdentifier column, Selectable.Raw selected, Kind kind)
             {
-                this.id = id;
+                this.column = column;
+                this.selected = selected;
                 this.kind = kind;
             }
 
             @Override
             public WritetimeOrTTL prepare(TableMetadata table)
             {
-                return new WritetimeOrTTL(id.prepare(table), kind);
+                return new WritetimeOrTTL(column.prepare(table), selected.prepare(table), kind);
             }
         }
     }
index 2f41192c373e2dfc668fe2775f35661caf69a9f6..866497a16287b3954442e29b8aa2e123b483bc7e 100644 (file)
@@ -371,16 +371,10 @@ public abstract class Selection
         public boolean collectTTLs();
 
         /**
-         * Checks if one of the selectors collect timestamps.
-         * @return {@code true} if one of the selectors collect timestamps, {@code false} otherwise.
+         * Checks if one of the selectors collects write timestamps.
+         * @return {@code true} if one of the selectors collects write timestamps, {@code false} otherwise.
          */
-        public boolean collectTimestamps();
-
-        /**
-         * Checks if one of the selectors collects maxTimestamps.
-         * @return {@code true} if one of the selectors collect maxTimestamps, {@code false} otherwise.
-         */
-        public boolean collectMaxTimestamps();
+        public boolean collectWritetimes();
 
         /**
          * Adds the current row of the specified <code>ResultSetBuilder</code>.
@@ -507,16 +501,11 @@ public abstract class Selection
                 }
 
                 @Override
-                public boolean collectTimestamps()
+                public boolean collectWritetimes()
                 {
                     return false;
                 }
 
-                @Override
-                public boolean collectMaxTimestamps() {
-                    return false;
-                }
-
                 @Override
                 public ColumnFilter getColumnFilter()
                 {
@@ -531,8 +520,8 @@ public abstract class Selection
     private static class SelectionWithProcessing extends Selection
     {
         private final SelectorFactories factories;
-        private final boolean collectTimestamps;
-        private final boolean collectMaxTimestamps;
+        private final boolean collectWritetimes;
+        private final boolean collectMaxWritetimes;
         private final boolean collectTTLs;
 
         public SelectionWithProcessing(TableMetadata table,
@@ -552,8 +541,8 @@ public abstract class Selection
                   isJson);
 
             this.factories = factories;
-            this.collectTimestamps = factories.containsWritetimeSelectorFactory();
-            this.collectMaxTimestamps = factories.containsMaxWritetimeSelectorFactory();
+            this.collectWritetimes = factories.containsWritetimeSelectorFactory();
+            this.collectMaxWritetimes = factories.containsMaxWritetimeSelectorFactory();
             this.collectTTLs = factories.containsTTLSelectorFactory();
 
             for (ColumnMetadata orderingColumn : orderingColumns)
@@ -614,7 +603,7 @@ public abstract class Selection
                 public void addInputRow(InputRow input)
                 {
                     for (Selector selector : selectors)
-                        selector.addInput(options.getProtocolVersion(), input);
+                        selector.addInput(input);
                 }
 
                 @Override
@@ -630,14 +619,9 @@ public abstract class Selection
                 }
 
                 @Override
-                public boolean collectTimestamps()
+                public boolean collectWritetimes()
                 {
-                    return collectTimestamps || collectMaxTimestamps;
-                }
-
-                @Override
-                public boolean collectMaxTimestamps() {
-                    return collectMaxTimestamps;
+                    return collectWritetimes || collectMaxWritetimes;
                 }
 
                 @Override
index 8226c2d5d26aeb0d9d4bc8b2604fc388bbda4421..2d52e569cf7fb5c3a885273bde5beb52d20e0908 100644 (file)
@@ -20,26 +20,30 @@ package org.apache.cassandra.cql3.selection;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Arrays;
-import java.util.Iterator;
 import java.util.List;
 
-import org.apache.cassandra.db.rows.ComplexColumnData;
-import org.apache.cassandra.schema.CQLTypeParser;
-import org.apache.cassandra.schema.KeyspaceMetadata;
-import org.apache.cassandra.schema.Schema;
-import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.cql3.ColumnIdentifier;
 import org.apache.cassandra.cql3.ColumnSpecification;
 import org.apache.cassandra.cql3.QueryOptions;
 import org.apache.cassandra.cql3.functions.Function;
+import org.apache.cassandra.cql3.selection.ColumnTimestamps.TimestampsType;
 import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.db.context.CounterContext;
 import org.apache.cassandra.db.filter.ColumnFilter;
 import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.marshal.CollectionType;
+import org.apache.cassandra.db.marshal.UserType;
 import org.apache.cassandra.db.rows.Cell;
+import org.apache.cassandra.db.rows.ColumnData;
+import org.apache.cassandra.db.rows.ComplexColumnData;
 import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.schema.CQLTypeParser;
+import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.schema.KeyspaceMetadata;
+import org.apache.cassandra.schema.Schema;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
@@ -178,7 +182,6 @@ public abstract class Selector
         /**
          * Checks if this factory creates <code>Selector</code>s that simply return a column value.
          *
-         * @param index the column index
          * @return <code>true</code> if this factory creates <code>Selector</code>s that simply return a column value,
          * <code>false</code> otherwise.
          */
@@ -301,82 +304,135 @@ public abstract class Selector
      */
     public static final class InputRow
     {
+        private final ProtocolVersion protocolVersion;
+        private final List<ColumnMetadata> columns;
+        private final boolean collectWritetimes;
+        private final boolean collectTTLs;
+
         private ByteBuffer[] values;
-        private final long[] timestamps;
-        private final int[] ttls;
+        private RowTimestamps writetimes;
+        private RowTimestamps ttls;
         private int index;
 
-        public InputRow(int size, boolean collectTimestamps, boolean collectTTLs)
+        public InputRow(ProtocolVersion protocolVersion, List<ColumnMetadata> columns)
         {
-            this.values = new ByteBuffer[size];
+            this(protocolVersion, columns, false, false);
+        }
 
-            if (collectTimestamps)
-            {
-                this.timestamps = new long[size];
-                // We use MIN_VALUE to indicate no timestamp
-                Arrays.fill(timestamps, Long.MIN_VALUE);
-            }
-            else
-            {
-                timestamps = null;
-            }
+        public InputRow(ProtocolVersion protocolVersion,
+                        List<ColumnMetadata> columns,
+                        boolean collectWritetimes,
+                        boolean collectTTLs)
+        {
+            this.protocolVersion = protocolVersion;
+            this.columns = columns;
+            this.collectWritetimes = collectWritetimes;
+            this.collectTTLs = collectTTLs;
+
+            values = new ByteBuffer[columns.size()];
+            writetimes = initTimestamps(TimestampsType.WRITETIMES, collectWritetimes, columns);
+            ttls = initTimestamps(TimestampsType.TTLS, collectTTLs, columns);
+        }
 
-            if (collectTTLs)
-            {
-                this.ttls = new int[size];
-                // We use -1 to indicate no ttl
-                Arrays.fill(ttls, -1);
-            }
-            else
-            {
-                ttls = null;
-            }
+        private RowTimestamps initTimestamps(TimestampsType type,
+                                             boolean collectWritetimes,
+                                             List<ColumnMetadata> columns)
+        {
+            return collectWritetimes ? RowTimestamps.newInstance(type, columns)
+                                     : RowTimestamps.NOOP_ROW_TIMESTAMPS;
         }
 
-        public void add(ByteBuffer v)
+        public ProtocolVersion getProtocolVersion()
         {
-            add(v, Long.MIN_VALUE, -1);
+            return protocolVersion;
         }
 
-        public void add(ByteBuffer v, long timestamp, int ttl)
+        public void add(ByteBuffer v)
         {
             values[index] = v;
 
-            if (timestamps != null)
-                timestamps[index] = timestamp;
-
-            if (ttls != null)
-                ttls[index] = ttl;
-
+            if (v != null)
+            {
+                writetimes.addNoTimestamp(index);
+                ttls.addNoTimestamp(index);
+            }
             index++;
         }
 
-        public void add(Cell<?> c, int nowInSec)
+        public void add(ColumnData columnData, int nowInSec)
         {
-            if (c == null)
+            ColumnMetadata column = columns.get(index);
+            if (columnData == null)
             {
                 add(null);
-                return;
             }
+            else
+            {
+                if (column.isComplex())
+                {
+                    add((ComplexColumnData) columnData, nowInSec);
+                }
+                else
+                {
+                    add((Cell<?>) columnData, nowInSec);
+                }
+            }
+        }
 
+        private void add(Cell<?> c, int nowInSec)
+        {
             values[index] = value(c);
-
-            if (timestamps != null)
-                timestamps[index] = c.timestamp();
-
-            if (ttls != null)
-                ttls[index] = remainingTTL(c, nowInSec);
-
+            writetimes.addTimestamp(index, c, nowInSec);
+            ttls.addTimestamp(index, c, nowInSec);
             index++;
         }
 
-        private int remainingTTL(Cell<?> c, int nowInSec)
+        private void add(ComplexColumnData ccd, int nowInSec)
         {
-            if (!c.isExpiring())
-                return -1;
+            AbstractType<?> type = columns.get(index).type;
+            if (type.isCollection())
+            {
+                values[index] = ((CollectionType<?>) type).serializeForNativeProtocol(ccd.iterator(), protocolVersion);
 
-            int remaining = c.localDeletionTime() - nowInSec;
-            return remaining >= 0 ? remaining : -1;
+                for (Cell<?> cell : ccd)
+                {
+                    writetimes.addTimestamp(index, cell, nowInSec);
+                    ttls.addTimestamp(index, cell, nowInSec);
+                }
+            }
+            else
+            {
+                UserType udt = (UserType) type;
+                int size = udt.size();
+
+                values[index] = udt.serializeForNativeProtocol(ccd.iterator(), protocolVersion);
+
+                short fieldPosition = 0;
+                for (Cell<?> cell : ccd)
+                {
+                    // handle null fields that aren't at the end
+                    short fieldPositionOfCell = ByteBufferUtil.toShort(cell.path().get(0));
+                    while (fieldPosition < fieldPositionOfCell)
+                    {
+                        fieldPosition++;
+                        writetimes.addNoTimestamp(index);
+                        ttls.addNoTimestamp(index);
+                    }
+
+                    fieldPosition++;
+                    writetimes.addTimestamp(index, cell, nowInSec);
+                    ttls.addTimestamp(index, cell, nowInSec);
+                }
+
+                // append nulls for missing cells
+                while (fieldPosition < size)
+                {
+                    fieldPosition++;
+                    writetimes.addNoTimestamp(index);
+                    ttls.addNoTimestamp(index);
+                }
+            }
+            index++;
         }
 
         private <V> ByteBuffer value(Cell<V> c)
@@ -408,35 +464,38 @@ public abstract class Selector
         public void reset(boolean deep)
         {
             index = 0;
+            this.writetimes = initTimestamps(TimestampsType.WRITETIMES, collectWritetimes, columns);
+            this.ttls = initTimestamps(TimestampsType.TTLS, collectTTLs, columns);
+
             if (deep)
                 values = new ByteBuffer[values.length];
         }
 
         /**
-         * Return the timestamp of the column with the specified index.
+         * Return the timestamps of the column with the specified index.
          *
-         * @param index the column index
-         * @return the timestamp of the column with the specified index
+         * @return the timestamps of the column with the specified index
          */
-        public long getTimestamp(int index)
+        ColumnTimestamps getWritetimes(int columnIndex)
         {
-            return timestamps[index];
+            return writetimes.get(columnIndex);
         }
 
         /**
-         * Return the ttl of the column with the specified index.
+         * Return the ttls of the column with the specified index.
          *
-         * @param index the column index
-         * @return the ttl of the column with the specified index
+         * @param columnIndex the column index
+         * @return the ttls of the column with the specified index
          */
-        public int getTtl(int index)
+        ColumnTimestamps getTtls(int columnIndex)
         {
-            return ttls[index];
+            return ttls.get(columnIndex);
         }
 
         /**
          * Returns the column values as list.
          * <p>This content of the list will be shared with the {@code InputRow} unless a deep reset has been done.</p>
+         *
          * @return the column values as list.
          */
         public List<ByteBuffer> getValues()
@@ -448,11 +507,10 @@ public abstract class Selector
     /**
      * Add the current value from the specified <code>ResultSetBuilder</code>.
      *
-     * @param protocolVersion protocol version used for serialization
      * @param input the input row
      * @throws InvalidRequestException if a problem occurs while adding the input row
      */
-    public abstract void addInput(ProtocolVersion protocolVersion, InputRow input);
+    public abstract void addInput(InputRow input);
 
     /**
      * Returns the selector output.
@@ -463,6 +521,16 @@ public abstract class Selector
      */
     public abstract ByteBuffer getOutput(ProtocolVersion protocolVersion) throws InvalidRequestException;
 
+    protected ColumnTimestamps getWritetimes(ProtocolVersion protocolVersion)
+    {
+        throw new UnsupportedOperationException();
+    }
+
+    protected ColumnTimestamps getTTLs(ProtocolVersion protocolVersion)
+    {
+        throw new UnsupportedOperationException();
+    }
+
     /**
      * Returns the <code>Selector</code> output type.
      *
index b54b2d4c4af7a45ebcb1b1ebca9edfed08a18819..cfe398be7ab8278defa9a62370536908dbc88208 100644 (file)
@@ -91,10 +91,10 @@ final class SetSelector extends Selector
             elements.get(i).addFetchedColumns(builder);
     }
 
-    public void addInput(ProtocolVersion protocolVersion, InputRow input)
+    public void addInput(InputRow input)
     {
         for (int i = 0, m = elements.size(); i < m; i++)
-            elements.get(i).addInput(protocolVersion, input);
+            elements.get(i).addInput(input);
     }
 
     public ByteBuffer getOutput(ProtocolVersion protocolVersion)
index a6ae4460483bca2d06694b74b36eaa7844e4539d..b4e8404f3e38e5b93f6728d4e717824d2ea6c26a 100644 (file)
@@ -118,6 +118,8 @@ public final class SimpleSelector extends Selector
     public final ColumnMetadata column;
     private final int idx;
     private ByteBuffer current;
+    private ColumnTimestamps writetimes;
+    private ColumnTimestamps ttls;
     private boolean isSet;
 
     public static Factory newFactory(final ColumnMetadata def, final int idx)
@@ -132,12 +134,14 @@ public final class SimpleSelector extends Selector
     }
 
     @Override
-    public void addInput(ProtocolVersion protocolVersion, InputRow input) throws InvalidRequestException
+    public void addInput(InputRow input) throws InvalidRequestException
     {
         if (!isSet)
         {
             isSet = true;
             current = input.getValue(idx);
+            writetimes = input.getWritetimes(idx);
+            ttls = input.getTtls(idx);
         }
     }
 
@@ -147,11 +151,25 @@ public final class SimpleSelector extends Selector
         return current;
     }
 
+    @Override
+    protected ColumnTimestamps getWritetimes(ProtocolVersion protocolVersion)
+    {
+        return writetimes;
+    }
+
+    @Override
+    protected ColumnTimestamps getTTLs(ProtocolVersion protocolVersion)
+    {
+        return ttls;
+    }
+
     @Override
     public void reset()
     {
         isSet = false;
         current = null;
+        writetimes = null;
+        ttls = null;
     }
 
     @Override
index 6f0c844dd495bf134e5fb239a1813353f31028ef..19a60ac9206534a9106c3e93831f356a73645578 100644 (file)
@@ -101,7 +101,7 @@ public class TermSelector extends Selector
     {
     }
 
-    public void addInput(ProtocolVersion protocolVersion, InputRow input)
+    public void addInput(InputRow input)
     {
     }
 
index 0c06bc2e2f3ea8f8fe5f7a8439b3fa2bb62d894a..f1698724356ca8dc471b7f77c6ea45d1491a66dc 100644 (file)
@@ -89,10 +89,10 @@ final class TupleSelector extends Selector
          &n