HBASE-27279 Addendum fix TestSaslTlsIPCRejectPlainText
author     Duo Zhang <zhangduo@apache.org>
           Tue, 16 Aug 2022 13:43:03 +0000 (21:43 +0800)
committer  Duo Zhang <zhangduo@apache.org>
           Tue, 16 Aug 2022 13:43:03 +0000 (21:43 +0800)
753 files changed:
bin/graceful_stop.sh
bin/hbase
bin/hbase.cmd
conf/hbase-env.sh
dev-support/Jenkinsfile
dev-support/hbase-personality.sh
hbase-archetypes/hbase-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/client/HelloHBase.java
hbase-archetypes/hbase-shaded-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/shaded_client/HelloHBase.java
hbase-assembly/pom.xml
hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.java
hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java
hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java
hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java
hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/RecoverLeaseFSUtils.java
hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutput.java
hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutputHang.java
hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestLocalAsyncOutput.java
hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java
hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/security/HBaseKerberosUtils.java
hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java
hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java
hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyJob.java
hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java
hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java
hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java
hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java
hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java
hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RegionPlan.java
hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerClusterState.java
hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java
hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionHDFSBlockLocationFinder.java
hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java
hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java
hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java
hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java
hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java
hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java
hbase-client/src/main/java/org/apache/hadoop/hbase/HBaseServerException.java
hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java
hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java
hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java
hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java
hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutateResult.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactType.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptor.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogQueryFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCoprocessorRpcChannelImpl.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/NonceGenerator.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorServiceExec.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCoprocessorRpcChannelImpl.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestController.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/Row.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowAccess.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultConsumer.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServiceCaller.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/SlowLogParams.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilderBase.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ClientBackoffPolicy.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ClientBackoffPolicyFactory.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/BigDecimalColumnInterpreter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java
hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionOpeningException.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BigDecimalComparator.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComponentComparator.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithAND.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithOR.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/LongComparator.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/package-info.java
hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java
hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BufferCallBeforeInitHandler.java
hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java
hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java
hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcController.java
hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcControllerImpl.java
hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java
hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClient.java
hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java
hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java
hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java
hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMagic.java
hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaFilter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java
hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java
hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshot.java
hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshotView.java
hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESUnwrapHandler.java [deleted file]
hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESWrapHandler.java [deleted file]
hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java
hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseRpcConnectionHeaderHandler.java
hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClient.java
hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java
hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUnwrapHandler.java
hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUtil.java
hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslWrapHandler.java
hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java
hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java
hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/ShadedAccessControlUtil.java
hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java
hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java
hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java
hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/HBaseSnapshotException.java
hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java
hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java
hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetadata.java
hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java
hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java
hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCoprocessorDescriptor.java
hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java
hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMutation.java
hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java
hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java
hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java
hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/AttributesMatchers.java
hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java
hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestCellBlockBuilder.java
hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java
hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcConnection.java
hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaGlobalsSettingsBypass.java
hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java
hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestRoundRobinPoolMap.java
hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestThreadLocalPoolMap.java
hbase-common/pom.xml
hbase-common/src/main/java/org/apache/hadoop/hbase/ArrayBackedTag.java
hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferExtendedCell.java
hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyValue.java
hbase-common/src/main/java/org/apache/hadoop/hbase/Cell.java
hbase-common/src/main/java/org/apache/hadoop/hbase/CellScannable.java
hbase-common/src/main/java/org/apache/hadoop/hbase/CellScanner.java
hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java
hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java
hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
hbase-common/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java
hbase-common/src/main/java/org/apache/hadoop/hbase/IndividualBytesFieldCell.java
hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java
hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java
hbase-common/src/main/java/org/apache/hadoop/hbase/RawCellBuilderFactory.java
hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java
hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java
hbase-common/src/main/java/org/apache/hadoop/hbase/Stoppable.java
hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
hbase-common/src/main/java/org/apache/hadoop/hbase/Tag.java
hbase-common/src/main/java/org/apache/hadoop/hbase/TagUtil.java
hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java
hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java
hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java
hbase-common/src/main/java/org/apache/hadoop/hbase/exceptions/KeyManagerException.java [new file with mode: 0644]
hbase-common/src/main/java/org/apache/hadoop/hbase/exceptions/SSLContextException.java [new file with mode: 0644]
hbase-common/src/main/java/org/apache/hadoop/hbase/exceptions/TrustManagerException.java [new file with mode: 0644]
hbase-common/src/main/java/org/apache/hadoop/hbase/exceptions/X509Exception.java [new file with mode: 0644]
hbase-common/src/main/java/org/apache/hadoop/hbase/filter/ByteArrayComparable.java
hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteArrayOutputStream.java
hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBuffAllocator.java
hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/tls/KeyStoreFileType.java [new file with mode: 0644]
hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/tls/X509Util.java [new file with mode: 0644]
hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java
hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java
hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockEncodingContext.java
hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockType.java
hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java
hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/trace/HFileContextAttributesBuilderConsumer.java [new file with mode: 0644]
hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/BlockIOUtils.java
hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java
hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java
hbase-common/src/main/java/org/apache/hadoop/hbase/nio/RefCnt.java
hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java
hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java
hbase-common/src/main/java/org/apache/hadoop/hbase/trace/HBaseSemanticAttributes.java
hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractByteRange.java
hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java
hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
hbase-common/src/main/java/org/apache/hadoop/hbase/util/ExceptionUtil.java
hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java
hbase-common/src/main/java/org/apache/hadoop/hbase/util/HashKey.java
hbase-common/src/main/java/org/apache/hadoop/hbase/util/JVM.java
hbase-common/src/main/java/org/apache/hadoop/hbase/util/NettyFutureUtils.java [new file with mode: 0644]
hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java
hbase-common/src/main/java/org/apache/hadoop/hbase/util/WindowMovingAverage.java
hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestByteBuffAllocator.java
hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestByteBuffAllocatorLeakDetection.java [new file with mode: 0644]
hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/TestX509Util.java [new file with mode: 0644]
hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509KeyType.java [new file with mode: 0644]
hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestContext.java [new file with mode: 0644]
hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestContextProvider.java [new file with mode: 0644]
hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/tls/X509TestHelpers.java [new file with mode: 0644]
hbase-common/src/test/java/org/apache/hadoop/hbase/net/BoundSocketMaker.java
hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationHelper.java
hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java
hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java
hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAggregationClient.java
hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java
hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/coprocessor/TestAggregationClient.java
hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestBatchCoprocessorEndpoint.java
hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java
hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java
hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestImportExport.java
hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java
hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/HttpProxyExample.java
hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/mapreduce/JobUtil.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/Interns.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSourceImpl.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSourceImpl.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSourceImpl.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystemHelper.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java
hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java
hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShims.java
hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShimsImpl.java
hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/TestCompatibilitySingletonFactory.java
hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java
hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java
hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/Mode.java
hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionModeStrategy.java
hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalPrinter.java
hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/KeyPressGenerator.java
hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java
hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java
hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpConfig.java
hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
hbase-http/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java
hbase-http/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java
hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java
hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java
hbase-http/src/main/java/org/apache/hadoop/hbase/util/LogMonitoring.java
hbase-http/src/test/java/org/apache/hadoop/hbase/http/HttpServerFunctionalTest.java
hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestGlobalFilter.java
hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServer.java
hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerLifecycle.java
hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerWebapps.java
hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestPathFilter.java
hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProxyUserSpnegoHttpServer.java
hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSSLHttpServer.java
hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestServletFilter.java
hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSpnegoHttpServer.java
hbase-http/src/test/java/org/apache/hadoop/hbase/http/jmx/TestJMXJsonServlet.java
hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java
hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java
hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMobCompaction.java
hbase-it/src/test/java/org/apache/hadoop/hbase/ShellExecEndpointCoprocessor.java
hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MergeRandomAdjacentRegionsOfTableAction.java
hbase-it/src/test/java/org/apache/hadoop/hbase/ipc/IntegrationTestRpcClient.java
hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadCommonCrawl.java
hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java
hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java
hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java
hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRoundRobinTableInputFormat.java
hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java
hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureMetrics.java
hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java
hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/BitSetNode.java
hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureStoreTracker.java
hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/DelayedUtil.java
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java
hbase-server/pom.xml
hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java
hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
hbase-server/src/main/java/org/apache/hadoop/hbase/MetaRegionLocationCache.java
hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java
hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java
hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java
hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java
hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseEnvironment.java
hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java
hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java
hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java
hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java
hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessorEnvironment.java
hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java
hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java
hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java
hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCompressedSizePredicator.java [new file with mode: 0644]
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BloomFilterMetrics.java [new file with mode: 0644]
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilter.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PreviousBlockCompressionRatePredicator.java [new file with mode: 0644]
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/UncompressedBlockSizePredicator.java [new file with mode: 0644]
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BufferChain.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyHBaseSaslRpcServerHandler.java [new file with mode: 0644]
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerPreambleHandler.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServerRequestDecoder.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QueueBalancer.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcHandler.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcScheduler.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManager.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/hbck/HbckChore.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitorReport.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineRegionProcedure.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerProcedureInterface.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/ClientZKSyncer.java
hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTask.java
hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java
hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java
hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java
hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/OperationQuota.java
hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiter.java
hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaState.java
hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java
hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java
hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChangedReadersObserver.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Chunk.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushNonSloppyStoresFirstPolicy.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushPolicy.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseManager.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSizing.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregate.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSAnnotationReadingPriorityFunction.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScannerImpl.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreConfigInformation.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionPolicy.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/OffPeakHours.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnCount.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnTracker.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFilePrettyPrinter.java [new file with mode: 0644]
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReaderBase.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALSyncTimeoutIOException.java [new file with mode: 0644]
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java
hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java
hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java
hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryBatch.java
hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java
hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java
hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java
hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthManager.java
hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java
hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/FsDelegationToken.java
hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsCache.java
hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java
hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFiles.java
hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java
hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterBase.java
hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java
hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java
hbase-server/src/main/java/org/apache/hadoop/hbase/util/DirectMemoryUtils.java
hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckTableInfo.java
hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
hbase-server/src/main/java/org/apache/hadoop/hbase/util/NettyEventLoopGroupConfig.java
hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
hbase-server/src/main/java/org/apache/hadoop/hbase/util/RollingStatCalculator.java
hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java
hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java
hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractRecoveredEditsOutputSink.java
hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java
hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java
hbase-server/src/main/java/org/apache/hadoop/hbase/wal/EntryBuffers.java
hbase-server/src/main/java/org/apache/hadoop/hbase/wal/OutputSink.java
hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java
hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java
hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java
hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java
hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java
hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java
hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestUpdateConfiguration.java
hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java [deleted file]
hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerTimeouts.java [new file with mode: 0644]
hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java
hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java
hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockIOUtils.java
hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java
hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockUnpack.java [new file with mode: 0644]
hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java
hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java
hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCacheRefCnt.java
hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/DelegatingRpcScheduler.java
hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/FailingNettyRpcServer.java [new file with mode: 0644]
hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperStub.java
hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestBlockingIPC.java
hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestBufferChain.java
hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyIPC.java
hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyIPCCloseConnection.java [new file with mode: 0644]
hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcServer.java
hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSecureNettyRpcServer.java [new file with mode: 0644]
hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSecureSimpleRpcServer.java [new file with mode: 0644]
hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcServer.java [new file with mode: 0644]
hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java
hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndDTP.java
hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestWakeUpUnexpectedProcedure.java
hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestHBCKSCP.java
hbase-server/src/test/java/org/apache/hadoop/hbase/monitoring/TestTaskMonitor.java
hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableAggregate.java
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerTimeoutHandling.java [new file with mode: 0644]
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileListFilePrinter.java [new file with mode: 0644]
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWALDurability.java
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALSyncTimeoutException.java [new file with mode: 0644]
hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatus.java
hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetaRegionReplicaReplication.java
hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestUpdateRSGroupConfiguration.java
hbase-server/src/test/java/org/apache/hadoop/hbase/security/AbstractTestSecureIPC.java [new file with mode: 0644]
hbase-server/src/test/java/org/apache/hadoop/hbase/security/AbstractTestTlsRejectPlainText.java [new file with mode: 0644]
hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestNettyIPCSslFailure.java [new file with mode: 0644]
hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestNettyTlsIPC.java [new file with mode: 0644]
hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestNettyTlsIPCRejectPlainText.java [new file with mode: 0644]
hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSaslTlsIPC.java [new file with mode: 0644]
hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSaslTlsIPCRejectPlainText.java [new file with mode: 0644]
hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecureIPC.java
hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
hbase-server/src/test/java/org/apache/hadoop/hbase/util/test/LoadTestDataGenerator.java
hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitBoundedLogWriterCreation.java
hbase-server/src/test/resources/hbase-site.xml
hbase-shaded/hbase-shaded-client-byo-hadoop/pom.xml
hbase-shaded/hbase-shaded-mapreduce/pom.xml
hbase-shaded/hbase-shaded-testing-util-tester/pom.xml
hbase-shaded/hbase-shaded-testing-util/pom.xml
hbase-shaded/pom.xml
hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java
hbase-testing-util/pom.xml
hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java
hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseZKTestingUtility.java
hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
hbase-testing-util/src/main/java/org/apache/hadoop/hbase/StartMiniClusterOption.java
hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ImplType.java
hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java
hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java
hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTracker.java
hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java
hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKListener.java
hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
pom.xml
src/main/asciidoc/_chapters/architecture.adoc
src/main/asciidoc/_chapters/community.adoc
src/main/asciidoc/_chapters/configuration.adoc
src/main/asciidoc/_chapters/ops_mgt.adoc
src/site/xdoc/downloads.xml

index fc18239830b2b6fc1aa14fe2e8f34ca5b6e0123a..da3495b1d7bf345e0e0a5fc47f6193777e72205f 100755 (executable)
@@ -115,7 +115,7 @@ if [ "$nob" == "true"  ]; then
   HBASE_BALANCER_STATE=false
 else
   log "Disabling load balancer"
-  HBASE_BALANCER_STATE=$(echo 'balance_switch false' | "$bin"/hbase --config "${HBASE_CONF_DIR}" shell -n | tail -1)
+  HBASE_BALANCER_STATE=$(echo 'balance_switch false' | "$bin"/hbase --config "${HBASE_CONF_DIR}" shell -n | grep 'Previous balancer state' | awk -F": " '{print $2}')
   log "Previous balancer state was $HBASE_BALANCER_STATE"
 fi
 
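For illustration, a minimal bash sketch of the new parsing above (the sample output is made up; the real text comes from piping `balance_switch false` into the HBase shell):

    #!/usr/bin/env bash
    # Made-up sample of shell output; only the labelled line matters for the new parsing.
    sample_output=$'balance_switch false\nPrevious balancer state : true\nTook 0.6310 seconds'
    # Same pipeline as the patched graceful_stop.sh: pick the labelled line, keep the value after ": ".
    HBASE_BALANCER_STATE=$(echo "$sample_output" | grep 'Previous balancer state' | awk -F": " '{print $2}')
    echo "$HBASE_BALANCER_STATE"   # prints: true

Reading the labelled line is more robust than `tail -1`, which depended on the balancer state being whatever the shell happened to print last.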
index fd92b126ea4b61ae0aa74f3d0c17d8a743f77774..df73d4713d0b184cf07d930301a80cf25046ed72 100755 (executable)
--- a/bin/hbase
+++ b/bin/hbase
@@ -83,6 +83,7 @@ show_usage() {
   if [ "${in_omnibus_tarball}" = "true" ]; then
     echo "  wal              Write-ahead-log analyzer"
     echo "  hfile            Store file analyzer"
+    echo "  sft              Store file tracker viewer"
     echo "  zkcli            Run the ZooKeeper shell"
     echo "  master           Run an HBase HMaster node"
     echo "  regionserver     Run an HBase HRegionServer node"
@@ -597,6 +598,8 @@ elif [ "$COMMAND" = "wal" ] ; then
   CLASS='org.apache.hadoop.hbase.wal.WALPrettyPrinter'
 elif [ "$COMMAND" = "hfile" ] ; then
   CLASS='org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter'
+elif [ "$COMMAND" = "sft" ] ; then
+  CLASS='org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileListFilePrettyPrinter'
 elif [ "$COMMAND" = "zkcli" ] ; then
   CLASS="org.apache.hadoop.hbase.zookeeper.ZKMainServer"
   for f in $HBASE_HOME/lib/zkcli/*.jar; do
@@ -825,7 +828,7 @@ elif [ "${DEBUG}" = "true" ]; then
   echo "Skipped adding JDK11 JVM flags."
 fi
 
-if [[ -n "${HBASE_TRACE_OPTS}" ]]; then
+if [[ "${HBASE_OTEL_TRACING_ENABLED:-false}" = "true" ]] ; then
   if [ "${DEBUG}" = "true" ]; then
     echo "Attaching opentelemetry agent"
   fi
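Attaching the OpenTelemetry javaagent in `bin/hbase` is now gated on an explicit boolean rather than on `HBASE_TRACE_OPTS` being set. Roughly, assuming a hypothetical `AGENT_JAR` resolved from `lib/trace` (or from `OPENTELEMETRY_JAVAAGENT_PATH`), the effect is:

    # Sketch only; names other than HBASE_OTEL_TRACING_ENABLED are placeholders.
    if [[ "${HBASE_OTEL_TRACING_ENABLED:-false}" = "true" ]]; then
      HBASE_OPTS="${HBASE_OPTS} -javaagent:${AGENT_JAR}"   # attach the OpenTelemetry javaagent
    fi

With the default of false, the agent is never attached unless the operator opts in explicitly.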
index 3b569099090f75b6ed4851e663e7ac1f64b63647..f8111a3bc0a97b2230b4a5ce156352f1883b8adc 100644 (file)
@@ -435,6 +435,10 @@ goto :eof
   set CLASS=org.apache.hadoop.hbase.io.hfile.HFile
   goto :eof
 
+:sft
+  set CLASS=org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileListFilePrettyPrinter
+  goto :eof
+
 :zkcli
   set CLASS=org.apache.hadoop.hbase.zookeeper.ZKMainServer
   set CLASSPATH=!CLASSPATH!;%HBASE_HOME%\lib\zkcli\*
@@ -468,6 +472,7 @@ goto :eof
   echo   hbck            Run the hbase 'fsck' tool
   echo   wal             Write-ahead-log analyzer
   echo   hfile           Store file analyzer
+  echo   sft             Store file tracker viewer
   echo   zkcli           Run the ZooKeeper shell
   echo   master          Run an HBase HMaster node
   echo   regionserver    Run an HBase HRegionServer node
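Both launchers gain an `sft` subcommand that maps to the store file tracker viewer. A schematic sketch of the dispatch on the shell side (the final exec line is illustrative; the real script assembles the classpath and JVM options first):

    COMMAND="$1"; shift
    if [ "$COMMAND" = "sft" ]; then
      CLASS='org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileListFilePrettyPrinter'
    fi
    exec java -cp "$CLASSPATH" "$CLASS" "$@"

Any remaining arguments to `bin/hbase sft` (or `hbase.cmd sft` on Windows) are handed to the pretty-printer's own argument parsing.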
index c0b39f74a6351c7fd3959688137db13a7036e9ae..73b7b72ea2f7865ccc323b6c0ea1f618a9886582 100644 (file)
 # export GREP="${GREP-grep}"
 # export SED="${SED-sed}"
 
-# Tracing
-# Uncomment some combination of these lines to enable tracing. You should change the options to use
-# the exporters appropriate to your environment. See
-# https://github.com/open-telemetry/opentelemetry-java-instrumentation for details on how to
-# configure exporters and other components through system properties.
 #
-# The presence HBASE_TRACE_OPTS indicates that tracing should be enabled, adding the agent to the
-# JVM launch command.
-# export HBASE_TRACE_OPTS="-Dotel.traces.exporter=none -Dotel.metrics.exporter=none"
+## OpenTelemetry Tracing
 #
-# For standalone mode, you must explicitly add HBASE_TRACE_OPTS to HBASE_OPTS by uncommenting this line.
-# But do not use and uncomment this line if you're running in distributed mode.
-# export HBASE_OPTS="${HBASE_OPTS} ${HBASE_TRACE_OPTS} -Dotel.resource.attributes=service.name=hbase-standalone"
+# HBase is instrumented for tracing using OpenTelemetry. None of the other OpenTelemetry signals
+# are supported at this time. Configuring tracing involves setting several configuration points,
+# via environment variable or system property. This configuration prefers setting environment
+# variables whenever possible because they are picked up by all processes launched by `bin/hbase`.
+# Use system properties when you launch multiple processes from the same configuration directory --
+# when you need to specify different configuration values for different hbase processes that are
+# launched using the same HBase configuration (i.e., a single-host pseudo-distributed cluster or
+# launching the `bin/hbase shell` from a host that is also running an instance of the master). See
+# https://github.com/open-telemetry/opentelemetry-java/tree/v1.15.0/sdk-extensions/autoconfigure
+# for an inventory of configuration points and detailed explanations of each of them.
 #
-# Per-process configuration variables allow for fine-grained configuration control.
-# export HBASE_SHELL_OPTS="${HBASE_SHELL_OPTS} ${HBASE_TRACE_OPTS} -Dotel.resource.attributes=service.name=hbase-shell"
-# export HBASE_JSHELL_OPTS="${HBASE_JSHELL_OPTS} ${HBASE_TRACE_OPTS} -Dotel.resource.attributes=service.name=hbase-jshell"
-# export HBASE_HBCK_OPTS="${HBASE_HBCK_OPTS} ${HBASE_TRACE_OPTS} -Dotel.resource.attributes=service.name=hbase-hbck"
-# export HBASE_MASTER_OPTS="${HBASE_MASTER_OPTS} ${HBASE_TRACE_OPTS} -Dotel.resource.attributes=service.name=hbase-master"
-# export HBASE_REGIONSERVER_OPTS="${HBASE_REGIONSERVER_OPTS} ${HBASE_TRACE_OPTS} -Dotel.resource.attributes=service.name=hbase-regionserver"
-# export HBASE_THRIFT_OPTS="${HBASE_THRIFT_OPTS} ${HBASE_TRACE_OPTS} -Dotel.resource.attributes=service.name=hbase-thrift"
-# export HBASE_REST_OPTS="${HBASE_REST_OPTS} ${HBASE_TRACE_OPTS} -Dotel.resource.attributes=service.name=hbase-rest"
-# export HBASE_ZOOKEEPER_OPTS="${HBASE_ZOOKEEPER_OPTS} ${HBASE_TRACE_OPTS} -Dotel.resource.attributes=service.name=hbase-zookeeper"
-# export HBASE_PE_OPTS="${HBASE_PE_OPTS} ${HBASE_TRACE_OPTS} -Dotel.resource.attributes=service.name=hbase-performanceevaluation"
-# export HBASE_LTT_OPTS="${HBASE_LTT_OPTS} ${HBASE_TRACE_OPTS} -Dotel.resource.attributes=service.name=hbase-loadtesttool"
-# export HBASE_CANARY_OPTS="${HBASE_CANARY_OPTS} ${HBASE_TRACE_OPTS} -Dotel.resource.attributes=service.name=hbase-canary"
-# export HBASE_HBTOP_OPTS="${HBASE_HBTOP_OPTS} ${HBASE_TRACE_OPTS} -Dotel.resource.attributes=service.name=hbase-hbtop"
+# Note also that as of this writing, the javaagent logs to stderr and is not configured along with
+# the rest of HBase's logging configuration.
 #
-# Manually specify a value for OPENTELEMETRY_JAVAAGENT_PATH to override the autodiscovery mechanism
-# export OPENTELEMETRY_JAVAAGENT_PATH=""
+# `HBASE_OTEL_TRACING_ENABLED`, required. Enable attaching the opentelemetry javaagent to the
+# process via support provided by `bin/hbase`. When this value is `false`, the agent is not added
+# to the process launch arguments and all further OpenTelemetry configuration is ignored.
+#export HBASE_OTEL_TRACING_ENABLED=true
+#
+# `OPENTELEMETRY_JAVAAGENT_PATH`, optional. Override the javaagent provided by HBase in `lib/trace`
+# with an alternate. Use when you need to upgrade the agent version or swap out the official one
+# for an alternative implementation.
+#export OPENTELEMETRY_JAVAAGENT_PATH=""
+#
+# `OTEL_FOO_EXPORTER`, required. Specify an Exporter implementation per signal type. HBase only
+# makes explicit use of the traces signal at this time, so the important one is
+# `OTEL_TRACES_EXPORTER`. Specify its value based on the exporter required for your tracing
+# environment. The other two should be uncommented and specified as `none`, otherwise the agent
+# may report errors while attempting to export these other signals to an unconfigured destination.
+# https://github.com/open-telemetry/opentelemetry-java/tree/v1.15.0/sdk-extensions/autoconfigure#exporters
+#export OTEL_TRACES_EXPORTER=""
+#export OTEL_METRICS_EXPORTER="none"
+#export OTEL_LOGS_EXPORTER="none"
+#
+# `OTEL_SERVICE_NAME`, required. Specify "resource attributes", and specifically the `service.name`,
+# as a unique value for each HBase process. OpenTelemetry allows for specifying this value in one
+# of two ways, via environment variables with the `OTEL_` prefix, or via system properties with the
+# `otel.` prefix. Which you use with HBase is decided based on whether this configuration file is
+# read by a single process or shared by multiple HBase processes. For the default standalone mode
+# or an environment where all processes share the same configuration file, use the `otel` system
+# properties by uncommenting all of the `HBASE_FOO_OPTS` exports below. When this configuration file
+# is being consumed by only a single process -- for example, from a systemd configuration or in a
+# container template -- replace use of `HBASE_FOO_OPTS` with the standard `OTEL_SERVICE_NAME` and/or
+# `OTEL_RESOURCE_ATTRIBUTES` environment variables. For further details, see
+# https://github.com/open-telemetry/opentelemetry-java/tree/v1.15.0/sdk-extensions/autoconfigure#opentelemetry-resource
+#export HBASE_CANARY_OPTS="${HBASE_CANARY_OPTS} -Dotel.resource.attributes=service.name=hbase-canary"
+#export HBASE_HBCK_OPTS="${HBASE_HBCK_OPTS} -Dotel.resource.attributes=service.name=hbase-hbck"
+#export HBASE_HBTOP_OPTS="${HBASE_HBTOP_OPTS} -Dotel.resource.attributes=service.name=hbase-hbtop"
+#export HBASE_JSHELL_OPTS="${HBASE_JSHELL_OPTS} -Dotel.resource.attributes=service.name=hbase-jshell"
+#export HBASE_LTT_OPTS="${HBASE_LTT_OPTS} -Dotel.resource.attributes=service.name=hbase-loadtesttool"
+#export HBASE_MASTER_OPTS="${HBASE_MASTER_OPTS} -Dotel.resource.attributes=service.name=hbase-master"
+#export HBASE_PE_OPTS="${HBASE_PE_OPTS} -Dotel.resource.attributes=service.name=hbase-performanceevaluation"
+#export HBASE_REGIONSERVER_OPTS="${HBASE_REGIONSERVER_OPTS} -Dotel.resource.attributes=service.name=hbase-regionserver"
+#export HBASE_REST_OPTS="${HBASE_REST_OPTS} -Dotel.resource.attributes=service.name=hbase-rest"
+#export HBASE_SHELL_OPTS="${HBASE_SHELL_OPTS} -Dotel.resource.attributes=service.name=hbase-shell"
+#export HBASE_THRIFT_OPTS="${HBASE_THRIFT_OPTS} -Dotel.resource.attributes=service.name=hbase-thrift"
+#export HBASE_ZOOKEEPER_OPTS="${HBASE_ZOOKEEPER_OPTS} -Dotel.resource.attributes=service.name=hbase-zookeeper"
 
-# Additional argments passed to jshell invocation
+#
+# JDK11+ JShell
+#
+# Additional arguments passed to jshell invocation
 # export HBASE_JSHELL_ARGS="--startup DEFAULT --startup PRINTING --startup hbase_startup.jsh"
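Putting the documented configuration points together, an illustrative standalone-mode setup could look like the following (the OTLP exporter and the collector endpoint are assumptions for this example; use whatever exporter your tracing backend requires):

    export HBASE_OTEL_TRACING_ENABLED=true
    export OTEL_TRACES_EXPORTER="otlp"
    export OTEL_METRICS_EXPORTER="none"
    export OTEL_LOGS_EXPORTER="none"
    export OTEL_EXPORTER_OTLP_ENDPOINT="http://localhost:4317"   # assumed local collector
    export HBASE_MASTER_OPTS="${HBASE_MASTER_OPTS} -Dotel.resource.attributes=service.name=hbase-master"
    export HBASE_REGIONSERVER_OPTS="${HBASE_REGIONSERVER_OPTS} -Dotel.resource.attributes=service.name=hbase-regionserver"
    export HBASE_SHELL_OPTS="${HBASE_SHELL_OPTS} -Dotel.resource.attributes=service.name=hbase-shell"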
index c849870305a175b6e4a0775a6e369ded732b7b25..f287f01f637d13f78ba6494ba990274b496a9e16 100644 (file)
@@ -34,7 +34,6 @@ pipeline {
     YETUS_RELEASE = '0.12.0'
     // where we'll write everything from different steps. Need a copy here so the final step can check for success/failure.
     OUTPUT_DIR_RELATIVE_GENERAL = 'output-general'
-    OUTPUT_DIR_RELATIVE_JDK7 = 'output-jdk7'
     OUTPUT_DIR_RELATIVE_JDK8_HADOOP2 = 'output-jdk8-hadoop2'
     OUTPUT_DIR_RELATIVE_JDK8_HADOOP3 = 'output-jdk8-hadoop3'
     OUTPUT_DIR_RELATIVE_JDK11_HADOOP3 = 'output-jdk11-hadoop3'
@@ -186,7 +185,6 @@ pipeline {
         // stash with given name for all tests we might run, so that we can unstash all of them even if
         // we skip some due to e.g. branch-specific JDK or Hadoop support
         stash name: 'general-result', allowEmpty: true, includes: "${OUTPUT_DIR_RELATIVE_GENERAL}/doesn't-match"
-        stash name: 'jdk7-result', allowEmpty: true, includes: "${OUTPUT_DIR_RELATIVE_JDK7}/doesn't-match"
         stash name: 'jdk8-hadoop2-result', allowEmpty: true, includes: "${OUTPUT_DIR_RELATIVE_JDK8_HADOOP2}/doesn't-match"
         stash name: 'jdk8-hadoop3-result', allowEmpty: true, includes: "${OUTPUT_DIR_RELATIVE_JDK8_HADOOP3}/doesn't-match"
         stash name: 'jdk11-hadoop3-result', allowEmpty: true, includes: "${OUTPUT_DIR_RELATIVE_JDK11_HADOOP3}/doesn't-match"
@@ -296,116 +294,6 @@ pipeline {
             }
           }
         }
-        stage ('yetus jdk7 checks') {
-          agent {
-            node {
-              label 'hbase'
-            }
-          }
-          when {
-            branch 'branch-1*'
-          }
-          environment {
-            BASEDIR = "${env.WORKSPACE}/component"
-            TESTS = "${env.DEEP_CHECKS}"
-            OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_JDK7}"
-            OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_JDK7}"
-            SET_JAVA_HOME = "/usr/lib/jvm/java-7"
-          }
-          steps {
-            // Must do prior to anything else, since if one of them timesout we'll stash the commentfile
-            sh '''#!/usr/bin/env bash
-              set -e
-              rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
-              echo '(x) {color:red}-1 jdk7 checks{color}' >"${OUTPUT_DIR}/commentfile"
-              echo "-- Something went wrong running this stage, please [check relevant console output|${BUILD_URL}/console]." >> "${OUTPUT_DIR}/commentfile"
-            '''
-            unstash 'yetus'
-            dir('component') {
-              checkout scm
-            }
-            sh '''#!/usr/bin/env bash
-              set -e
-              rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
-              "${BASEDIR}/dev-support/gather_machine_environment.sh" "${OUTPUT_DIR_RELATIVE}/machine"
-              echo "got the following saved stats in '${OUTPUT_DIR_RELATIVE}/machine'"
-              ls -lh "${OUTPUT_DIR_RELATIVE}/machine"
-            '''
-            script {
-              def ret = sh(
-                returnStatus: true,
-                script: '''#!/usr/bin/env bash
-                  set -e
-                  declare -i status=0
-                  if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then
-                    echo '(/) {color:green}+1 jdk7 checks{color}' > "${OUTPUT_DIR}/commentfile"
-                  else
-                    echo '(x) {color:red}-1 jdk7 checks{color}' > "${OUTPUT_DIR}/commentfile"
-                    status=1
-                  fi
-                  echo "-- For more information [see jdk7 report|${BUILD_URL}/JDK7_20Nightly_20Build_20Report/]" >> "${OUTPUT_DIR}/commentfile"
-                  exit "${status}"
-                '''
-              )
-              if (ret != 0) {
-                // mark the build as UNSTABLE instead of FAILURE, to avoid skipping the later publish of
-                // test output. See HBASE-26339 for more details.
-                currentBuild.result = 'UNSTABLE'
-              }
-            }
-          }
-          post {
-            always {
-              stash name: 'jdk7-result', includes: "${OUTPUT_DIR_RELATIVE}/commentfile"
-              junit testResults: "${env.OUTPUT_DIR_RELATIVE}/**/target/**/TEST-*.xml", allowEmptyResults: true
-              // zip surefire reports.
-              sh '''#!/bin/bash -e
-                if [ -d "${OUTPUT_DIR}/archiver" ]; then
-                  count=$(find "${OUTPUT_DIR}/archiver" -type f | wc -l)
-                  if [[ 0 -ne ${count} ]]; then
-                    echo "zipping ${count} archived files"
-                    zip -q -m -r "${OUTPUT_DIR}/test_logs.zip" "${OUTPUT_DIR}/archiver"
-                  else
-                    echo "No archived files, skipping compressing."
-                  fi
-                else
-                  echo "No archiver directory, skipping compressing."
-                fi
-'''
-              sshPublisher(publishers: [
-                sshPublisherDesc(configName: 'Nightlies',
-                  transfers: [
-                    sshTransfer(remoteDirectory: "hbase/${JOB_NAME}/${BUILD_NUMBER}",
-                      sourceFiles: "${env.OUTPUT_DIR_RELATIVE}/test_logs.zip"
-                    )
-                  ]
-                )
-              ])
-              // remove the big test logs zip file, store the nightlies url in test_logs.html
-              sh '''#!/bin/bash -e
-                if [ -f "${OUTPUT_DIR}/test_logs.zip" ]; then
-                  echo "Remove ${OUTPUT_DIR}/test_logs.zip for saving space"
-                  rm -rf "${OUTPUT_DIR}/test_logs.zip"
-                  python2 ${BASEDIR}/dev-support/gen_redirect_html.py "${ASF_NIGHTLIES_BASE}/${OUTPUT_DIR_RELATIVE}" > "${OUTPUT_DIR}/test_logs.html"
-                else
-                  echo "No test_logs.zip, skipping"
-                fi
-'''
-              // Has to be relative to WORKSPACE.
-              archiveArtifacts artifacts: "${env.OUTPUT_DIR_RELATIVE}/*"
-              archiveArtifacts artifacts: "${env.OUTPUT_DIR_RELATIVE}/**/*"
-              publishHTML target: [
-                allowMissing         : true,
-                keepAll              : true,
-                alwaysLinkToLastBuild: true,
-                // Has to be relative to WORKSPACE.
-                reportDir            : "${env.OUTPUT_DIR_RELATIVE}",
-                reportFiles          : 'console-report.html',
-                reportName           : 'JDK7 Nightly Build Report'
-              ]
-            }
-          }
-        }
         stage ('yetus jdk8 hadoop2 checks') {
           agent {
             node {
@@ -413,7 +301,7 @@ pipeline {
             }
           }
           when {
-            anyOf { branch 'branch-1*'; branch 'branch-2*' }
+            branch 'branch-2*'
           }
           environment {
             BASEDIR = "${env.WORKSPACE}/component"
@@ -522,11 +410,6 @@ pipeline {
               label 'hbase'
             }
           }
-          when {
-            not {
-              branch 'branch-1*'
-            }
-          }
           environment {
             BASEDIR = "${env.WORKSPACE}/component"
             TESTS = "${env.DEEP_CHECKS}"
@@ -636,11 +519,6 @@ pipeline {
               label 'hbase'
             }
           }
-          when {
-            not {
-              branch 'branch-1*'
-            }
-          }
           environment {
             BASEDIR = "${env.WORKSPACE}/component"
             TESTS = "${env.DEEP_CHECKS}"
@@ -817,7 +695,7 @@ pipeline {
             '''
             unstash 'hadoop-2'
             sh '''#!/bin/bash -xe
-              if [[ "${BRANCH}" = branch-2* ]] || [[ "${BRANCH}" = branch-1* ]]; then
+              if [[ "${BRANCH}" = branch-2* ]]; then
                 echo "Attempting to use run an instance on top of Hadoop 2."
                 artifact=$(ls -1 "${WORKSPACE}"/hadoop-2*.tar.gz | head -n 1)
                 tar --strip-components=1 -xzf "${artifact}" -C "hadoop-2"
@@ -841,44 +719,40 @@ pipeline {
             '''
             unstash 'hadoop-3'
             sh '''#!/bin/bash -e
-              if [[ "${BRANCH}" = branch-1* ]]; then
-                echo "Skipping to run against Hadoop 3 for branch ${BRANCH}"
-              else
-                echo "Attempting to use run an instance on top of Hadoop 3."
-                artifact=$(ls -1 "${WORKSPACE}"/hadoop-3*.tar.gz | head -n 1)
-                tar --strip-components=1 -xzf "${artifact}" -C "hadoop-3"
-                if ! "${BASEDIR}/dev-support/hbase_nightly_pseudo-distributed-test.sh" \
-                    --single-process \
-                    --working-dir output-integration/hadoop-3 \
-                    --hbase-client-install hbase-client \
-                    hbase-install \
-                    hadoop-3/bin/hadoop \
-                    hadoop-3/share/hadoop/yarn/timelineservice \
-                    hadoop-3/share/hadoop/yarn/test/hadoop-yarn-server-tests-*-tests.jar \
-                    hadoop-3/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-*-tests.jar \
-                    hadoop-3/bin/mapred \
-                    >output-integration/hadoop-3.log 2>&1 ; then
-                  echo "(x) {color:red}-1 client integration test{color}\n--Failed when running client tests on top of Hadoop 3. [see log for details|${BUILD_URL}/artifact/output-integration/hadoop-3.log]. (note that this means we didn't check the Hadoop 3 shaded client)" >output-integration/commentfile
-                  exit 2
-                fi
-                echo "Attempting to use run an instance on top of Hadoop 3, relying on the Hadoop client artifacts for the example client program."
-                if ! "${BASEDIR}/dev-support/hbase_nightly_pseudo-distributed-test.sh" \
-                    --single-process \
-                    --hadoop-client-classpath hadoop-3/share/hadoop/client/hadoop-client-api-*.jar:hadoop-3/share/hadoop/client/hadoop-client-runtime-*.jar \
-                    --working-dir output-integration/hadoop-3-shaded \
-                    --hbase-client-install hbase-client \
-                    hbase-install \
-                    hadoop-3/bin/hadoop \
-                    hadoop-3/share/hadoop/yarn/timelineservice \
-                    hadoop-3/share/hadoop/yarn/test/hadoop-yarn-server-tests-*-tests.jar \
-                    hadoop-3/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-*-tests.jar \
-                    hadoop-3/bin/mapred \
-                    >output-integration/hadoop-3-shaded.log 2>&1 ; then
-                  echo "(x) {color:red}-1 client integration test{color}\n--Failed when running client tests on top of Hadoop 3 using Hadoop's shaded client. [see log for details|${BUILD_URL}/artifact/output-integration/hadoop-3-shaded.log]." >output-integration/commentfile
-                  exit 2
-                fi
-                echo "(/) {color:green}+1 client integration test{color}" >output-integration/commentfile
+              echo "Attempting to use run an instance on top of Hadoop 3."
+              artifact=$(ls -1 "${WORKSPACE}"/hadoop-3*.tar.gz | head -n 1)
+              tar --strip-components=1 -xzf "${artifact}" -C "hadoop-3"
+              if ! "${BASEDIR}/dev-support/hbase_nightly_pseudo-distributed-test.sh" \
+                  --single-process \
+                  --working-dir output-integration/hadoop-3 \
+                  --hbase-client-install hbase-client \
+                  hbase-install \
+                  hadoop-3/bin/hadoop \
+                  hadoop-3/share/hadoop/yarn/timelineservice \
+                  hadoop-3/share/hadoop/yarn/test/hadoop-yarn-server-tests-*-tests.jar \
+                  hadoop-3/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-*-tests.jar \
+                  hadoop-3/bin/mapred \
+                  >output-integration/hadoop-3.log 2>&1 ; then
+                echo "(x) {color:red}-1 client integration test{color}\n--Failed when running client tests on top of Hadoop 3. [see log for details|${BUILD_URL}/artifact/output-integration/hadoop-3.log]. (note that this means we didn't check the Hadoop 3 shaded client)" >output-integration/commentfile
+                exit 2
+              fi
+              echo "Attempting to use run an instance on top of Hadoop 3, relying on the Hadoop client artifacts for the example client program."
+              if ! "${BASEDIR}/dev-support/hbase_nightly_pseudo-distributed-test.sh" \
+                  --single-process \
+                  --hadoop-client-classpath hadoop-3/share/hadoop/client/hadoop-client-api-*.jar:hadoop-3/share/hadoop/client/hadoop-client-runtime-*.jar \
+                  --working-dir output-integration/hadoop-3-shaded \
+                  --hbase-client-install hbase-client \
+                  hbase-install \
+                  hadoop-3/bin/hadoop \
+                  hadoop-3/share/hadoop/yarn/timelineservice \
+                  hadoop-3/share/hadoop/yarn/test/hadoop-yarn-server-tests-*-tests.jar \
+                  hadoop-3/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-*-tests.jar \
+                  hadoop-3/bin/mapred \
+                  >output-integration/hadoop-3-shaded.log 2>&1 ; then
+                echo "(x) {color:red}-1 client integration test{color}\n--Failed when running client tests on top of Hadoop 3 using Hadoop's shaded client. [see log for details|${BUILD_URL}/artifact/output-integration/hadoop-3-shaded.log]." >output-integration/commentfile
+                exit 2
               fi
+              echo "(/) {color:green}+1 client integration test{color}" >output-integration/commentfile
             '''
           }
           post {
@@ -919,14 +793,12 @@ pipeline {
       script {
          try {
            unstash 'general-result'
-           unstash 'jdk7-result'
            unstash 'jdk8-hadoop2-result'
            unstash 'jdk8-hadoop3-result'
            unstash 'jdk11-hadoop3-result'
            unstash 'srctarball-result'
            sh "printenv"
            def results = ["${env.OUTPUT_DIR_RELATIVE_GENERAL}/commentfile",
-                          "${env.OUTPUT_DIR_RELATIVE_JDK7}/commentfile",
                           "${env.OUTPUT_DIR_RELATIVE_JDK8_HADOOP2}/commentfile",
                           "${env.OUTPUT_DIR_RELATIVE_JDK8_HADOOP3}/commentfile",
                           "${env.OUTPUT_DIR_RELATIVE_JDK11_HADOOP3}/commentfile",
index dbcb7ecc98243be98f69caca95e6bf315763f62a..5f18b4652afc6578b468a22ab7fb3eb6a00121cc 100755 (executable)
@@ -145,8 +145,6 @@ function personality_modules
   local repostatus=$1
   local testtype=$2
   local extra=""
-  local branch1jdk8=()
-  local jdk8module=""
   local MODULES=("${CHANGED_MODULES[@]}")
 
   yetus_info "Personality: ${repostatus} ${testtype}"
@@ -173,10 +171,6 @@ function personality_modules
   tmpdir=$(realpath target)
   extra="${extra} -Djava.io.tmpdir=${tmpdir} -DHBasePatchProcess"
 
-  if [[ "${PATCH_BRANCH}" = branch-1* ]]; then
-    extra="${extra} -Dhttps.protocols=TLSv1.2"
-  fi
-
   # If we have HADOOP_PROFILE specified and we're on branch-2.x, pass along
   # the hadoop.profile system property. Ensures that Hadoop2 and Hadoop3
   # logic is not both activated within Maven.
@@ -207,21 +201,6 @@ function personality_modules
     return
   fi
 
-  # This list should include any modules that require jdk8. Maven should be configured to only
-  # include them when a proper JDK is in use, but that doesn' work if we specifically ask for the
-  # module to build as yetus does if something changes in the module.  Rather than try to
-  # figure out what jdk is in use so we can duplicate the module activation logic, just
-  # build at the top level if anything changes in one of these modules and let maven sort it out.
-  branch1jdk8=(hbase-error-prone hbase-tinylfu-blockcache)
-  if [[ "${PATCH_BRANCH}" = branch-1* ]]; then
-    for jdk8module in "${branch1jdk8[@]}"; do
-      if [[ "${MODULES[*]}" =~ ${jdk8module} ]]; then
-        MODULES=(.)
-        break
-      fi
-    done
-  fi
-
   if [[ ${testtype} == spotbugs ]]; then
     # Run spotbugs on each module individually to diff pre-patch and post-patch results and
     # report new warnings for changed modules only.
@@ -241,8 +220,7 @@ function personality_modules
     return
   fi
 
-  if [[ ${testtype} == compile ]] && [[ "${SKIP_ERRORPRONE}" != "true" ]] &&
-      [[ "${PATCH_BRANCH}" != branch-1* ]] ; then
+  if [[ ${testtype} == compile ]] && [[ "${SKIP_ERRORPRONE}" != "true" ]]; then
     extra="${extra} -PerrorProne"
   fi
 
@@ -445,11 +423,7 @@ function refguide_rebuild
     return 1
   fi
 
-  if [[ "${PATCH_BRANCH}" = branch-1* ]]; then
-    pdf_output="book.pdf"
-  else
-    pdf_output="apache_hbase_reference_guide.pdf"
-  fi
+  pdf_output="apache_hbase_reference_guide.pdf"
 
   if [[ ! -f "${PATCH_DIR}/${repostatus}-site/${pdf_output}" ]]; then
     add_vote_table -1 refguide "${repostatus} failed to produce the pdf version of the reference guide."
@@ -601,75 +575,34 @@ function hadoopcheck_rebuild
 
   # All supported Hadoop versions that we want to test the compilation with
   # See the Hadoop section on prereqs in the HBase Reference Guide
-  if [[ "${PATCH_BRANCH}" = branch-1.4 ]]; then
-    yetus_info "Setting Hadoop 2 versions to test based on branch-1.4 rules."
-    if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then
-      hbase_hadoop2_versions="2.7.7"
-    else
-      hbase_hadoop2_versions="2.7.1 2.7.2 2.7.3 2.7.4 2.7.5 2.7.6 2.7.7"
-    fi
-  elif [[ "${PATCH_BRANCH}" = branch-1 ]]; then
-    yetus_info "Setting Hadoop 2 versions to test based on branch-1 rules."
-    if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then
-      hbase_hadoop2_versions="2.10.0"
-    else
-      hbase_hadoop2_versions="2.10.0"
-    fi
-  elif [[ "${PATCH_BRANCH}" = branch-2.0 ]]; then
-    yetus_info "Setting Hadoop 2 versions to test based on branch-2.0 rules."
-    if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then
-      hbase_hadoop2_versions="2.6.5 2.7.7 2.8.5"
-    else
-      hbase_hadoop2_versions="2.6.1 2.6.2 2.6.3 2.6.4 2.6.5 2.7.1 2.7.2 2.7.3 2.7.4 2.7.5 2.7.6 2.7.7 2.8.2 2.8.3 2.8.4 2.8.5"
-    fi
-  elif [[ "${PATCH_BRANCH}" = branch-2.1 ]]; then
-    yetus_info "Setting Hadoop 2 versions to test based on branch-2.1 rules."
-    if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then
-      hbase_hadoop2_versions="2.7.7 2.8.5"
-    else
-      hbase_hadoop2_versions="2.7.1 2.7.2 2.7.3 2.7.4 2.7.5 2.7.6 2.7.7 2.8.2 2.8.3 2.8.4 2.8.5"
-    fi
-  elif [[ "${PATCH_BRANCH}" = branch-2.2 ]]; then
-    yetus_info "Setting Hadoop 2 versions to test based on branch-2.2 rules."
+  if [[ "${PATCH_BRANCH}" = branch-2.4 ]]; then
+    yetus_info "Setting Hadoop 2 versions to test based on branch-2.4 rules."
     if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then
-      hbase_hadoop2_versions="2.8.5 2.9.2 2.10.0"
+      hbase_hadoop2_versions="2.10.2"
     else
-      hbase_hadoop2_versions="2.8.5 2.9.2 2.10.0"
+      hbase_hadoop2_versions="2.10.0 2.10.1 2.10.2"
     fi
   elif [[ "${PATCH_BRANCH}" = branch-2.* ]]; then
-    yetus_info "Setting Hadoop 2 versions to test based on branch-2.3+ rules."
-    if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then
-      hbase_hadoop2_versions="2.10.1"
-    else
-      hbase_hadoop2_versions="2.10.0 2.10.1"
-    fi
+    yetus_info "Setting Hadoop 2 versions to test based on branch-2.5+ rules."
+    hbase_hadoop2_versions="2.10.2"
   else
     yetus_info "Setting Hadoop 2 versions to null on master/feature branch rules since we do not support hadoop 2 for hbase 3.x any more."
     hbase_hadoop2_versions=""
   fi
-  if [[ "${PATCH_BRANCH}" = branch-1* ]]; then
-    yetus_info "Setting Hadoop 3 versions to test based on branch-1.x rules."
-    hbase_hadoop3_versions=""
-  elif [[ "${PATCH_BRANCH}" = branch-2.0 ]] || [[ "${PATCH_BRANCH}" = branch-2.1 ]]; then
-    yetus_info "Setting Hadoop 3 versions to test based on branch-2.0/branch-2.1 rules"
-    if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then
-      hbase_hadoop3_versions="3.0.3 3.1.2"
-    else
-      hbase_hadoop3_versions="3.0.3 3.1.1 3.1.2"
-    fi
-  elif [[ "${PATCH_BRANCH}" = branch-2.2 ]] || [[ "${PATCH_BRANCH}" = branch-2.3 ]]; then
-    yetus_info "Setting Hadoop 3 versions to test based on branch-2.2/branch-2.3 rules"
+
+  if [[ "${PATCH_BRANCH}" = branch-2.4 ]]; then
+    yetus_info "Setting Hadoop 3 versions to test based on branch-2.4 rules"
     if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then
-      hbase_hadoop3_versions="3.1.2 3.2.2"
+      hbase_hadoop3_versions="3.1.4 3.2.4 3.3.4"
     else
-      hbase_hadoop3_versions="3.1.1 3.1.2 3.2.0 3.2.1 3.2.2"
+      hbase_hadoop3_versions="3.1.1 3.1.2 3.1.3 3.1.4 3.2.0 3.2.1 3.2.2 3.2.3 3.2.4 3.3.0 3.3.1 3.3.2 3.3.3 3.3.4"
     fi
   else
-    yetus_info "Setting Hadoop 3 versions to test based on branch-2.4+/master/feature branch rules"
+    yetus_info "Setting Hadoop 3 versions to test based on branch-2.5+/master/feature branch rules"
     if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then
-      hbase_hadoop3_versions="3.1.2 3.2.2 3.3.1"
+      hbase_hadoop3_versions="3.2.4 3.3.4"
     else
-      hbase_hadoop3_versions="3.1.1 3.1.2 3.2.0 3.2.1 3.2.2 3.3.0 3.3.1"
+      hbase_hadoop3_versions="3.2.3 3.2.4 3.3.2 3.3.3 3.3.4"
     fi
   fi
 
index a9795d6ba91897e15857fe5444133fb8feeb6b79..3ca23a618ae48a72a5fd02b4f6928e3e2c28957d 100644 (file)
@@ -43,7 +43,7 @@ import org.apache.hadoop.hbase.util.Bytes;
  */
 public final class HelloHBase {
 
-  protected static final String MY_NAMESPACE_NAME = "myTestNamespace";
+  static final String MY_NAMESPACE_NAME = "myTestNamespace";
   static final TableName MY_TABLE_NAME = TableName.valueOf("myTestTable");
   static final byte[] MY_COLUMN_FAMILY_NAME = Bytes.toBytes("cf");
   static final byte[] MY_FIRST_COLUMN_QUALIFIER = Bytes.toBytes("myFirstColumn");
index 66581d5acc68c8fafbf2abb41ceaada84ac88efe..964e9a5bbd796d1691f35683bd5f754576f48127 100644 (file)
@@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.util.Bytes;
  */
 public final class HelloHBase {
 
-  protected static final String MY_NAMESPACE_NAME = "myTestNamespace";
+  static final String MY_NAMESPACE_NAME = "myTestNamespace";
   static final TableName MY_TABLE_NAME = TableName.valueOf("myTestTable");
   static final byte[] MY_COLUMN_FAMILY_NAME = Bytes.toBytes("cf");
   static final byte[] MY_FIRST_COLUMN_QUALIFIER = Bytes.toBytes("myFirstColumn");
index 4750012d44f2fa45bfba932ada59e5d4092ad416..943c8a7c00cf31ea662333a61c8989cb19e3a405 100644 (file)
     <dependency>
       <groupId>io.opentelemetry.javaagent</groupId>
       <artifactId>opentelemetry-javaagent</artifactId>
-      <classifier>all</classifier>
     </dependency>
     <dependency>
       <groupId>junit</groupId>
index b88b32bdb8142d860afba7b02e6e260a35c75d6f..cd186f904a95049d0f687bc2bb985c664397325e 100644 (file)
@@ -89,8 +89,6 @@ public interface AsyncFSOutput extends Closeable {
   @Override
   void close() throws IOException;
 
-  /**
-   * @return byteSize success synced to underlying filesystem.
-   */
+  /** Returns byteSize success synced to underlying filesystem. */
   long getSyncedLength();
 }
index 8906f003bc882131193b421b6a71025989eaa707..149dec431e0f0a60c0ee16012f9aca8923052768 100644 (file)
@@ -22,23 +22,26 @@ import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHel
 import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile;
 import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.endFileLease;
 import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.getStatus;
+import static org.apache.hadoop.hbase.util.NettyFutureUtils.consume;
+import static org.apache.hadoop.hbase.util.NettyFutureUtils.safeWrite;
+import static org.apache.hadoop.hbase.util.NettyFutureUtils.safeWriteAndFlush;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 import static org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleState.READER_IDLE;
 import static org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleState.WRITER_IDLE;
 
 import com.google.errorprone.annotations.RestrictedApi;
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.nio.ByteBuffer;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentLinkedDeque;
-import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Supplier;
 import org.apache.hadoop.conf.Configuration;
@@ -48,6 +51,8 @@ import org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.Can
 import org.apache.hadoop.hbase.io.asyncfs.monitor.StreamSlowMonitor;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FutureUtils;
+import org.apache.hadoop.hbase.util.NettyFutureUtils;
 import org.apache.hadoop.hbase.util.RecoverLeaseFSUtils;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -63,14 +68,13 @@ import org.apache.hadoop.util.DataChecksum;
 import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
 import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf;
 import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufAllocator;
 import org.apache.hbase.thirdparty.io.netty.channel.Channel;
+import org.apache.hbase.thirdparty.io.netty.channel.ChannelFuture;
 import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandler.Sharable;
 import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext;
 import org.apache.hbase.thirdparty.io.netty.channel.ChannelId;
-import org.apache.hbase.thirdparty.io.netty.channel.ChannelOutboundInvoker;
 import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler;
 import org.apache.hbase.thirdparty.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
 import org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateEvent;
@@ -252,7 +256,7 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
     // disable further write, and fail all pending ack.
     state = State.BROKEN;
     failWaitingAckQueue(channel, errorSupplier);
-    datanodeInfoMap.keySet().forEach(ChannelOutboundInvoker::close);
+    datanodeInfoMap.keySet().forEach(NettyFutureUtils::safeClose);
   }
 
   private void failWaitingAckQueue(Channel channel, Supplier<Throwable> errorSupplier) {
@@ -329,7 +333,7 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
           ByteBuf buf = alloc.buffer(len);
           heartbeat.putInBuffer(buf.nioBuffer(0, len));
           buf.writerIndex(len);
-          ctx.channel().writeAndFlush(buf);
+          safeWriteAndFlush(ctx.channel(), buf);
         }
         return;
       }
@@ -429,14 +433,20 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
       future.completeExceptionally(new IOException("stream already broken"));
       // it's the one we have just pushed or just a no-op
       waitingAckQueue.removeFirst();
+
+      checksumBuf.release();
+      headerBuf.release();
+
+      // This method takes ownership of the dataBuf so we need to release it before returning.
+      dataBuf.release();
       return;
     }
     // TODO: we should perhaps measure time taken per DN here;
     // we could collect statistics per DN, and/or exclude bad nodes in createOutput.
     datanodeInfoMap.keySet().forEach(ch -> {
-      ch.write(headerBuf.retainedDuplicate());
-      ch.write(checksumBuf.retainedDuplicate());
-      ch.writeAndFlush(dataBuf.retainedDuplicate());
+      safeWrite(ch, headerBuf.retainedDuplicate());
+      safeWrite(ch, checksumBuf.retainedDuplicate());
+      safeWriteAndFlush(ch, dataBuf.retainedDuplicate());
     });
     checksumBuf.release();
     headerBuf.release();
@@ -556,16 +566,18 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
     headerBuf.writerIndex(headerLen);
     CompletableFuture<Long> future = new CompletableFuture<>();
     waitingAckQueue.add(new Callback(future, finalizedLength, datanodeInfoMap.keySet(), 0));
-    datanodeInfoMap.keySet().forEach(ch -> ch.writeAndFlush(headerBuf.retainedDuplicate()));
+    datanodeInfoMap.keySet().forEach(ch -> safeWriteAndFlush(ch, headerBuf.retainedDuplicate()));
     headerBuf.release();
-    try {
-      future.get();
-    } catch (InterruptedException e) {
-      throw (IOException) new InterruptedIOException().initCause(e);
-    } catch (ExecutionException e) {
-      Throwable cause = e.getCause();
-      Throwables.propagateIfPossible(cause, IOException.class);
-      throw new IOException(cause);
+    FutureUtils.get(future);
+  }
+
+  private void closeDataNodeChannelsAndAwait() {
+    List<ChannelFuture> futures = new ArrayList<>();
+    for (Channel ch : datanodeInfoMap.keySet()) {
+      futures.add(ch.close());
+    }
+    for (ChannelFuture future : futures) {
+      consume(future.awaitUninterruptibly());
     }
   }
 
@@ -573,14 +585,12 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
    * The close method when error occurred. Now we just call recoverFileLease.
    */
   @Override
-  @SuppressWarnings("FutureReturnValueIgnored")
   public void recoverAndClose(CancelableProgressable reporter) throws IOException {
     if (buf != null) {
       buf.release();
       buf = null;
     }
-    datanodeInfoMap.keySet().forEach(ChannelOutboundInvoker::close);
-    datanodeInfoMap.keySet().forEach(ch -> ch.closeFuture().awaitUninterruptibly());
+    closeDataNodeChannelsAndAwait();
     endFileLease(client, fileId);
     RecoverLeaseFSUtils.recoverFileLease(dfs, new Path(src), conf,
       reporter == null ? new CancelOnClose(client) : reporter);
@@ -591,12 +601,10 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
    * {@link #recoverAndClose(CancelableProgressable)} if this method throws an exception.
    */
   @Override
-  @SuppressWarnings("FutureReturnValueIgnored")
   public void close() throws IOException {
     endBlock();
     state = State.CLOSED;
-    datanodeInfoMap.keySet().forEach(ChannelOutboundInvoker::close);
-    datanodeInfoMap.keySet().forEach(ch -> ch.closeFuture().awaitUninterruptibly());
+    closeDataNodeChannelsAndAwait();
     block.setNumBytes(ackedBlockLength);
     completeFile(client, namenode, src, clientName, block, fileId);
   }
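The output class now routes its fire-and-forget Netty calls through NettyFutureUtils, releases the header, checksum and data buffers itself on the early-return path when the stream is already broken, and awaits the datanode channel closes in closeDataNodeChannelsAndAwait() instead of ignoring them. As a rough illustration of the safe-write idea (a sketch, not the actual NettyFutureUtils implementation), such a helper attaches a listener so a failed write is at least logged rather than silently dropped:

    import org.apache.hbase.thirdparty.io.netty.channel.ChannelOutboundInvoker;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    final class SafeWriteSketch {

      private static final Logger LOG = LoggerFactory.getLogger(SafeWriteSketch.class);

      private SafeWriteSketch() {
      }

      /** Write and flush, logging (rather than ignoring) an asynchronous failure. */
      static void safeWriteAndFlush(ChannelOutboundInvoker channel, Object msg) {
        channel.writeAndFlush(msg).addListener(future -> {
          if (!future.isSuccess()) {
            LOG.warn("write and flush to datanode channel failed", future.cause());
          }
        });
      }
    }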
index 2517f2d2c01a0c05ca86f101bbff9fc2ab868b77..9c66c53b8bfeba73be6cae895e51a83ba48be929 100644 (file)
@@ -19,6 +19,9 @@ package org.apache.hadoop.hbase.io.asyncfs;
 
 import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
 import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+import static org.apache.hadoop.hbase.util.NettyFutureUtils.addListener;
+import static org.apache.hadoop.hbase.util.NettyFutureUtils.safeClose;
+import static org.apache.hadoop.hbase.util.NettyFutureUtils.safeWriteAndFlush;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
@@ -351,7 +354,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
     buffer.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
     buffer.writeByte(Op.WRITE_BLOCK.code);
     proto.writeDelimitedTo(new ByteBufOutputStream(buffer));
-    channel.writeAndFlush(buffer);
+    safeWriteAndFlush(channel, buffer);
   }
 
   private static void initialize(Configuration conf, Channel channel, DatanodeInfo dnInfo,
@@ -360,7 +363,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
     throws IOException {
     Promise<Void> saslPromise = channel.eventLoop().newPromise();
     trySaslNegotiate(conf, channel, dnInfo, timeoutMs, client, accessToken, saslPromise);
-    saslPromise.addListener(new FutureListener<Void>() {
+    addListener(saslPromise, new FutureListener<Void>() {
 
       @Override
       public void operationComplete(Future<Void> future) throws Exception {
@@ -404,7 +407,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
       Promise<Channel> promise = eventLoopGroup.next().newPromise();
       futureList.add(promise);
       String dnAddr = dnInfo.getXferAddr(connectToDnViaHostname);
-      new Bootstrap().group(eventLoopGroup).channel(channelClass)
+      addListener(new Bootstrap().group(eventLoopGroup).channel(channelClass)
         .option(CONNECT_TIMEOUT_MILLIS, timeoutMs).handler(new ChannelInitializer<Channel>() {
 
           @Override
@@ -413,7 +416,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
             // channel connected. Leave an empty implementation here because netty does not allow
             // a null handler.
           }
-        }).connect(NetUtils.createSocketAddr(dnAddr)).addListener(new ChannelFutureListener() {
+        }).connect(NetUtils.createSocketAddr(dnAddr)), new ChannelFutureListener() {
 
           @Override
           public void operationComplete(ChannelFuture future) throws Exception {
@@ -533,12 +536,12 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
         if (!succ) {
           if (futureList != null) {
             for (Future<Channel> f : futureList) {
-              f.addListener(new FutureListener<Channel>() {
+              addListener(f, new FutureListener<Channel>() {
 
                 @Override
                 public void operationComplete(Future<Channel> future) throws Exception {
                   if (future.isSuccess()) {
-                    future.getNow().close();
+                    safeClose(future.getNow());
                   }
                 }
               });
index 89f386c8d6446c0cbed9f2ac2e0ebdee42703af5..4ac46e8cc5dccd1267137489a16a3be8de043bcd 100644 (file)
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.io.asyncfs;
 
+import static org.apache.hadoop.hbase.util.NettyFutureUtils.safeWrite;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
 import static org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleState.READER_IDLE;
 
@@ -448,12 +449,12 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
       size += CodedOutputStream.computeRawVarint32Size(size);
       ByteBuf buf = ctx.alloc().buffer(size);
       proto.writeDelimitedTo(new ByteBufOutputStream(buf));
-      ctx.write(buf);
+      safeWrite(ctx, buf);
     }
 
     @Override
     public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
-      ctx.write(ctx.alloc().buffer(4).writeInt(SASL_TRANSFER_MAGIC_NUMBER));
+      safeWrite(ctx, ctx.alloc().buffer(4).writeInt(SASL_TRANSFER_MAGIC_NUMBER));
       sendSaslMessage(ctx, new byte[0]);
       ctx.flush();
       step++;
@@ -642,7 +643,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
         cBuf.addComponent(buf);
         cBuf.writerIndex(cBuf.writerIndex() + buf.readableBytes());
       } else {
-        ctx.write(msg);
+        safeWrite(ctx, msg);
       }
     }
 
@@ -656,13 +657,14 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
         ByteBuf buf = ctx.alloc().ioBuffer(4 + wrapped.length);
         buf.writeInt(wrapped.length);
         buf.writeBytes(wrapped);
-        ctx.write(buf);
+        safeWrite(ctx, buf);
       }
       ctx.flush();
     }
 
     @Override
-    public void close(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception {
+    public void handlerRemoved(ChannelHandlerContext ctx) throws Exception {
+      // Release buffer on removal.
       cBuf.release();
       cBuf = null;
     }
index e4a410aa9c342162fc3aef26bd210cc3765fd7dd..0014185b85c0e929365203e3f449bcc3e9466d86 100644 (file)
@@ -186,9 +186,7 @@ public final class RecoverLeaseFSUtils {
     return recovered;
   }
 
-  /**
-   * @return Detail to append to any log message around lease recovering.
-   */
+  /** Returns Detail to append to any log message around lease recovering. */
   private static String getLogMessageDetail(final int nbAttempt, final Path p,
     final long startWaiting) {
     return "attempt=" + nbAttempt + " on file=" + p + " after "
index 26cbbe034a58e57830de7df46a64b0407c98452c..3c3852831033c740cbe54cb6be95917f2f95fcd2 100644 (file)
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.io.asyncfs;
 
+import static org.apache.hadoop.hbase.util.FutureUtils.consume;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 import static org.hamcrest.CoreMatchers.instanceOf;
 import static org.hamcrest.MatcherAssert.assertThat;
@@ -93,9 +94,9 @@ public class TestFanOutOneBlockAsyncDFSOutput extends AsyncFSTestBase {
   }
 
   @AfterClass
-  public static void tearDown() throws IOException, InterruptedException {
+  public static void tearDown() throws Exception {
     if (EVENT_LOOP_GROUP != null) {
-      EVENT_LOOP_GROUP.shutdownGracefully().sync();
+      EVENT_LOOP_GROUP.shutdownGracefully().get();
     }
     shutdownMiniDFSCluster();
   }
@@ -262,7 +263,7 @@ public class TestFanOutOneBlockAsyncDFSOutput extends AsyncFSTestBase {
     byte[] b = new byte[50 * 1024 * 1024];
     Bytes.random(b);
     out.write(b);
-    out.flush(false);
+    consume(out.flush(false));
     assertEquals(b.length, out.flush(false).get().longValue());
     out.close();
     assertEquals(b.length, FS.getFileStatus(f).getLen());
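The tests stop discarding futures as well: the event loop shutdown is awaited with get(), which rethrows any failure, and the deliberately unused flush future is passed to FutureUtils.consume. A sketch of what such a consume helper amounts to (assumed shape, not the actual FutureUtils code):

    import java.util.concurrent.CompletableFuture;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    final class ConsumeSketch {

      private static final Logger LOG = LoggerFactory.getLogger(ConsumeSketch.class);

      private ConsumeSketch() {
      }

      /** Deliberately ignore the result, but still log an otherwise-silent failure. */
      static <T> void consume(CompletableFuture<T> future) {
        future.whenComplete((result, error) -> {
          if (error != null) {
            LOG.warn("async operation failed", error);
          }
        });
      }
    }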
index 3a9c2979b6cf530bd21eda3e0e57a30e2ffe834a..77752789dbb369c4698e21baf1653f116775ed6c 100644 (file)
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.io.asyncfs;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
@@ -103,12 +102,12 @@ public class TestFanOutOneBlockAsyncDFSOutputHang extends AsyncFSTestBase {
   }
 
   @AfterClass
-  public static void tearDown() throws IOException, InterruptedException {
+  public static void tearDown() throws Exception {
     if (OUT != null) {
       OUT.recoverAndClose(null);
     }
     if (EVENT_LOOP_GROUP != null) {
-      EVENT_LOOP_GROUP.shutdownGracefully().sync();
+      EVENT_LOOP_GROUP.shutdownGracefully().get();
     }
     shutdownMiniDFSCluster();
   }
@@ -185,6 +184,8 @@ public class TestFanOutOneBlockAsyncDFSOutputHang extends AsyncFSTestBase {
         public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
           if (!(msg instanceof ByteBuf)) {
             ctx.fireChannelRead(msg);
+          } else {
+            ((ByteBuf) msg).release();
           }
         }
       });
index cb936a4e7c65bf7786744617b936f1a1f0e5d34e..d1ce128b118dadb39268a974e7610c78dd812caa 100644 (file)
@@ -53,9 +53,9 @@ public class TestLocalAsyncOutput {
   private static StreamSlowMonitor MONITOR;
 
   @AfterClass
-  public static void tearDownAfterClass() throws IOException {
+  public static void tearDownAfterClass() throws Exception {
     TEST_UTIL.cleanupTestDir();
-    GROUP.shutdownGracefully();
+    GROUP.shutdownGracefully().get();
     MONITOR = StreamSlowMonitor.create(TEST_UTIL.getConfiguration(), "testMonitor");
   }
 
index cb5fb4006d3ec739e3028d83d0775e5705149ac1..479b8f4e603419db17851d2d34d0ea5afc3a97fb 100644 (file)
@@ -193,9 +193,9 @@ public class TestSaslFanOutOneBlockAsyncDFSOutput extends AsyncFSTestBase {
   }
 
   @AfterClass
-  public static void tearDownAfterClass() throws IOException, InterruptedException {
+  public static void tearDownAfterClass() throws Exception {
     if (EVENT_LOOP_GROUP != null) {
-      EVENT_LOOP_GROUP.shutdownGracefully().sync();
+      EVENT_LOOP_GROUP.shutdownGracefully().get();
     }
     if (KDC != null) {
       KDC.stop();
index 60a492bd2dc305e131b1daca35feeb02078edd48..f45b949079bb71ee85acdd5d5e7a058fb7cfcecf 100644 (file)
@@ -188,4 +188,13 @@ public final class HBaseKerberosUtils {
       UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keyTabFileLocation);
     return ugi;
   }
+
+  public static UserGroupInformation loginKerberosPrincipal(String krbKeytab, String krbPrincipal)
+    throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+    UserGroupInformation.setConfiguration(conf);
+    UserGroupInformation.loginUserFromKeytab(krbPrincipal, krbKeytab);
+    return UserGroupInformation.getLoginUser();
+  }
 }
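The new loginKerberosPrincipal helper switches the process-wide UserGroupInformation configuration to Kerberos and logs in from a keytab. A hedged usage sketch follows; the keytab path and principal are placeholders, typically supplied by a MiniKdc in tests:

    import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
    import org.apache.hadoop.security.UserGroupInformation;

    public class KerberosLoginExample {
      public static void main(String[] args) throws Exception {
        // Hypothetical keytab location and principal.
        String keytab = "/tmp/test.keytab";
        String principal = "hbase/localhost@EXAMPLE.COM";
        UserGroupInformation ugi = HBaseKerberosUtils.loginKerberosPrincipal(keytab, principal);
        System.out.println("Logged in as " + ugi.getUserName());
      }
    }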
index 8a8f65951acf76bbe9c2a807181bdf1b7b0cafc9..fdad0d549830ac0e56a4eda809c5342e97623c27 100644 (file)
@@ -37,7 +37,6 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos.BackupInfo.Builder;
 
 /**
  * An object to encapsulate the information for each backup session
@@ -451,13 +450,13 @@ public class BackupInfo implements Comparable<BackupInfo> {
     return toProtosBackupInfo().toByteArray();
   }
 
-  private void setBackupTableInfoMap(Builder builder) {
+  private void setBackupTableInfoMap(BackupProtos.BackupInfo.Builder builder) {
     for (Entry<TableName, BackupTableInfo> entry : backupTableInfoMap.entrySet()) {
       builder.addBackupTableInfo(entry.getValue().toProto());
     }
   }
 
-  private void setTableSetTimestampMap(Builder builder) {
+  private void setTableSetTimestampMap(BackupProtos.BackupInfo.Builder builder) {
     if (this.getTableSetTimestampMap() != null) {
       for (Entry<TableName, Map<String, Long>> entry : this.getTableSetTimestampMap().entrySet()) {
         builder.putTableSetTimestamp(entry.getKey().getNameAsString(),
@@ -531,10 +530,9 @@ public class BackupInfo implements Comparable<BackupInfo> {
     sb.append("Type=" + getType()).append(",");
     sb.append("Tables=" + getTableListAsString()).append(",");
     sb.append("State=" + getState()).append(",");
-    Date date = null;
     Calendar cal = Calendar.getInstance();
     cal.setTimeInMillis(getStartTs());
-    date = cal.getTime();
+    Date date = cal.getTime();
     sb.append("Start time=" + date).append(",");
     if (state == BackupState.FAILED) {
       sb.append("Failed message=" + getFailedMsg()).append(",");
@@ -560,7 +558,7 @@ public class BackupInfo implements Comparable<BackupInfo> {
   }
 
   public String getTableListAsString() {
-    StringBuffer sb = new StringBuffer();
+    StringBuilder sb = new StringBuilder();
     sb.append("{");
     sb.append(StringUtils.join(backupTableInfoMap.keySet(), ","));
     sb.append("}");
index b4d73e134fabce10b2d480c200c37b13525509f7..cb01469c8f18aeb7ad6b27e28197aaa6375dd6b0 100644 (file)
@@ -83,7 +83,7 @@ public class RestoreDriver extends AbstractHBaseTool {
     Log4jUtils.disableZkAndClientLoggers();
   }
 
-  private int parseAndRun(String[] args) throws IOException {
+  private int parseAndRun() throws IOException {
     // Check if backup is enabled
     if (!BackupManager.isBackupEnabled(getConf())) {
       System.err.println(BackupRestoreConstants.ENABLE_BACKUP);
@@ -146,7 +146,7 @@ public class RestoreDriver extends AbstractHBaseTool {
       if (cmd.hasOption(OPTION_SET)) {
         String setName = cmd.getOptionValue(OPTION_SET);
         try {
-          tables = getTablesForSet(conn, setName, conf);
+          tables = getTablesForSet(conn, setName);
         } catch (IOException e) {
           System.out.println("ERROR: " + e.getMessage() + " for setName=" + setName);
           printToolUsage();
@@ -182,8 +182,7 @@ public class RestoreDriver extends AbstractHBaseTool {
     return 0;
   }
 
-  private String getTablesForSet(Connection conn, String name, Configuration conf)
-    throws IOException {
+  private String getTablesForSet(Connection conn, String name) throws IOException {
     try (final BackupSystemTable table = new BackupSystemTable(conn)) {
       List<TableName> tables = table.describeBackupSet(name);
 
@@ -214,7 +213,7 @@ public class RestoreDriver extends AbstractHBaseTool {
 
   @Override
   protected int doWork() throws Exception {
-    return parseAndRun(cmd.getArgs());
+    return parseAndRun();
   }
 
   public static void main(String[] args) throws Exception {
index 53295401f761864571b32728c378ceb1fc045680..ce9c5bbe8fae47ba71b1ed847497ec167d662266 100644 (file)
@@ -65,6 +65,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 
+import org.apache.hbase.thirdparty.com.google.common.base.Splitter;
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
 import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter;
@@ -310,7 +311,7 @@ public final class BackupCommands {
       String setName = null;
       if (cmdline.hasOption(OPTION_SET)) {
         setName = cmdline.getOptionValue(OPTION_SET);
-        tables = getTablesForSet(setName, getConf());
+        tables = getTablesForSet(setName);
 
         if (tables == null) {
           System.out
@@ -371,7 +372,7 @@ public final class BackupCommands {
       }
     }
 
-    private String getTablesForSet(String name, Configuration conf) throws IOException {
+    private String getTablesForSet(String name) throws IOException {
       try (final BackupSystemTable table = new BackupSystemTable(conn)) {
         List<TableName> tables = table.describeBackupSet(name);
 
@@ -1001,14 +1002,14 @@ public final class BackupCommands {
           processSetDescribe(args);
           break;
         case SET_LIST:
-          processSetList(args);
+          processSetList();
           break;
         default:
           break;
       }
     }
 
-    private void processSetList(String[] args) throws IOException {
+    private void processSetList() throws IOException {
       super.execute();
 
       // List all backup set names
@@ -1087,17 +1088,12 @@ public final class BackupCommands {
         throw new IOException(INCORRECT_USAGE);
       }
       super.execute();
-
       String setName = args[2];
-      String[] tables = args[3].split(",");
-      TableName[] tableNames = new TableName[tables.length];
-      for (int i = 0; i < tables.length; i++) {
-        tableNames[i] = TableName.valueOf(tables[i]);
-      }
+      TableName[] tableNames =
+        Splitter.on(',').splitToStream(args[3]).map(TableName::valueOf).toArray(TableName[]::new);
       try (final BackupAdminImpl admin = new BackupAdminImpl(conn)) {
         admin.addToBackupSet(setName, tableNames);
       }
-
     }
 
     private BackupCommand getCommand(String cmdStr) throws IOException {
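Several hunks in this commit replace hand-rolled String.split loops with the shaded Guava Splitter, as in the table-set parsing above. A self-contained sketch of that pattern (assuming the hbase-thirdparty Guava and HBase TableName classes on the classpath; the table names are made up):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hbase.thirdparty.com.google.common.base.Splitter;

    public class SplitTablesExample {
      public static void main(String[] args) {
        String arg = "ns1:t1,ns1:t2,t3";
        // Stream over the comma-separated names and materialize them as TableName[].
        TableName[] tableNames =
          Splitter.on(',').splitToStream(arg).map(TableName::valueOf).toArray(TableName[]::new);
        for (TableName tn : tableNames) {
          System.out.println(tn);
        }
      }
    }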
index 482b2a266db734977588a8bbb1a309eeddbfa207..3a1cbd55c58e20c2a0f2a077bcbf1418d06a928e 100644 (file)
@@ -366,7 +366,6 @@ public class BackupManifest {
   }
 
   // backup image directory
-  private String tableBackupDir = null;
   private BackupImage backupImage;
 
   /**
@@ -385,7 +384,6 @@ public class BackupManifest {
    * @param backup The ongoing backup session info
    */
   public BackupManifest(BackupInfo backup, TableName table) {
-    this.tableBackupDir = backup.getTableBackupDir(table);
     List<TableName> tables = new ArrayList<TableName>();
     tables.add(table);
     BackupImage.Builder builder = BackupImage.newBuilder();
@@ -468,7 +466,7 @@ public class BackupManifest {
 
   /**
    * TODO: fix it. Persist the manifest file.
-   * @throws IOException IOException when storing the manifest file.
+   * @throws BackupException if an error occurred while storing the manifest file.
    */
   public void store(Configuration conf) throws BackupException {
     byte[] data = backupImage.toProto().toByteArray();
@@ -526,7 +524,7 @@ public class BackupManifest {
       restoreImages.put(Long.valueOf(image.startTs), image);
     }
     return new ArrayList<>(
-      reverse ? (restoreImages.descendingMap().values()) : (restoreImages.values()));
+      reverse ? restoreImages.descendingMap().values() : restoreImages.values());
   }
 
   /**
index 19ddd8141677c9c205aa7b6c2b56b189d7a4a325..04f43b5b0ea15b7038f1ef6a55711473a49abfbf 100644 (file)
@@ -19,6 +19,8 @@ package org.apache.hadoop.hbase.backup.impl;
 
 import java.io.Closeable;
 import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -69,6 +71,9 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.base.Splitter;
+import org.apache.hbase.thirdparty.com.google.common.collect.Iterators;
+
 import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 
@@ -237,6 +242,7 @@ public final class BackupSystemTable implements Closeable {
       try {
         Thread.sleep(100);
       } catch (InterruptedException e) {
+        throw (IOException) new InterruptedIOException().initCause(e);
       }
       if (EnvironmentEdgeManager.currentTime() - startTime > TIMEOUT) {
         throw new IOException(
@@ -302,6 +308,7 @@ public final class BackupSystemTable implements Closeable {
   public Map<byte[], List<Path>>[] readBulkLoadedFiles(String backupId, List<TableName> sTableList)
     throws IOException {
     Scan scan = BackupSystemTable.createScanForBulkLoadedFiles(backupId);
+    @SuppressWarnings("unchecked")
     Map<byte[], List<Path>>[] mapForSrc = new Map[sTableList == null ? 1 : sTableList.size()];
     try (Table table = connection.getTable(bulkLoadTableName);
       ResultScanner scanner = table.getScanner(scan)) {
@@ -574,7 +581,7 @@ public final class BackupSystemTable implements Closeable {
       if (val.length == 0) {
         return null;
       }
-      return new String(val);
+      return new String(val, StandardCharsets.UTF_8);
     }
   }
 
@@ -1639,7 +1646,8 @@ public final class BackupSystemTable implements Closeable {
       if (val.length == 0) {
         return null;
       }
-      return new String(val).split(",");
+      return Splitter.on(',').splitToStream(new String(val, StandardCharsets.UTF_8))
+        .toArray(String[]::new);
     }
   }
 
@@ -1654,7 +1662,7 @@ public final class BackupSystemTable implements Closeable {
     Get get = new Get(MERGE_OP_ROW);
     try (Table table = connection.getTable(tableName)) {
       Result res = table.get(get);
-      return (!res.isEmpty());
+      return !res.isEmpty();
     }
   }
 
@@ -1720,7 +1728,8 @@ public final class BackupSystemTable implements Closeable {
       if (val.length == 0) {
         return null;
       }
-      return new String(val).split(",");
+      return Splitter.on(',').splitToStream(new String(val, StandardCharsets.UTF_8))
+        .toArray(String[]::new);
     }
   }
 
@@ -1737,20 +1746,22 @@ public final class BackupSystemTable implements Closeable {
   }
 
   static String getTableNameFromOrigBulkLoadRow(String rowStr) {
-    String[] parts = rowStr.split(BLK_LD_DELIM);
-    return parts[1];
+    // format is bulk : namespace : table : region : file
+    return Iterators.get(Splitter.onPattern(BLK_LD_DELIM).split(rowStr).iterator(), 1);
   }
 
   static String getRegionNameFromOrigBulkLoadRow(String rowStr) {
     // format is bulk : namespace : table : region : file
-    String[] parts = rowStr.split(BLK_LD_DELIM);
+    List<String> parts = Splitter.onPattern(BLK_LD_DELIM).splitToList(rowStr);
+    Iterator<String> i = parts.iterator();
     int idx = 3;
-    if (parts.length == 4) {
+    if (parts.size() == 4) {
       // the table is in default namespace
       idx = 2;
     }
-    LOG.debug("bulk row string " + rowStr + " region " + parts[idx]);
-    return parts[idx];
+    String region = Iterators.get(i, idx);
+    LOG.debug("bulk row string " + rowStr + " region " + region);
+    return region;
   }
 
   /*
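The previously empty catch around Thread.sleep is replaced with the common Hadoop/HBase idiom of converting an InterruptedException into an InterruptedIOException, so the interrupt is not swallowed inside an IOException-throwing method. A small sketch of the pattern, using a hypothetical poll loop:

    import java.io.IOException;
    import java.io.InterruptedIOException;
    import java.util.function.BooleanSupplier;

    public class PollSketch {
      /** Polls some condition; the helper name and condition are illustrative only. */
      static void waitUntilReady(BooleanSupplier ready) throws IOException {
        while (!ready.getAsBoolean()) {
          try {
            Thread.sleep(100);
          } catch (InterruptedException e) {
            // Preserve the interrupt as the cause while honoring the IOException contract.
            throw (IOException) new InterruptedIOException().initCause(e);
          }
        }
      }
    }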
index 0e800ea520b44f39bb45d8a567e490e87a6d941d..211e9f96c89c836723c3957cc27c490466fb42c3 100644 (file)
@@ -204,8 +204,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
       String tgtDest = backupInfo.getBackupRootDir() + Path.SEPARATOR + backupInfo.getBackupId();
       int attempt = 1;
       while (activeFiles.size() > 0) {
-        LOG.info(
-          "Copy " + activeFiles.size() + " active bulk loaded files. Attempt =" + (attempt++));
+        LOG.info("Copy " + activeFiles.size() + " active bulk loaded files. Attempt =" + attempt++);
         String[] toCopy = new String[activeFiles.size()];
         activeFiles.toArray(toCopy);
         // Active file can be archived during copy operation,
index 3c0eafadb82aa55c1a69f7cf03a392bdb8537fdf..9ec2442a3d9342b2630d0bc28470e9f27f7b970f 100644 (file)
@@ -181,7 +181,7 @@ public class RestoreTablesClient {
 
   private List<Path> getFilesRecursively(String fileBackupDir)
     throws IllegalArgumentException, IOException {
-    FileSystem fs = FileSystem.get((new Path(fileBackupDir)).toUri(), new Configuration());
+    FileSystem fs = FileSystem.get(new Path(fileBackupDir).toUri(), new Configuration());
     List<Path> list = new ArrayList<>();
     RemoteIterator<LocatedFileStatus> it = fs.listFiles(new Path(fileBackupDir), true);
     while (it.hasNext()) {
index 0ca5509262fa08f0bf21f5d92fd7c430c0f8c20a..2bb2c13e4dd8e059e26210e145059baf9d1e99e5 100644 (file)
@@ -322,7 +322,7 @@ public abstract class TableBackupClient {
    * @return meta data dir
    */
   protected String obtainBackupMetaDataStr(BackupInfo backupInfo) {
-    StringBuffer sb = new StringBuffer();
+    StringBuilder sb = new StringBuilder();
     sb.append("type=" + backupInfo.getType() + ",tablelist=");
     for (TableName table : backupInfo.getTables()) {
       sb.append(table + ";");
index 5dca4878885575b23235833eb3adbf511d179883..51a276df4c5a32d9a5283554d06181a7ee3caf88 100644 (file)
@@ -47,7 +47,6 @@ import org.apache.hadoop.tools.DistCp;
 import org.apache.hadoop.tools.DistCpConstants;
 import org.apache.hadoop.tools.DistCpOptions;
 import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.zookeeper.KeeperException.NoNodeException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -127,7 +126,7 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
    * @param backupInfo  backup info
    * @param newProgress progress
    * @param bytesCopied bytes copied
-   * @throws NoNodeException exception
+   * @throws IOException exception
    */
   static void updateProgress(BackupInfo backupInfo, BackupManager backupManager, int newProgress,
     long bytesCopied) throws IOException {
@@ -361,7 +360,7 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
    * @param conf     The hadoop configuration
    * @param copyType The backup copy type
    * @param options  Options for customized ExportSnapshot or DistCp
-   * @throws Exception exception
+   * @throws IOException exception
    */
   @Override
   public int copy(BackupInfo context, BackupManager backupManager, Configuration conf,
index 9a65ed929d7fe0808bdaee6a2641da045381ad53..3b4cf0246d73bc23d10062c722c423d79cb2fe52 100644 (file)
@@ -22,11 +22,12 @@ import static org.apache.hadoop.hbase.backup.util.BackupUtils.succeeded;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.util.ArrayDeque;
 import java.util.ArrayList;
+import java.util.Deque;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
-import java.util.Stack;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
@@ -257,7 +258,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
    */
   protected Path convertToDest(Path p, Path backupDirPath) {
     String backupId = backupDirPath.getName();
-    Stack<String> stack = new Stack<String>();
+    Deque<String> stack = new ArrayDeque<String>();
     String name = null;
     while (true) {
       name = p.getName();
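Replacing java.util.Stack with ArrayDeque keeps the same LIFO behaviour without Stack's legacy synchronization. A tiny sketch of the path-rebuilding idea used in the hunk above (the path components and backup id are made up):

    import java.util.ArrayDeque;
    import java.util.Deque;

    public class StackToDequeExample {
      public static void main(String[] args) {
        Deque<String> stack = new ArrayDeque<>();
        // Push components leaf-first, then pop them back out in root-to-leaf order.
        stack.push("file.hfile");
        stack.push("cf");
        stack.push("region");
        StringBuilder rebuilt = new StringBuilder("backup_1234567890");
        while (!stack.isEmpty()) {
          rebuilt.append('/').append(stack.pop());
        }
        System.out.println(rebuilt);
      }
    }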
index 4802e8b3ad639144fa153e488d977b8393dbb3b5..5b21feeba75fafa11b14331da8e6499b4a675a6a 100644 (file)
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.backup.regionserver;
 
+import java.nio.charset.StandardCharsets;
 import java.util.HashMap;
 import java.util.List;
 import java.util.concurrent.Callable;
@@ -56,7 +57,7 @@ public class LogRollBackupSubprocedure extends Subprocedure {
     this.rss = rss;
     this.taskManager = taskManager;
     if (data != null) {
-      backupRoot = new String(data);
+      backupRoot = new String(data, StandardCharsets.UTF_8);
     }
   }
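Decoding the subprocedure payload with an explicit charset avoids depending on the JVM default encoding. A one-line round-trip sketch (the payload value is illustrative):

    import java.nio.charset.StandardCharsets;

    public class CharsetRoundTrip {
      public static void main(String[] args) {
        byte[] data = "hdfs://backup-root".getBytes(StandardCharsets.UTF_8);
        String backupRoot = new String(data, StandardCharsets.UTF_8);
        System.out.println(backupRoot);
      }
    }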
 
index 4b4ebd361a64816abbe27488ed380f28e102dbbc..ef97b195e28747332718b3c7108cf89fddf9ee2e 100644 (file)
@@ -63,12 +63,15 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.base.Splitter;
+import org.apache.hbase.thirdparty.com.google.common.collect.Iterators;
+
 /**
  * A collection for methods used by multiple classes to backup HBase tables.
  */
 @InterfaceAudience.Private
 public final class BackupUtils {
-  protected static final Logger LOG = LoggerFactory.getLogger(BackupUtils.class);
+  private static final Logger LOG = LoggerFactory.getLogger(BackupUtils.class);
   public static final String LOGNAME_SEPARATOR = ".";
   public static final int MILLISEC_IN_HOUR = 3600000;
 
@@ -136,9 +139,10 @@ public final class BackupUtils {
         // write a copy of descriptor to the target directory
         Path target = new Path(backupInfo.getTableBackupDir(table));
         FileSystem targetFs = target.getFileSystem(conf);
-        FSTableDescriptors descriptors =
-          new FSTableDescriptors(targetFs, CommonFSUtils.getRootDir(conf));
-        descriptors.createTableDescriptorForTableDirectory(target, orig, false);
+        try (FSTableDescriptors descriptors =
+          new FSTableDescriptors(targetFs, CommonFSUtils.getRootDir(conf))) {
+          descriptors.createTableDescriptorForTableDirectory(target, orig, false);
+        }
         LOG.debug("Attempting to copy table info for:" + table + " target: " + target
           + " descriptor: " + orig);
         LOG.debug("Finished copying tableinfo.");
@@ -279,13 +283,8 @@ public final class BackupUtils {
     if (tables == null) {
       return null;
     }
-    String[] tableArray = tables.split(BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND);
-
-    TableName[] ret = new TableName[tableArray.length];
-    for (int i = 0; i < tableArray.length; i++) {
-      ret[i] = TableName.valueOf(tableArray[i]);
-    }
-    return ret;
+    return Splitter.on(BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND).splitToStream(tables)
+      .map(TableName::valueOf).toArray(TableName[]::new);
   }
 
   /**
@@ -594,8 +593,7 @@ public final class BackupUtils {
       }
 
       private long getTimestamp(String backupId) {
-        String[] split = backupId.split("_");
-        return Long.parseLong(split[1]);
+        return Long.parseLong(Iterators.get(Splitter.on('_').split(backupId).iterator(), 1));
       }
     });
     return infos;
@@ -734,7 +732,7 @@ public final class BackupUtils {
   public static String findMostRecentBackupId(String[] backupIds) {
     long recentTimestamp = Long.MIN_VALUE;
     for (String backupId : backupIds) {
-      long ts = Long.parseLong(backupId.split("_")[1]);
+      long ts = Long.parseLong(Iterators.get(Splitter.on('_').split(backupId).iterator(), 1));
       if (ts > recentTimestamp) {
         recentTimestamp = ts;
       }
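The timestamp extraction from backup ids now goes through Splitter plus Iterators.get instead of String.split indexing. A hedged standalone sketch, assuming ids of the form backup_&lt;millis&gt; as implied by the code above:

    import org.apache.hbase.thirdparty.com.google.common.base.Splitter;
    import org.apache.hbase.thirdparty.com.google.common.collect.Iterators;

    public class BackupIdTimestamp {
      static long getTimestamp(String backupId) {
        // Element 0 is the "backup" prefix, element 1 the epoch-millis suffix.
        return Long.parseLong(Iterators.get(Splitter.on('_').split(backupId).iterator(), 1));
      }

      public static void main(String[] args) {
        System.out.println(getTimestamp("backup_1660650183000"));
      }
    }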
index e660ec7b157e2d7bbb83b462b8388b8897c7251e..bf2aa14046dbc14c19fcf6e879282d355ad0596d 100644 (file)
@@ -325,8 +325,7 @@ public class RestoreTool {
             + ", will only create table");
         }
         tableDescriptor = TableDescriptorBuilder.copy(newTableName, tableDescriptor);
-        checkAndCreateTable(conn, tableBackupPath, tableName, newTableName, null, tableDescriptor,
-          truncateIfExists);
+        checkAndCreateTable(conn, newTableName, null, tableDescriptor, truncateIfExists);
         return;
       } else {
         throw new IllegalStateException(
@@ -347,8 +346,7 @@ public class RestoreTool {
 
       // should only try to create the table with all region informations, so we could pre-split
       // the regions in fine grain
-      checkAndCreateTable(conn, tableBackupPath, tableName, newTableName, regionPathList,
-        tableDescriptor, truncateIfExists);
+      checkAndCreateTable(conn, newTableName, regionPathList, tableDescriptor, truncateIfExists);
       RestoreJob restoreService = BackupRestoreFactory.getRestoreJob(conf);
       Path[] paths = new Path[regionPathList.size()];
       regionPathList.toArray(paths);
@@ -460,17 +458,15 @@ public class RestoreTool {
    * Prepare the table for bulkload, most codes copied from {@code createTable} method in
    * {@code BulkLoadHFilesTool}.
    * @param conn             connection
-   * @param tableBackupPath  path
-   * @param tableName        table name
    * @param targetTableName  target table name
    * @param regionDirList    region directory list
    * @param htd              table descriptor
    * @param truncateIfExists truncates table if exists
    * @throws IOException exception
    */
-  private void checkAndCreateTable(Connection conn, Path tableBackupPath, TableName tableName,
-    TableName targetTableName, ArrayList<Path> regionDirList, TableDescriptor htd,
-    boolean truncateIfExists) throws IOException {
+  private void checkAndCreateTable(Connection conn, TableName targetTableName,
+    ArrayList<Path> regionDirList, TableDescriptor htd, boolean truncateIfExists)
+    throws IOException {
     try (Admin admin = conn.getAdmin()) {
       boolean createNew = false;
       if (admin.tableExists(targetTableName)) {
index 9246c74172fe3e70e9e15ef2cb1c2e9fb47362e5..7b5095a897e2cc629c3cdcfb104994bb924270a4 100644 (file)
@@ -330,9 +330,6 @@ public class TestBackupBase {
     }
   }
 
-  /**
-   * @throws Exception if deleting the archive directory or shutting down the mini cluster fails
-   */
   @AfterClass
   public static void tearDown() throws Exception {
     try {
index 307440a10ed7ea37e7dcd45cf52d74d990544f8f..b71e084e8404feda641a97a7f14f77946553e00e 100644 (file)
@@ -22,9 +22,8 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.HashMap;
+import java.util.IdentityHashMap;
 import java.util.List;
-import java.util.Map;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -63,9 +62,6 @@ public class TestBackupHFileCleaner {
   static FileSystem fs = null;
   Path root;
 
-  /**
-   * @throws Exception if starting the mini cluster or getting the filesystem fails
-   */
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     conf.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
@@ -74,9 +70,6 @@ public class TestBackupHFileCleaner {
     fs = FileSystem.get(conf);
   }
 
-  /**
-   * @throws Exception if closing the filesystem or shutting down the mini cluster fails
-   */
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
     if (fs != null) {
@@ -109,12 +102,13 @@ public class TestBackupHFileCleaner {
     BackupHFileCleaner cleaner = new BackupHFileCleaner();
     cleaner.setConf(conf);
     cleaner.setCheckForFullyBackedUpTables(false);
-    // 3. Assert that file as is should be deletable
     List<FileStatus> stats = new ArrayList<>();
+    // Prime the cleaner
+    cleaner.getDeletableFiles(stats);
+    // 3. Assert that file as is should be deletable
     FileStatus stat = fs.getFileStatus(file);
     stats.add(stat);
     Iterable<FileStatus> deletable = cleaner.getDeletableFiles(stats);
-    deletable = cleaner.getDeletableFiles(stats);
     boolean found = false;
     for (FileStatus stat1 : deletable) {
       if (stat.equals(stat1)) {
@@ -132,15 +126,15 @@ public class TestBackupHFileCleaner {
       BackupSystemTable sysTbl = new BackupSystemTable(conn)) {
       List<TableName> sTableList = new ArrayList<>();
       sTableList.add(tableName);
-      Map<byte[], List<Path>>[] maps = new Map[1];
-      maps[0] = new HashMap<>();
+      @SuppressWarnings("unchecked")
+      IdentityHashMap<byte[], List<Path>>[] maps = new IdentityHashMap[1];
+      maps[0] = new IdentityHashMap<>();
       maps[0].put(Bytes.toBytes(famName), list);
       sysTbl.writeBulkLoadedFiles(sTableList, maps, "1");
     }
 
     // 5. Assert file should not be deletable
     deletable = cleaner.getDeletableFiles(stats);
-    deletable = cleaner.getDeletableFiles(stats);
     found = false;
     for (FileStatus stat1 : deletable) {
       if (stat.equals(stat1)) {
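The test above switches a byte[]-keyed map from HashMap to IdentityHashMap. Arrays do not override equals/hashCode, so both maps already behave identically for array keys; making the identity semantics explicit presumably also satisfies static analysis about arrays as map keys (that motivation is an assumption, not stated in the commit). A small demonstration of the equivalence:

    import java.nio.charset.StandardCharsets;
    import java.util.HashMap;
    import java.util.IdentityHashMap;
    import java.util.Map;

    public class ArrayKeyDemo {
      public static void main(String[] args) {
        byte[] k1 = "fam".getBytes(StandardCharsets.UTF_8);
        byte[] k2 = "fam".getBytes(StandardCharsets.UTF_8);

        Map<byte[], String> hash = new HashMap<>();
        Map<byte[], String> identity = new IdentityHashMap<>();
        hash.put(k1, "v");
        identity.put(k1, "v");

        // Equal contents but different array instances miss in both maps.
        System.out.println(hash.containsKey(k2));     // false
        System.out.println(identity.containsKey(k2)); // false
        System.out.println(hash.containsKey(k1));     // true
        System.out.println(identity.containsKey(k1)); // true
      }
    }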
index db9d63bca943646e054c8997806ed050d76bdf48..21883fa6eaadac84e80e8bb96245cff8da7a6a04 100644 (file)
@@ -109,10 +109,10 @@ public class TestBackupSystemTable {
 
   @Test
   public void testWriteReadBackupStartCode() throws IOException {
-    Long code = 100L;
+    long code = 100L;
     table.writeBackupStartCode(code, "root");
     String readCode = table.readBackupStartCode("root");
-    assertEquals(code, new Long(Long.parseLong(readCode)));
+    assertEquals(code, Long.parseLong(readCode));
     cleanBackupTable();
   }
 
@@ -126,7 +126,7 @@ public class TestBackupSystemTable {
   }
 
   @Test
-  public void testBackupHistory() throws IOException {
+  public void testBackupHistory() throws Exception {
     int n = 10;
     List<BackupInfo> list = createBackupInfoList(n);
 
@@ -153,7 +153,7 @@ public class TestBackupSystemTable {
   }
 
   @Test
-  public void testBackupDelete() throws IOException {
+  public void testBackupDelete() throws Exception {
     try (BackupSystemTable table = new BackupSystemTable(conn)) {
       int n = 10;
       List<BackupInfo> list = createBackupInfoList(n);
@@ -226,29 +226,29 @@ public class TestBackupSystemTable {
     tables2.add(TableName.valueOf("t5"));
 
     table.addIncrementalBackupTableSet(tables1, "root");
-    BackupSystemTable table = new BackupSystemTable(conn);
-    TreeSet<TableName> res1 = (TreeSet<TableName>) table.getIncrementalBackupTableSet("root");
-    assertTrue(tables1.size() == res1.size());
-    Iterator<TableName> desc1 = tables1.descendingIterator();
-    Iterator<TableName> desc2 = res1.descendingIterator();
-    while (desc1.hasNext()) {
-      assertEquals(desc1.next(), desc2.next());
-    }
-
-    table.addIncrementalBackupTableSet(tables2, "root");
-    TreeSet<TableName> res2 = (TreeSet<TableName>) table.getIncrementalBackupTableSet("root");
-    assertTrue((tables2.size() + tables1.size() - 1) == res2.size());
-
-    tables1.addAll(tables2);
 
-    desc1 = tables1.descendingIterator();
-    desc2 = res2.descendingIterator();
-
-    while (desc1.hasNext()) {
-      assertEquals(desc1.next(), desc2.next());
+    try (BackupSystemTable systemTable = new BackupSystemTable(conn)) {
+      TreeSet<TableName> res1 =
+        (TreeSet<TableName>) systemTable.getIncrementalBackupTableSet("root");
+      assertTrue(tables1.size() == res1.size());
+      Iterator<TableName> desc1 = tables1.descendingIterator();
+      Iterator<TableName> desc2 = res1.descendingIterator();
+      while (desc1.hasNext()) {
+        assertEquals(desc1.next(), desc2.next());
+      }
+      systemTable.addIncrementalBackupTableSet(tables2, "root");
+      TreeSet<TableName> res2 =
+        (TreeSet<TableName>) systemTable.getIncrementalBackupTableSet("root");
+      assertTrue((tables2.size() + tables1.size() - 1) == res2.size());
+      tables1.addAll(tables2);
+      desc1 = tables1.descendingIterator();
+      desc2 = res2.descendingIterator();
+      while (desc1.hasNext()) {
+        assertEquals(desc1.next(), desc2.next());
+      }
     }
-    cleanBackupTable();
 
+    cleanBackupTable();
   }
 
   @Test
@@ -274,9 +274,9 @@ public class TestBackupSystemTable {
     for (TableName t : tables) {
       Map<String, Long> rstm = result.get(t);
       assertNotNull(rstm);
-      assertEquals(rstm.get("rs1:100"), new Long(100L));
-      assertEquals(rstm.get("rs2:100"), new Long(101L));
-      assertEquals(rstm.get("rs3:100"), new Long(103L));
+      assertEquals(rstm.get("rs1:100"), Long.valueOf(100L));
+      assertEquals(rstm.get("rs2:100"), Long.valueOf(101L));
+      assertEquals(rstm.get("rs3:100"), Long.valueOf(103L));
     }
 
     Set<TableName> tables1 = new TreeSet<>();
@@ -301,22 +301,22 @@ public class TestBackupSystemTable {
       Map<String, Long> rstm = result.get(t);
       assertNotNull(rstm);
       if (t.equals(TableName.valueOf("t3")) == false) {
-        assertEquals(rstm.get("rs1:100"), new Long(100L));
-        assertEquals(rstm.get("rs2:100"), new Long(101L));
-        assertEquals(rstm.get("rs3:100"), new Long(103L));
+        assertEquals(rstm.get("rs1:100"), Long.valueOf(100L));
+        assertEquals(rstm.get("rs2:100"), Long.valueOf(101L));
+        assertEquals(rstm.get("rs3:100"), Long.valueOf(103L));
       } else {
-        assertEquals(rstm.get("rs1:100"), new Long(200L));
-        assertEquals(rstm.get("rs2:100"), new Long(201L));
-        assertEquals(rstm.get("rs3:100"), new Long(203L));
+        assertEquals(rstm.get("rs1:100"), Long.valueOf(200L));
+        assertEquals(rstm.get("rs2:100"), Long.valueOf(201L));
+        assertEquals(rstm.get("rs3:100"), Long.valueOf(203L));
       }
     }
 
     for (TableName t : tables1) {
       Map<String, Long> rstm = result.get(t);
       assertNotNull(rstm);
-      assertEquals(rstm.get("rs1:100"), new Long(200L));
-      assertEquals(rstm.get("rs2:100"), new Long(201L));
-      assertEquals(rstm.get("rs3:100"), new Long(203L));
+      assertEquals(rstm.get("rs1:100"), Long.valueOf(200L));
+      assertEquals(rstm.get("rs2:100"), Long.valueOf(201L));
+      assertEquals(rstm.get("rs3:100"), Long.valueOf(203L));
     }
 
     cleanBackupTable();
@@ -485,15 +485,12 @@ public class TestBackupSystemTable {
     return ctxt;
   }
 
-  private List<BackupInfo> createBackupInfoList(int size) {
+  private List<BackupInfo> createBackupInfoList(int size) throws InterruptedException {
     List<BackupInfo> list = new ArrayList<>();
     for (int i = 0; i < size; i++) {
       list.add(createBackupInfo());
-      try {
-        Thread.sleep(10);
-      } catch (InterruptedException e) {
-        e.printStackTrace();
-      }
+      // XXX Why do we need this sleep?
+      Thread.sleep(10);
     }
     return list;
   }
index 28fb025bb32af2b60785b28c78e9e87f958d1a2b..a182144a8abd30a0d58b00d1f535957f0861d901 100644 (file)
@@ -114,8 +114,6 @@ public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {
     assertTrue(checkSucceeded(backupIdIncMultiple1));
     // Delete all data in table1
     TEST_UTIL.deleteTableData(table1);
-    // #5.1 - check tables for full restore */
-    Admin hAdmin = TEST_UTIL.getAdmin();
 
     // #6 - restore incremental backup for table1
     TableName[] tablesRestoreIncMultiple = new TableName[] { table1 };
index 006ff2e731d4885e3a24f6da55aea24392a09067..68b08544196e08c319c70f6c58ba90b28e828d9c 100644 (file)
@@ -111,7 +111,7 @@ public class FavoredNodeAssignmentHelper {
           break;
         }
       }
-      serverList.add((sn));
+      serverList.add(sn);
       this.regionServerToRackMap.put(sn.getHostname(), rackName);
     }
   }
@@ -235,7 +235,7 @@ public class FavoredNodeAssignmentHelper {
           if (numIterations % rackList.size() == 0) {
             if (++serverIndex >= maxRackSize) serverIndex = 0;
           }
-          if ((++rackIndex) >= rackList.size()) {
+          if (++rackIndex >= rackList.size()) {
             rackIndex = 0; // reset the rack index to 0
           }
         } else break;
@@ -259,7 +259,7 @@ public class FavoredNodeAssignmentHelper {
       if (numIterations % rackList.size() == 0) {
         ++serverIndex;
       }
-      if ((++rackIndex) >= rackList.size()) {
+      if (++rackIndex >= rackList.size()) {
         rackIndex = 0; // reset the rack index to 0
       }
     }
@@ -298,7 +298,7 @@ public class FavoredNodeAssignmentHelper {
     if (getTotalNumberOfRacks() == 1) {
       favoredNodes = singleRackCase(regionInfo, primaryRS, primaryRack);
     } else {
-      favoredNodes = multiRackCase(regionInfo, primaryRS, primaryRack);
+      favoredNodes = multiRackCase(primaryRS, primaryRack);
     }
     return favoredNodes;
   }
@@ -483,14 +483,12 @@ public class FavoredNodeAssignmentHelper {
    * has only one region server, then we place primary and tertiary on one rack and secondary on
   * another. The aim is to distribute the three favored nodes on >= 2 racks. TODO: see how we can
    * use generateMissingFavoredNodeMultiRack API here
-   * @param regionInfo  Region for which we are trying to generate FN
    * @param primaryRS   The primary favored node.
    * @param primaryRack The rack of the primary favored node.
    * @return Array containing secondary and tertiary favored nodes.
    * @throws IOException Signals that an I/O exception has occurred.
    */
-  private ServerName[] multiRackCase(RegionInfo regionInfo, ServerName primaryRS,
-    String primaryRack) throws IOException {
+  private ServerName[] multiRackCase(ServerName primaryRS, String primaryRack) throws IOException {
 
     List<ServerName> favoredNodes = Lists.newArrayList(primaryRS);
     // Create the secondary and tertiary pair
index 224a32c222af8c00a1a34ce48454ae7b1ee90ffe..4c6f2b3cc27db518d0813965b6dfa2e75035ec93 100644 (file)
@@ -99,7 +99,7 @@ public class FavoredNodesPlan {
   }
 
   /**
-   * @return the mapping between each region to its favored region server list
+   * Return the mapping between each region to its favored region server list.
    */
   public Map<String, List<ServerName>> getAssignmentMap() {
     // Make a deep copy so changes don't harm our copy of favoredNodesMap.
@@ -119,7 +119,7 @@ public class FavoredNodesPlan {
     if (o == null) {
       return false;
     }
-    if (getClass() != o.getClass()) {
+    if (!(o instanceof FavoredNodesPlan)) {
       return false;
     }
     // To compare the map from object o is identical to current assignment map.
index 2c49e26e9cf9e31ba78bc51d536459c512c3ec70..8858e13da705c6e98496506c8b9f212ba578c333 100644 (file)
@@ -409,8 +409,8 @@ public class AssignmentVerificationReport {
   }
 
   /**
-   * @return list which contains just 3 elements: average dispersion score, max dispersion score and
-   *         min dispersion score as first, second and third element respectively.
+   * Return a list which contains 3 elements: average dispersion score, max dispersion score and min
+   * dispersion score as first, second and third elements, respectively.
    */
   public List<Float> getDispersionInformation() {
     List<Float> dispersion = new ArrayList<>();
@@ -578,7 +578,7 @@ public class AssignmentVerificationReport {
     }
     int i = 0;
     for (ServerName addr : serverSet) {
-      if ((i++) % 3 == 0) {
+      if (i++ % 3 == 0) {
         System.out.print("\n\t\t\t");
       }
       System.out.print(addr.getAddress() + " ; ");
index d6909dc2802e6a221cec1d0a1d8f97f20e47f300..5632fcc02ff783f8da09a48d4cf13eacb525728a 100644 (file)
@@ -148,10 +148,7 @@ public class RegionPlan implements Comparable<RegionPlan> {
     if (this == obj) {
       return true;
     }
-    if (obj == null) {
-      return false;
-    }
-    if (getClass() != obj.getClass()) {
+    if (!(obj instanceof RegionPlan)) {
       return false;
     }
     RegionPlan other = (RegionPlan) obj;
index a54a410fcdf7012f4380cfaed1e3579542d30b90..a7ae8b4d1a5a3eb8483747969f0bd68beac5e6a9 100644 (file)
@@ -218,7 +218,7 @@ class BalancerClusterState {
     colocatedReplicaCountsPerHost = new Int2IntCounterMap[numHosts];
     colocatedReplicaCountsPerRack = new Int2IntCounterMap[numRacks];
 
-    int tableIndex = 0, regionIndex = 0, regionPerServerIndex = 0;
+    int regionIndex = 0, regionPerServerIndex = 0;
 
     for (Map.Entry<ServerName, List<RegionInfo>> entry : clusterState.entrySet()) {
       if (entry.getKey() == null) {
index 3b91bec03ecd55849b0f71bcc3e577305edc396d..fbe91a921daa39bed951fd12a4d13c7d73f93585 100644 (file)
@@ -87,9 +87,7 @@ public class FavoredStochasticBalancer extends StochasticLoadBalancer
     return fnPickers;
   }
 
-  /**
-   * @return any candidate generator in random
-   */
+  /** Returns any candidate generator in random */
   @Override
   protected CandidateGenerator getRandomGenerator() {
     return candidateGenerators.get(ThreadLocalRandom.current().nextInt(candidateGenerators.size()));
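For reference, the random-generator pick above is simply a uniform choice from a list via ThreadLocalRandom. A self-contained sketch of that selection (generic helper and sample values are illustrative):

    import java.util.Arrays;
    import java.util.List;
    import java.util.concurrent.ThreadLocalRandom;

    public class RandomPick {
      static <T> T pickRandom(List<T> items) {
        // nextInt(size) is uniform over [0, size), so every element is equally likely.
        return items.get(ThreadLocalRandom.current().nextInt(items.size()));
      }

      public static void main(String[] args) {
        System.out.println(pickRandom(Arrays.asList("a", "b", "c")));
      }
    }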
index 1d0f21a50884f9fb173289067f61192104ff4fa8..0a14e5b0b51938f37793e7a7d768f2a4b899d179 100644 (file)
@@ -246,6 +246,7 @@ class RegionHDFSBlockLocationFinder extends Configured {
    */
   @RestrictedApi(explanation = "Should only be called in tests", link = "",
       allowedOnPath = ".*/src/test/.*|.*/RegionHDFSBlockLocationFinder.java")
+  @SuppressWarnings("MixedMutabilityReturnType")
   List<ServerName> mapHostNameToServerName(List<String> hosts) {
     if (hosts == null || status == null) {
       if (hosts == null) {
index 6ad1a18e83dadbc810f62403678618339c0caf58..c2385084a3ea167f802c176ac9cfdf1765cbc03a 100644 (file)
@@ -349,8 +349,8 @@ public class CatalogFamilyFormat {
   }
 
   /**
-   * @return Deserialized values of &lt;qualifier,regioninfo&gt; pairs taken from column values that
-   *         match the regex 'info:merge.*' in array of <code>cells</code>.
+   * Returns Deserialized values of &lt;qualifier,regioninfo&gt; pairs taken from column values that
+   * match the regex 'info:merge.*' in array of <code>cells</code>.
    */
   @Nullable
   public static Map<String, RegionInfo> getMergeRegionsWithName(Cell[] cells) {
@@ -376,8 +376,8 @@ public class CatalogFamilyFormat {
   }
 
   /**
-   * @return Deserialized regioninfo values taken from column values that match the regex
-   *         'info:merge.*' in array of <code>cells</code>.
+   * Returns Deserialized regioninfo values taken from column values that match the regex
+   * 'info:merge.*' in array of <code>cells</code>.
    */
   @Nullable
   public static List<RegionInfo> getMergeRegions(Cell[] cells) {
@@ -386,8 +386,8 @@ public class CatalogFamilyFormat {
   }
 
   /**
-   * @return True if any merge regions present in <code>cells</code>; i.e. the column in
-   *         <code>cell</code> matches the regex 'info:merge.*'.
+   * Returns True if any merge regions present in <code>cells</code>; i.e. the column in
+   * <code>cell</code> matches the regex 'info:merge.*'.
    */
   public static boolean hasMergeRegions(Cell[] cells) {
     for (Cell cell : cells) {
@@ -398,9 +398,7 @@ public class CatalogFamilyFormat {
     return false;
   }
 
-  /**
-   * @return True if the column in <code>cell</code> matches the regex 'info:merge.*'.
-   */
+  /** Returns True if the column in <code>cell</code> matches the regex 'info:merge.*'. */
   public static boolean isMergeQualifierPrefix(Cell cell) {
     // Check to see if has family and that qualifier starts with the merge qualifier 'merge'
     return CellUtil.matchingFamily(cell, HConstants.CATALOG_FAMILY)
index e948048325feef04ee488e919390f2b3ac3ded0d..42bfd757e0d1cc1451d058275f091333e807b954 100644 (file)
@@ -59,6 +59,7 @@ public final class ClientMetaTableAccessor {
   }
 
   @InterfaceAudience.Private
+  @SuppressWarnings("ImmutableEnumChecker")
   public enum QueryType {
     ALL(HConstants.TABLE_FAMILY, HConstants.CATALOG_FAMILY),
     REGION(HConstants.CATALOG_FAMILY),
@@ -100,11 +101,7 @@ public final class ClientMetaTableAccessor {
     return future;
   }
 
-  /**
-   * Returns the HRegionLocation from meta for the given region n * @param regionName region we're
-   * looking for
-   * @return HRegionLocation for the given region
-   */
+  /** Returns the HRegionLocation from meta for the given region */
   public static CompletableFuture<Optional<HRegionLocation>>
     getRegionLocation(AsyncTable<?> metaTable, byte[] regionName) {
     CompletableFuture<Optional<HRegionLocation>> future = new CompletableFuture<>();
@@ -126,11 +123,7 @@ public final class ClientMetaTableAccessor {
     return future;
   }
 
-  /**
-   * Returns the HRegionLocation from meta for the given encoded region name n * @param
-   * encodedRegionName region we're looking for
-   * @return HRegionLocation for the given region
-   */
+  /** Returns the HRegionLocation from meta for the given encoded region name */
   public static CompletableFuture<Optional<HRegionLocation>>
     getRegionLocationWithEncodedName(AsyncTable<?> metaTable, byte[] encodedRegionName) {
     CompletableFuture<Optional<HRegionLocation>> future = new CompletableFuture<>();
@@ -167,8 +160,9 @@ public final class ClientMetaTableAccessor {
   }
 
   /**
-   * Used to get all region locations for the specific table. n * @param tableName table we're
-   * looking for, can be null for getting all regions
+   * Used to get all region locations for the specific table
+   * @param metaTable scanner over meta table
+   * @param tableName table we're looking for, can be null for getting all regions
    * @return the list of region locations. The return value will be wrapped by a
    *         {@link CompletableFuture}.
    */
@@ -191,8 +185,9 @@ public final class ClientMetaTableAccessor {
   }
 
   /**
-   * Used to get table regions' info and server. n * @param tableName table we're looking for, can
-   * be null for getting all regions
+   * Used to get table regions' info and server.
+   * @param metaTable                   scanner over meta table
+   * @param tableName                   table we're looking for, can be null for getting all regions
    * @param excludeOfflinedSplitParents don't return split parents
    * @return the list of regioninfos and server. The return value will be wrapped by a
    *         {@link CompletableFuture}.
@@ -221,9 +216,11 @@ public final class ClientMetaTableAccessor {
   }
 
   /**
-   * Performs a scan of META table for given table. n * @param tableName table withing we scan
-   * @param type    scanned part of meta
-   * @param visitor Visitor invoked against each row
+   * Performs a scan of META table for given table.
+   * @param metaTable scanner over meta table
+   * @param tableName table within we scan
+   * @param type      scanned part of meta
+   * @param visitor   Visitor invoked against each row
    */
   private static CompletableFuture<Void> scanMeta(AsyncTable<AdvancedScanResultConsumer> metaTable,
     TableName tableName, QueryType type, final Visitor visitor) {
@@ -232,11 +229,13 @@ public final class ClientMetaTableAccessor {
   }
 
   /**
-   * Performs a scan of META table for given table. n * @param startRow Where to start the scan
-   * @param stopRow Where to stop the scan
-   * @param type    scanned part of meta
-   * @param maxRows maximum rows to return
-   * @param visitor Visitor invoked against each row
+   * Performs a scan of META table for given table.
+   * @param metaTable scanner over meta table
+   * @param startRow  Where to start the scan
+   * @param stopRow   Where to stop the scan
+   * @param type      scanned part of meta
+   * @param maxRows   maximum rows to return
+   * @param visitor   Visitor invoked against each row
    */
   private static CompletableFuture<Void> scanMeta(AsyncTable<AdvancedScanResultConsumer> metaTable,
     byte[] startRow, byte[] stopRow, QueryType type, int maxRows, final Visitor visitor) {
@@ -382,9 +381,7 @@ public final class ClientMetaTableAccessor {
 
     abstract void add(Result r);
 
-    /**
-     * @return Collected results; wait till visits complete to collect all possible results
-     */
+    /** Returns Collected results; wait till visits complete to collect all possible results */
     List<T> getResults() {
       return this.results;
     }
@@ -458,19 +455,12 @@ public final class ClientMetaTableAccessor {
     return scan;
   }
 
-  /**
-   * Returns an HRegionLocationList extracted from the result.
-   * @return an HRegionLocationList containing all locations for the region range or null if we
-   *         can't deserialize the result.
-   */
+  /** Returns an HRegionLocationList extracted from the result. */
   private static Optional<RegionLocations> getRegionLocations(Result r) {
     return Optional.ofNullable(CatalogFamilyFormat.getRegionLocations(r));
   }
 
-  /**
-   * @param tableName table we're working with
-   * @return start row for scanning META according to query type
-   */
+  /** Returns start row for scanning META according to query type */
   public static byte[] getTableStartRowForMeta(TableName tableName, QueryType type) {
     if (tableName == null) {
       return null;
@@ -492,10 +482,7 @@ public final class ClientMetaTableAccessor {
     }
   }
 
-  /**
-   * @param tableName table we're working with
-   * @return stop row for scanning META according to query type
-   */
+  /** Returns stop row for scanning META according to query type */
   public static byte[] getTableStopRowForMeta(TableName tableName, QueryType type) {
     if (tableName == null) {
       return null;
index e769e80847f96d5fda197d35315ee2a113d7f99e..8c675c4522e60a5b970743efb3b2f3f4c933770f 100644 (file)
@@ -45,14 +45,13 @@ public class ClusterId {
     this.id = uuid;
   }
 
-  /**
-   * @return The clusterid serialized using pb w/ pb magic prefix
-   */
+  /** Returns The clusterid serialized using pb w/ pb magic prefix */
   public byte[] toByteArray() {
     return ProtobufUtil.prependPBMagic(convert().toByteArray());
   }
 
   /**
+   * Parse the serialized representation of the {@link ClusterId}
    * @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix
    * @return An instance of {@link ClusterId} made from <code>bytes</code> n * @see #toByteArray()
    */
@@ -74,9 +73,7 @@ public class ClusterId {
     }
   }
 
-  /**
-   * @return A pb instance to represent this instance.
-   */
+  /** Returns A pb instance to represent this instance. */
   public ClusterIdProtos.ClusterId convert() {
     ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder();
     return builder.setClusterId(this.id).build();
index e36010c424ceb6f69c5df7e439a45d2417900cea..7c4f2c823d13ccaf40a0fda467759beb4e4c7bd3 100644 (file)
@@ -69,38 +69,26 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Public
 public interface ClusterMetrics {
 
-  /**
-   * @return the HBase version string as reported by the HMaster
-   */
+  /** Returns the HBase version string as reported by the HMaster */
   @Nullable
   String getHBaseVersion();
 
-  /**
-   * @return the names of region servers on the dead list
-   */
+  /** Returns the names of region servers on the dead list */
   List<ServerName> getDeadServerNames();
 
-  /**
-   * @return the names of region servers on the decommissioned list
-   */
+  /** Returns the names of region servers on the decommissioned list */
   List<ServerName> getDecommissionedServerNames();
 
-  /**
-   * @return the names of region servers on the live list
-   */
+  /** Returns the names of region servers on the live list */
   Map<ServerName, ServerMetrics> getLiveServerMetrics();
 
-  /**
-   * @return the number of regions deployed on the cluster
-   */
+  /** Returns the number of regions deployed on the cluster */
   default int getRegionCount() {
     return getLiveServerMetrics().entrySet().stream()
       .mapToInt(v -> v.getValue().getRegionMetrics().size()).sum();
   }
 
-  /**
-   * @return the number of requests since last report
-   */
+  /** Returns the number of requests since last report */
   default long getRequestCount() {
     return getLiveServerMetrics().entrySet().stream()
       .flatMap(v -> v.getValue().getRegionMetrics().values().stream())
@@ -114,9 +102,7 @@ public interface ClusterMetrics {
   @Nullable
   ServerName getMasterName();
 
-  /**
-   * @return the names of backup masters
-   */
+  /** Returns the names of backup masters */
   List<ServerName> getBackupMasterNames();
 
   @InterfaceAudience.Private
@@ -147,9 +133,7 @@ public interface ClusterMetrics {
 
   List<ServerName> getServersName();
 
-  /**
-   * @return the average cluster load
-   */
+  /** Returns the average cluster load */
   default double getAverageLoad() {
     int serverSize = getLiveServerMetrics().size();
     if (serverSize == 0) {
index 5695f5b65adea9f7110a9bc6ec6ac00a9b43c973..7254209487b2b145c14c67bf5521d55590cb0e35 100644 (file)
@@ -67,13 +67,13 @@ public final class ClusterMetricsBuilder {
           .collect(Collectors.toList()))
         .addAllTableRegionStatesCount(metrics.getTableRegionStatesCount().entrySet().stream()
           .map(status -> ClusterStatusProtos.TableRegionStatesCount.newBuilder()
-            .setTableName(ProtobufUtil.toProtoTableName((status.getKey())))
+            .setTableName(ProtobufUtil.toProtoTableName(status.getKey()))
             .setRegionStatesCount(ProtobufUtil.toTableRegionStatesCount(status.getValue())).build())
           .collect(Collectors.toList()))
         .addAllDecommissionedServers(metrics.getDecommissionedServerNames().stream()
           .map(ProtobufUtil::toServerName).collect(Collectors.toList()));
     if (metrics.getMasterName() != null) {
-      builder.setMaster(ProtobufUtil.toServerName((metrics.getMasterName())));
+      builder.setMaster(ProtobufUtil.toServerName(metrics.getMasterName()));
     }
     if (metrics.getMasterTasks() != null) {
       builder.addAllMasterTasks(metrics.getMasterTasks().stream()
index edbc5f479d6ef31944e1f66d436b060f89295d58..32e06d610247765cef26c72baa4ec37298935130 100644 (file)
@@ -28,29 +28,27 @@ import org.apache.yetus.audience.InterfaceStability;
 @InterfaceStability.Evolving
 public interface CoprocessorEnvironment<C extends Coprocessor> {
 
-  /** @return the Coprocessor interface version */
+  /** Returns the Coprocessor interface version */
   int getVersion();
 
-  /** @return the HBase version as a string (e.g. "0.21.0") */
+  /** Returns the HBase version as a string (e.g. "0.21.0") */
   String getHBaseVersion();
 
-  /** @return the loaded coprocessor instance */
+  /** Returns the loaded coprocessor instance */
   C getInstance();
 
-  /** @return the priority assigned to the loaded coprocessor */
+  /** Returns the priority assigned to the loaded coprocessor */
   int getPriority();
 
-  /** @return the load sequence number */
+  /** Returns the load sequence number */
   int getLoadSequence();
 
   /**
-   * @return a Read-only Configuration; throws {@link UnsupportedOperationException} if you try to
-   *         set a configuration.
+   * Returns a Read-only Configuration; throws {@link UnsupportedOperationException} if you try to
+   * set a configuration.
    */
   Configuration getConfiguration();
 
-  /**
-   * @return the classloader for the loaded coprocessor instance
-   */
+  /** Returns the classloader for the loaded coprocessor instance */
   ClassLoader getClassLoader();
 }
index 2e4ebbd0baa68d2f2a6588b3fa78f5ec2508f3b9..47a86f9492f56916dce7420ce2732b3616655735 100644 (file)
@@ -44,10 +44,7 @@ public class HBaseServerException extends HBaseIOException {
     this.serverOverloaded = serverOverloaded;
   }
 
-  /**
-   * @param t throwable to check for server overloaded state
-   * @return True if the server was considered overloaded when the exception was thrown
-   */
+  /** Returns True if the server was considered overloaded when the exception was thrown */
   public static boolean isServerOverloaded(Throwable t) {
     if (t instanceof HBaseServerException) {
       return ((HBaseServerException) t).isServerOverloaded();
@@ -63,9 +60,7 @@ public class HBaseServerException extends HBaseIOException {
     this.serverOverloaded = serverOverloaded;
   }
 
-  /**
-   * @return True if server was considered overloaded when exception was thrown
-   */
+  /** Returns True if server was considered overloaded when exception was thrown */
   public boolean isServerOverloaded() {
     return serverOverloaded;
   }
index 0decb58bc20b45d664f8f0c2284c5d1559a393f9..ebf6d919374d2c9d8245b78baab6482c80f8d172 100644 (file)
@@ -100,8 +100,8 @@ public class HRegionLocation implements Comparable<HRegionLocation> {
   }
 
   /**
-   * @return String made of hostname and port formatted as per
-   *         {@link Addressing#createHostAndPortStr(String, int)}
+   * Returns String made of hostname and port formatted as per
+   * {@link Addressing#createHostAndPortStr(String, int)}
    */
   public String getHostnamePort() {
     return Addressing.createHostAndPortStr(this.getHostname(), this.getPort());
index 4d6dd6d43fa3d22cc4d8e60d50710780d3df4217..4c0390c6c3bed925ad58a421a518eba4cfad6e3e 100644 (file)
@@ -208,6 +208,7 @@ public class RegionLocations implements Iterable<HRegionLocation> {
    * @param other the locations to merge with
    * @return an RegionLocations object with merged locations or the same object if nothing is merged
    */
+  @SuppressWarnings("ReferenceEquality")
   public RegionLocations mergeLocations(RegionLocations other) {
     assert other != null;
 
@@ -280,6 +281,7 @@ public class RegionLocations implements Iterable<HRegionLocation> {
    * @return an RegionLocations object with updated locations or the same object if nothing is
    *         updated
    */
+  @SuppressWarnings("ReferenceEquality")
   public RegionLocations updateLocation(HRegionLocation location, boolean checkForEquals,
     boolean force) {
     assert location != null;
index d873c4bc1cb43c9aa70035afd48a17cd14f5eadb..47b36a7a1516cf5f934b31761efd12b9a369e2e7 100644 (file)
@@ -28,64 +28,44 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Public
 public interface RegionMetrics {
 
-  /**
-   * @return the region name
-   */
+  /** Returns the region name */
   byte[] getRegionName();
 
-  /**
-   * @return the number of stores
-   */
+  /** Returns the number of stores */
   int getStoreCount();
 
-  /**
-   * @return the number of storefiles
-   */
+  /** Returns the number of storefiles */
   int getStoreFileCount();
 
-  /**
-   * @return the total size of the storefiles
-   */
+  /** Returns the total size of the storefiles */
   Size getStoreFileSize();
 
-  /**
-   * @return the memstore size
-   */
+  /** Returns the memstore size */
   Size getMemStoreSize();
 
-  /**
-   * @return the number of read requests made to region
-   */
+  /** Returns the number of read requests made to region */
   long getReadRequestCount();
 
-  /**
-   * @return the number of write requests made to region
-   */
+  /** Returns the number of write requests made to region */
   long getWriteRequestCount();
 
-  /**
-   * @return the number of coprocessor service requests made to region
-   */
+  /** Returns the number of coprocessor service requests made to region */
   public long getCpRequestCount();
 
   /**
-   * @return the number of write requests and read requests and coprocessor service requests made to
-   *         region
+   * Returns the number of write requests and read requests and coprocessor service requests made to
+   * region
    */
   default long getRequestCount() {
     return getReadRequestCount() + getWriteRequestCount() + getCpRequestCount();
   }
 
-  /**
-   * @return the region name as a string
-   */
+  /** Returns the region name as a string */
   default String getNameAsString() {
     return Bytes.toStringBinary(getRegionName());
   }
 
-  /**
-   * @return the number of filtered read requests made to region
-   */
+  /** Returns the number of filtered read requests made to region */
   long getFilteredReadRequestCount();
 
   /**
@@ -96,29 +76,19 @@ public interface RegionMetrics {
    */
   Size getStoreFileIndexSize();
 
-  /**
-   * @return The current total size of root-level indexes for the region
-   */
+  /** Returns the current total size of root-level indexes for the region */
   Size getStoreFileRootLevelIndexSize();
 
-  /**
-   * @return The total size of all index blocks, not just the root level
-   */
+  /** Returns the total size of all index blocks, not just the root level */
   Size getStoreFileUncompressedDataIndexSize();
 
-  /**
-   * @return The total size of all Bloom filter blocks, not just loaded into the block cache
-   */
+  /** Returns the total size of all Bloom filter blocks, not just loaded into the block cache */
   Size getBloomFilterSize();
 
-  /**
-   * @return the total number of cells in current compaction
-   */
+  /** Returns the total number of cells in current compaction */
   long getCompactingCellCount();
 
-  /**
-   * @return the number of already compacted kvs in current compaction
-   */
+  /** Returns the number of already compacted kvs in current compaction */
   long getCompactedCellCount();
 
   /**
@@ -127,34 +97,24 @@ public interface RegionMetrics {
    */
   long getCompletedSequenceId();
 
-  /**
-   * @return completed sequence id per store.
-   */
+  /** Returns completed sequence id per store. */
   Map<byte[], Long> getStoreSequenceId();
 
-  /**
-   * @return the uncompressed size of the storefiles
-   */
+  /** Returns the uncompressed size of the storefiles */
   Size getUncompressedStoreFileSize();
 
-  /**
-   * @return the data locality of region in the regionserver.
-   */
+  /** Returns the data locality of region in the regionserver. */
   float getDataLocality();
 
-  /**
-   * @return the timestamp of the oldest hfile for any store of this region.
-   */
+  /** Returns the timestamp of the oldest hfile for any store of this region. */
   long getLastMajorCompactionTimestamp();
 
-  /**
-   * @return the reference count for the stores of this region
-   */
+  /** Returns the reference count for the stores of this region */
   int getStoreRefCount();
 
   /**
-   * @return the max reference count for any store file among all compacted stores files of this
-   *         region
+   * Returns the max reference count for any store file among all compacted store files of this
+   * region
    */
   int getMaxCompactedStoreFileRefCount();
 
@@ -164,9 +124,7 @@ public interface RegionMetrics {
    */
   float getDataLocalityForSsd();
 
-  /**
-   * @return the data at local weight of this region in the regionserver
-   */
+  /** Returns the data at local weight of this region in the regionserver */
   long getBlocksLocalWeight();
 
   /**
@@ -175,13 +133,9 @@ public interface RegionMetrics {
    */
   long getBlocksLocalWithSsdWeight();
 
-  /**
-   * @return the block total weight of this region
-   */
+  /** Returns the block total weight of this region */
   long getBlocksTotalWeight();
 
-  /**
-   * @return the compaction state of this region
-   */
+  /** Returns the compaction state of this region */
   CompactionState getCompactionState();
 }
index 38286afa2d15e39437cb3c1590e53b8a18886b52..e0c408781f8df94126d5fcff0ca3be9b67bedb1f 100644 (file)
@@ -33,48 +33,32 @@ public interface ServerMetrics {
 
   ServerName getServerName();
 
-  /**
-   * @return the version number of a regionserver.
-   */
+  /** Returns the version number of a regionserver. */
   default int getVersionNumber() {
     return 0;
   }
 
-  /**
-   * @return the string type version of a regionserver.
-   */
+  /** Returns the version of a regionserver as a string. */
   default String getVersion() {
     return "0.0.0";
   }
 
-  /**
-   * @return the number of requests per second.
-   */
+  /** Returns the number of requests per second. */
   long getRequestCountPerSecond();
 
-  /**
-   * @return total Number of requests from the start of the region server.
-   */
+  /** Returns the total number of requests from the start of the region server. */
   long getRequestCount();
 
-  /**
-   * @return total Number of read requests from the start of the region server.
-   */
+  /** Returns the total number of read requests from the start of the region server. */
   long getReadRequestsCount();
 
-  /**
-   * @return total Number of write requests from the start of the region server.
-   */
+  /** Returns the total number of write requests from the start of the region server. */
   long getWriteRequestsCount();
 
-  /**
-   * @return the amount of used heap
-   */
+  /** Returns the amount of used heap */
   Size getUsedHeapSize();
 
-  /**
-   * @return the maximum allowable size of the heap
-   */
+  /** Returns the maximum allowable size of the heap */
   Size getMaxHeapSize();
 
   int getInfoServerPort();
@@ -97,14 +81,10 @@ public interface ServerMetrics {
   @Nullable
   ReplicationLoadSink getReplicationLoadSink();
 
-  /**
-   * @return region load metrics
-   */
+  /** Returns region load metrics */
   Map<byte[], RegionMetrics> getRegionMetrics();
 
-  /**
-   * @return metrics per user
-   */
+  /** Returns metrics per user */
   Map<byte[], UserMetrics> getUserMetrics();
 
   /**
@@ -113,14 +93,10 @@ public interface ServerMetrics {
    */
   Set<String> getCoprocessorNames();
 
-  /**
-   * @return the timestamp (server side) of generating this metrics
-   */
+  /** Returns the timestamp (server side) at which these metrics were generated */
   long getReportTimestamp();
 
-  /**
-   * @return the last timestamp (server side) of generating this metrics
-   */
+  /** Returns the last timestamp (server side) at which these metrics were generated */
   long getLastReportTimestamp();
 
   /**
index 99f8520aa362e50c0d2637434ea4a122ec96481d..7a0312f22fdc3e1ba196a1d21bca97ad1818dd2e 100644 (file)
@@ -44,10 +44,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 @InterfaceAudience.Private
 public final class ServerMetricsBuilder {
 
-  /**
-   * @param sn the server name
-   * @return a empty metrics
-   */
   public static ServerMetrics of(ServerName sn) {
     return newBuilder(sn).build();
   }
@@ -300,6 +296,7 @@ public final class ServerMetricsBuilder {
       return versionNumber;
     }
 
+    @Override
     public String getVersion() {
       return version;
     }
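The @Override added above is a correctness guard rather than a behavior change: if the supertype method being implemented is ever renamed or removed, compilation fails at the annotated method instead of silently leaving an orphaned one behind. A small sketch with hypothetical types:

    interface Versioned {
      String getVersion();
    }

    final class DefaultVersioned implements Versioned {
      @Override // compile error here if Versioned.getVersion() changes signature or disappears
      public String getVersion() {
        return "0.0.0";
      }
    }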
@@ -414,16 +411,18 @@ public final class ServerMetricsBuilder {
         int currentMaxCompactedStoreFileRefCount = r.getMaxCompactedStoreFileRefCount();
         maxCompactedStoreFileRefCount =
           Math.max(maxCompactedStoreFileRefCount, currentMaxCompactedStoreFileRefCount);
-        uncompressedStoreFileSizeMB += r.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE);
-        storeFileSizeMB += r.getStoreFileSize().get(Size.Unit.MEGABYTE);
-        memStoreSizeMB += r.getMemStoreSize().get(Size.Unit.MEGABYTE);
-        storefileIndexSizeKB += r.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE);
+        uncompressedStoreFileSizeMB +=
+          (long) r.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE);
+        storeFileSizeMB += (long) r.getStoreFileSize().get(Size.Unit.MEGABYTE);
+        memStoreSizeMB += (long) r.getMemStoreSize().get(Size.Unit.MEGABYTE);
+        storefileIndexSizeKB +=
+          (long) r.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE);
         readRequestsCount += r.getReadRequestCount();
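The casts added in this hunk spell out the double-to-long narrowing that the compound assignments would otherwise perform implicitly (Size.get(Size.Unit) is assumed to return a double, which is what the casts imply). A self-contained sketch of the difference, with stand-in values:

    final class NarrowingExample {
      public static void main(String[] args) {
        long storeFileSizeMB = 0L;
        double reportedMb = 123.7; // stand-in for r.getStoreFileSize().get(Size.Unit.MEGABYTE)

        // Without a cast this still compiles, but the narrowing is hidden:
        // it behaves like storeFileSizeMB = (long) (storeFileSizeMB + reportedMb);
        storeFileSizeMB += reportedMb;        // now 123

        // With the explicit cast the truncation is visible on the operand,
        // which is the form the patch uses.
        storeFileSizeMB += (long) reportedMb; // adds 123, now 246

        System.out.println(storeFileSizeMB);  // prints 246
      }
    }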