This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 407779a9ce3 HDDS-14088. Replace Preconditions.checkNotNull in hdds-server-scm (#9450)
407779a9ce3 is described below
commit 407779a9ce352fa4da60d9329493317a4ee72183
Author: ChenChen Lai <[email protected]>
AuthorDate: Sun Dec 7 18:07:49 2025 +0800
HDDS-14088. Replace Preconditions.checkNotNull in hdds-server-scm (#9450)
---
.../scm/block/DeletedBlockLogStateManagerImpl.java | 5 +--
.../scm/command/CommandStatusReportHandler.java | 6 ++--
.../container/placement/metrics/SCMNodeMetric.java | 9 +++--
.../hadoop/hdds/scm/ha/InterSCMGrpcClient.java | 6 ++--
.../hdds/scm/ha/InterSCMGrpcProtocolService.java | 6 ++--
.../hadoop/hdds/scm/ha/SCMHAManagerImpl.java | 8 ++---
.../hadoop/hdds/scm/ha/SCMHANodeDetails.java | 6 ++--
.../hadoop/hdds/scm/ha/SCMRatisServerImpl.java | 5 +--
.../hadoop/hdds/scm/ha/SCMServiceManager.java | 4 +--
.../apache/hadoop/hdds/scm/ha/SCMStateMachine.java | 2 +-
.../hadoop/hdds/scm/ha/SequenceIdGenerator.java | 13 +++----
.../scm/ha/StatefulServiceStateManagerImpl.java | 6 ++--
.../hdds/scm/node/HealthyReadOnlyNodeHandler.java | 6 ++--
.../hadoop/hdds/scm/node/NodeReportHandler.java | 8 ++---
.../hadoop/hdds/scm/node/SCMNodeManager.java | 4 +--
.../hdds/scm/node/SCMNodeStorageStatMap.java | 17 ++++-----
.../hadoop/hdds/scm/node/states/ReportResult.java | 6 ++--
.../hdds/scm/pipeline/PipelinePlacementPolicy.java | 5 +--
.../hdds/scm/pipeline/PipelineReportHandler.java | 8 ++---
.../scm/pipeline/PipelineStateManagerImpl.java | 4 +--
.../hadoop/hdds/scm/pipeline/PipelineStateMap.java | 42 +++++++++++-----------
.../scm/safemode/HealthyPipelineSafeModeRule.java | 3 +-
.../safemode/OneReplicaPipelineSafeModeRule.java | 3 +-
.../hadoop/hdds/scm/server/SCMCertStore.java | 4 +--
.../scm/server/SCMDatanodeHeartbeatDispatcher.java | 6 ++--
.../hdds/scm/server/SCMDatanodeProtocolServer.java | 6 ++--
.../hdds/scm/server/StorageContainerManager.java | 12 +++----
.../server/upgrade/FinalizationManagerImpl.java | 16 ++++-----
.../upgrade/FinalizationStateManagerImpl.java | 8 ++---
.../upgrade/SCMUpgradeFinalizationContext.java | 16 ++++-----
30 files changed, 127 insertions(+), 123 deletions(-)
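For context: Guava's Preconditions.checkNotNull and the JDK's Objects.requireNonNull behave the same way here; both throw NullPointerException when the argument is null. requireNonNull additionally accepts a message and returns its argument, so the null check can be folded directly into a field assignment, as several of the hunks below do. A minimal sketch of the before/after pattern (the class and field names are illustrative, not taken from the patch):

    import java.util.Objects;

    class ReportHandler {
      private final Object report;

      ReportHandler(Object report) {
        // Before: Preconditions.checkNotNull(report);
        // After: throws NullPointerException with the message "report == null"
        // if report is null; otherwise returns report, so the check folds
        // into the assignment.
        this.report = Objects.requireNonNull(report, "report == null");
      }
    }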
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java
index 14c8986dcf1..ca7314ee3aa 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java
@@ -24,6 +24,7 @@
import java.util.HashMap;
import java.util.Map;
import java.util.NoSuchElementException;
+import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionSummary;
@@ -212,7 +213,7 @@ public int resetRetryCountOfTransactionInDB(ArrayList<Long> txIDs)
@Override
public void onFlush() {
// onFlush() can be invoked only when ratis is enabled.
- Preconditions.checkNotNull(deletingTxIDs);
+ Objects.requireNonNull(deletingTxIDs, "deletingTxIDs == null");
deletingTxIDs.clear();
}
@@ -271,7 +272,7 @@ public Builder setStatefulConfigTable(final Table<String, ByteString> table) {
}
public DeletedBlockLogStateManager build() throws IOException {
- Preconditions.checkNotNull(deletedBlocksTransactionTable);
+ Objects.requireNonNull(deletedBlocksTransactionTable, "deletedBlocksTransactionTable == null");
final DeletedBlockLogStateManager impl = new DeletedBlockLogStateManagerImpl(
deletedBlocksTransactionTable, statefulServiceConfigTable,
containerManager, transactionBuffer);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java
index c5ff1f3f31d..2d794ac83a8 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java
@@ -17,9 +17,9 @@
package org.apache.hadoop.hdds.scm.command;
-import com.google.common.base.Preconditions;
import java.util.ArrayList;
import java.util.List;
+import java.util.Objects;
import org.apache.hadoop.hdds.HddsIdFactory;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus;
@@ -44,9 +44,9 @@ public class CommandStatusReportHandler implements
@Override
public void onMessage(CommandStatusReportFromDatanode report,
EventPublisher publisher) {
- Preconditions.checkNotNull(report);
+ Objects.requireNonNull(report, "report == null");
List<CommandStatus> cmdStatusList = report.getReport().getCmdStatusList();
- Preconditions.checkNotNull(cmdStatusList);
+ Objects.requireNonNull(cmdStatusList, "cmdStatusList == null");
if (LOGGER.isTraceEnabled()) {
LOGGER.trace("Processing command status report for dn: {}", report
.getDatanodeDetails());
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java
index 1cd8aa5ce36..184dd715c20 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.hdds.scm.container.placement.metrics;
import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
+import java.util.Objects;
/**
* SCM Node Metric that is used in the placement classes.
@@ -33,7 +33,7 @@ public class SCMNodeMetric implements DatanodeMetric<SCMNodeStat, Long>,
* @param stat - SCMNodeStat.
*/
public SCMNodeMetric(SCMNodeStat stat) {
- this.stat = stat;
+ this.stat = Objects.requireNonNull(stat, "stat == null");
}
/**
@@ -58,8 +58,7 @@ public SCMNodeMetric(long capacity, long used, long remaining,
*/
@Override
public boolean isGreater(SCMNodeStat o) {
- Preconditions.checkNotNull(this.stat, "Argument cannot be null");
- Preconditions.checkNotNull(o, "Argument cannot be null");
+ Objects.requireNonNull(o, "o == null");
// if zero, replace with 1 for the division to work.
long thisDenominator = (this.stat.getCapacity().get() == 0)
@@ -89,7 +88,7 @@ public boolean isGreater(SCMNodeStat o) {
*/
@Override
public boolean isLess(SCMNodeStat o) {
- Preconditions.checkNotNull(o, "Argument cannot be null");
+ Objects.requireNonNull(o, "Argument cannot be null");
// if zero, replace with 1 for the division to work.
long thisDenominator = (this.stat.getCapacity().get() == 0)
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcClient.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcClient.java
index 20496246337..59aff304f25 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcClient.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcClient.java
@@ -17,12 +17,12 @@
package org.apache.hadoop.hdds.scm.ha;
-import com.google.common.base.Preconditions;
import java.io.IOException;
import java.io.OutputStream;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
+import java.util.Objects;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
@@ -58,7 +58,7 @@ public class InterSCMGrpcClient implements SCMSnapshotDownloader {
public InterSCMGrpcClient(final String host,
int port, final ConfigurationSource conf,
CertificateClient scmCertificateClient) throws IOException {
- Preconditions.checkNotNull(conf);
+ Objects.requireNonNull(conf, "conf == null");
timeout = conf.getTimeDuration(
ScmConfigKeys.OZONE_SCM_HA_GRPC_DEADLINE_INTERVAL,
ScmConfigKeys.OZONE_SCM_HA_GRPC_DEADLINE_INTERVAL_DEFAULT,
@@ -130,7 +130,7 @@ public StreamDownloader(CompletableFuture<Path> response,
this.response = response;
this.outputPath = outputPath;
try {
- Preconditions.checkNotNull(outputPath, "Output path cannot be null");
+ Objects.requireNonNull(outputPath, "Output path cannot be null");
stream = Files.newOutputStream(outputPath);
} catch (IOException e) {
throw new UncheckedIOException(
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcProtocolService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcProtocolService.java
index db090460001..875aeb16068 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcProtocolService.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcProtocolService.java
@@ -19,8 +19,8 @@
import static org.apache.ratis.thirdparty.io.netty.handler.ssl.SslContextBuilder.forServer;
-import com.google.common.base.Preconditions;
import java.io.IOException;
+import java.util.Objects;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
@@ -52,7 +52,7 @@ public class InterSCMGrpcProtocolService {
InterSCMGrpcProtocolService(final ConfigurationSource conf,
final StorageContainerManager scm) throws IOException {
- Preconditions.checkNotNull(conf);
+ Objects.requireNonNull(conf, "conf == null");
this.port = conf.getInt(ScmConfigKeys.OZONE_SCM_GRPC_PORT_KEY,
ScmConfigKeys.OZONE_SCM_GRPC_PORT_DEFAULT);
@@ -82,7 +82,7 @@ public class InterSCMGrpcProtocolService {
"InterSCMGrpcProtocolService GRPC endpoint.");
}
}
- Preconditions.checkNotNull(b);
+ Objects.requireNonNull(b, "b == null");
server = nettyServerBuilder.build();
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java
index a3f20476dc3..dbfc9154e94 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java
@@ -22,11 +22,11 @@
import static org.apache.hadoop.hdds.utils.HddsServerUtil.getSecretKeyClientForScm;
import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.util.List;
+import java.util.Objects;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdds.ExitManager;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
@@ -406,8 +406,8 @@ public boolean addSCM(AddSCMRequest request) throws IOException {
+ " has cluster Id " + request.getClusterId()
+ " but leader SCM cluster id is " + clusterId);
}
- Preconditions.checkNotNull(
- getRatisServer().getDivision().getGroup().getGroupId());
+ Objects.requireNonNull(
+ getRatisServer().getDivision().getGroup().getGroupId(), "GroupId == null");
return getRatisServer().addSCM(request);
}
@@ -424,7 +424,7 @@ public boolean removeSCM(RemoveSCMRequest request) throws IOException {
" has cluster Id " + request.getClusterId() +
" but leader SCM cluster id is " + clusterId);
}
- Preconditions.checkNotNull(ratisServer.getDivision().getGroup());
+ Objects.requireNonNull(ratisServer.getDivision().getGroup(), "Group == null");
return ratisServer.removeSCM(request);
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHANodeDetails.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHANodeDetails.java
index b251c69ad7e..ff2f4fa0b71 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHANodeDetails.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHANodeDetails.java
@@ -44,7 +44,6 @@
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FLEXIBLE_FQDN_RESOLUTION_ENABLED;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FLEXIBLE_FQDN_RESOLUTION_ENABLED_DEFAULT;
-import com.google.common.base.Preconditions;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
@@ -52,6 +51,7 @@
import java.util.Collection;
import java.util.Collections;
import java.util.List;
+import java.util.Objects;
import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -288,8 +288,8 @@ public static SCMHANodeDetails loadSCMHAConfig(OzoneConfiguration conf,
public static SCMNodeDetails getHASCMNodeDetails(OzoneConfiguration conf,
String localScmServiceId, String localScmNodeId,
InetSocketAddress rpcAddress, int ratisPort, int grpcPort) {
- Preconditions.checkNotNull(localScmServiceId);
- Preconditions.checkNotNull(localScmNodeId);
+ Objects.requireNonNull(localScmServiceId, "localScmServiceId == null");
+ Objects.requireNonNull(localScmNodeId, "localScmNodeId == null");
SCMNodeDetails.Builder builder = new SCMNodeDetails.Builder();
builder
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java
index ff23c0b8fde..6c547d08271 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java
@@ -27,6 +27,7 @@
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
+import java.util.Objects;
import java.util.UUID;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
@@ -392,7 +393,7 @@ public boolean removeSCM(RemoveSCMRequest request) throws IOException {
private static RaftGroup buildRaftGroup(SCMNodeDetails details,
String scmId, String clusterId) {
- Preconditions.checkNotNull(scmId);
+ Objects.requireNonNull(scmId, "scmId == null");
final RaftGroupId groupId = buildRaftGroupId(clusterId);
RaftPeerId selfPeerId = getSelfPeerId(scmId);
@@ -414,7 +415,7 @@ public static RaftPeerId getSelfPeerId(String scmId) {
@VisibleForTesting
public static RaftGroupId buildRaftGroupId(String clusterId) {
- Preconditions.checkNotNull(clusterId);
+ Objects.requireNonNull(clusterId, "clusterId == null");
return RaftGroupId.valueOf(
UUID.fromString(clusterId.replace(OzoneConsts.CLUSTER_ID_PREFIX, "")));
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMServiceManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMServiceManager.java
index db203120aa0..1d15b130f3f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMServiceManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMServiceManager.java
@@ -17,9 +17,9 @@
package org.apache.hadoop.hdds.scm.ha;
-import com.google.common.base.Preconditions;
import java.util.ArrayList;
import java.util.List;
+import java.util.Objects;
import org.apache.hadoop.hdds.scm.ha.SCMService.Event;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -38,7 +38,7 @@ public final class SCMServiceManager {
* Register a SCMService to SCMServiceManager.
*/
public synchronized void register(SCMService service) {
- Preconditions.checkNotNull(service);
+ Objects.requireNonNull(service, "service == null");
LOG.info("Registering service {}.", service.getServiceName());
services.add(service);
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java
index ecc175639b6..840b3880269 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java
@@ -401,7 +401,7 @@ public void pause() {
@Override
public void reinitialize() throws IOException {
- Preconditions.checkNotNull(installingDBCheckpoint);
+ requireNonNull(installingDBCheckpoint, "installingDBCheckpoint == null");
DBCheckpoint checkpoint = installingDBCheckpoint;
List<ManagedSecretKey> secretKeys = installingSecretKeys;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SequenceIdGenerator.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SequenceIdGenerator.java
index 78561cbc2d4..bd11907829b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SequenceIdGenerator.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SequenceIdGenerator.java
@@ -28,6 +28,7 @@
import java.time.LocalDate;
import java.util.HashMap;
import java.util.Map;
+import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
@@ -93,13 +94,13 @@ public SequenceIdGenerator(ConfigurationSource conf,
this.batchSize = conf.getInt(OZONE_SCM_SEQUENCE_ID_BATCH_SIZE,
OZONE_SCM_SEQUENCE_ID_BATCH_SIZE_DEFAULT);
- Preconditions.checkNotNull(scmhaManager);
+ Objects.requireNonNull(scmhaManager, "scmhaManager == null");
this.stateManager = createStateManager(scmhaManager, sequenceIdTable);
}
public StateManager createStateManager(SCMHAManager scmhaManager,
Table<String, Long> sequenceIdTable) {
- Preconditions.checkNotNull(scmhaManager);
+ Objects.requireNonNull(scmhaManager, "scmhaManager == null");
return new StateManagerImpl.Builder()
.setRatisServer(scmhaManager.getRatisServer())
.setDBTransactionBuffer(scmhaManager.getDBTransactionBuffer())
@@ -281,9 +282,9 @@ private void initialize() throws IOException {
Table.KeyValue<String, Long> kv = iterator.next();
final String sequenceIdName = kv.getKey();
final Long lastId = kv.getValue();
- Preconditions.checkNotNull(sequenceIdName,
+ Objects.requireNonNull(sequenceIdName,
"sequenceIdName should not be null");
- Preconditions.checkNotNull(lastId,
+ Objects.requireNonNull(lastId,
"lastId should not be null");
sequenceIdToLastIdMap.put(sequenceIdName, lastId);
}
@@ -315,8 +316,8 @@ public Builder setDBTransactionBuffer(DBTransactionBuffer trxBuffer) {
}
public StateManager build() {
- Preconditions.checkNotNull(table);
- Preconditions.checkNotNull(buffer);
+ Objects.requireNonNull(table, "table == null");
+ Objects.requireNonNull(buffer, "buffer == null");
final StateManager impl = new StateManagerImpl(table, buffer);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/StatefulServiceStateManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/StatefulServiceStateManagerImpl.java
index 64115c94e27..151ac784c5b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/StatefulServiceStateManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/StatefulServiceStateManagerImpl.java
@@ -17,9 +17,9 @@
package org.apache.hadoop.hdds.scm.ha;
-import com.google.common.base.Preconditions;
import com.google.protobuf.ByteString;
import java.io.IOException;
+import java.util.Objects;
import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType;
import org.apache.hadoop.hdds.scm.metadata.DBTransactionBuffer;
import org.apache.hadoop.hdds.utils.db.Table;
@@ -127,8 +127,8 @@ public Builder setRatisServer(final SCMRatisServer ratisServer) {
}
public StatefulServiceStateManager build() {
- Preconditions.checkNotNull(statefulServiceConfig);
- Preconditions.checkNotNull(transactionBuffer);
+ Objects.requireNonNull(statefulServiceConfig, "statefulServiceConfig == null");
+ Objects.requireNonNull(transactionBuffer, "transactionBuffer == null");
final StatefulServiceStateManager stateManager =
new StatefulServiceStateManagerImpl(statefulServiceConfig,
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HealthyReadOnlyNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HealthyReadOnlyNodeHandler.java
index e85b0ec32dd..ed438117073 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HealthyReadOnlyNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HealthyReadOnlyNodeHandler.java
@@ -17,8 +17,8 @@
package org.apache.hadoop.hdds.scm.node;
-import com.google.common.base.Preconditions;
import java.io.IOException;
+import java.util.Objects;
import java.util.Set;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -102,9 +102,9 @@ public void onMessage(DatanodeDetails datanodeDetails,
nt.add(datanodeDetails);
// make sure after DN is added back into topology, DatanodeDetails
// instance returned from nodeStateManager has parent correctly set.
- Preconditions.checkNotNull(
+ Objects.requireNonNull(
nodeManager.getNode(datanodeDetails.getID())
- .getParent());
+ .getParent(), "Parent == null");
}
}
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java
index 87483538958..9790f289514 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java
@@ -17,7 +17,7 @@
package org.apache.hadoop.hdds.scm.node;
-import com.google.common.base.Preconditions;
+import java.util.Objects;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode;
import org.apache.hadoop.hdds.server.events.EventHandler;
@@ -31,16 +31,16 @@ public class NodeReportHandler implements EventHandler<NodeReportFromDatanode> {
private final NodeManager nodeManager;
public NodeReportHandler(NodeManager nodeManager) {
- Preconditions.checkNotNull(nodeManager);
+ Objects.requireNonNull(nodeManager, "nodeManager == null");
this.nodeManager = nodeManager;
}
@Override
public void onMessage(NodeReportFromDatanode nodeReportFromDatanode,
EventPublisher publisher) {
- Preconditions.checkNotNull(nodeReportFromDatanode);
+ Objects.requireNonNull(nodeReportFromDatanode, "nodeReportFromDatanode == null");
DatanodeDetails dn = nodeReportFromDatanode.getDatanodeDetails();
- Preconditions.checkNotNull(dn, "NodeReport is "
+ Objects.requireNonNull(dn, "NodeReport is "
+ "missing DatanodeDetails.");
nodeManager
.processNodeReport(dn, nodeReportFromDatanode.getReport());
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 15b545986df..1487c56aafe 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -537,7 +537,7 @@ private boolean isVersionChange(String oldVersion, String newVersion) {
@Override
public List<SCMCommand<?>> processHeartbeat(DatanodeDetails datanodeDetails,
CommandQueueReportProto queueReport) {
- Preconditions.checkNotNull(datanodeDetails, "Heartbeat is missing " +
+ Objects.requireNonNull(datanodeDetails, "Heartbeat is missing " +
"DatanodeDetails.");
try {
nodeStateManager.updateLastHeartbeatTime(datanodeDetails);
@@ -1592,7 +1592,7 @@ public int minPipelineLimit(List<DatanodeDetails> dnList) {
@Override
public Collection<DatanodeDetails> getPeerList(DatanodeDetails dn) {
HashSet<DatanodeDetails> dns = new HashSet<>();
- Preconditions.checkNotNull(dn);
+ Objects.requireNonNull(dn, "dn == null");
Set<PipelineID> pipelines = nodeStateManager.getPipelineByDnID(dn.getID());
PipelineManager pipelineManager = scmContext.getScm().getPipelineManager();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
index 4a60767623a..b5429b7cebc 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
@@ -25,6 +25,7 @@
import java.util.HashSet;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
@@ -85,7 +86,7 @@ public enum UtilizationThreshold {
* @return True if this is tracked, false if this map does not know about it.
*/
public boolean isKnownDatanode(UUID datanodeID) {
- Preconditions.checkNotNull(datanodeID);
+ Objects.requireNonNull(datanodeID, "datanodeID == null");
return scmNodeStorageReportMap.containsKey(datanodeID);
}
@@ -107,9 +108,9 @@ public List<UUID> getDatanodeList(
*/
public void insertNewDatanode(UUID datanodeID,
Set<StorageLocationReport> report) throws SCMException {
- Preconditions.checkNotNull(report);
+ Objects.requireNonNull(report, "report == null");
Preconditions.checkState(!report.isEmpty());
- Preconditions.checkNotNull(datanodeID);
+ Objects.requireNonNull(datanodeID, "datanodeID == null");
synchronized (scmNodeStorageReportMap) {
if (isKnownDatanode(datanodeID)) {
throw new SCMException("Node already exists in the map",
@@ -129,8 +130,8 @@ public void insertNewDatanode(UUID datanodeID,
*/
public void updateDatanodeMap(UUID datanodeID,
Set<StorageLocationReport> report) throws SCMException {
- Preconditions.checkNotNull(datanodeID);
- Preconditions.checkNotNull(report);
+ Objects.requireNonNull(datanodeID, "datanodeID == null");
+ Objects.requireNonNull(report, "report == null");
Preconditions.checkState(!report.isEmpty());
synchronized (scmNodeStorageReportMap) {
if (!scmNodeStorageReportMap.containsKey(datanodeID)) {
@@ -143,8 +144,8 @@ public void updateDatanodeMap(UUID datanodeID,
public StorageReportResult processNodeReport(UUID datanodeID,
StorageContainerDatanodeProtocolProtos.NodeReportProto nodeReport)
throws IOException {
- Preconditions.checkNotNull(datanodeID);
- Preconditions.checkNotNull(nodeReport);
+ Objects.requireNonNull(datanodeID, "datanodeID == null");
+ Objects.requireNonNull(nodeReport, "nodeReport == null");
long totalCapacity = 0;
long totalRemaining = 0;
@@ -291,7 +292,7 @@ public long getTotalFreeSpace() {
* @throws SCMException in case the dataNode is not found in the map.
*/
public void removeDatanode(UUID datanodeID) throws SCMException {
- Preconditions.checkNotNull(datanodeID);
+ Objects.requireNonNull(datanodeID, "datanodeID == null");
synchronized (scmNodeStorageReportMap) {
if (!scmNodeStorageReportMap.containsKey(datanodeID)) {
throw new SCMException("No such datanode", NO_SUCH_DATANODE);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java
index 14ce4431e56..f62f8a67b7e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java
@@ -17,8 +17,8 @@
package org.apache.hadoop.hdds.scm.node.states;
-import com.google.common.base.Preconditions;
import java.util.Collections;
+import java.util.Objects;
import java.util.Set;
/**
@@ -34,8 +34,8 @@ private ReportResult(ReportStatus status,
Set<T> missingEntries,
Set<T> newEntries) {
this.status = status;
- Preconditions.checkNotNull(missingEntries);
- Preconditions.checkNotNull(newEntries);
+ Objects.requireNonNull(missingEntries, "missingEntries == null");
+ Objects.requireNonNull(newEntries, "newEntries == null");
this.missingEntries = missingEntries;
this.newEntries = newEntries;
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
index 3a6551b6b05..696d6ecc336 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
@@ -22,6 +22,7 @@
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
+import java.util.Objects;
import java.util.stream.Collectors;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
@@ -292,8 +293,8 @@ private List<DatanodeDetails> getResultSetWithTopology(
int nodesRequired, List<DatanodeDetails> healthyNodes,
List<DatanodeDetails> usedNodes)
throws SCMException {
- Preconditions.checkNotNull(usedNodes);
- Preconditions.checkNotNull(healthyNodes);
+ Objects.requireNonNull(usedNodes, "usedNodes == null");
+ Objects.requireNonNull(healthyNodes, "healthyNodes == null");
Preconditions.checkState(nodesRequired >= 1);
if (nodesRequired + usedNodes.size() !=
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
index 50fbc5e492b..9b9d0875157 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
@@ -17,8 +17,8 @@
package org.apache.hadoop.hdds.scm.pipeline;
-import com.google.common.base.Preconditions;
import java.io.IOException;
+import java.util.Objects;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
@@ -56,7 +56,7 @@ public PipelineReportHandler(SafeModeManager scmSafeModeManager,
PipelineManager pipelineManager,
SCMContext scmContext,
ConfigurationSource conf) {
- Preconditions.checkNotNull(pipelineManager);
+ Objects.requireNonNull(pipelineManager, "pipelineManager == null");
this.scmSafeModeManager = scmSafeModeManager;
this.pipelineManager = pipelineManager;
this.scmContext = scmContext;
@@ -65,11 +65,11 @@ public PipelineReportHandler(SafeModeManager scmSafeModeManager,
@Override
public void onMessage(PipelineReportFromDatanode pipelineReportFromDatanode,
EventPublisher publisher) {
- Preconditions.checkNotNull(pipelineReportFromDatanode);
+ Objects.requireNonNull(pipelineReportFromDatanode, "pipelineReportFromDatanode == null");
DatanodeDetails dn = pipelineReportFromDatanode.getDatanodeDetails();
PipelineReportsProto pipelineReport =
pipelineReportFromDatanode.getReport();
- Preconditions.checkNotNull(dn,
+ Objects.requireNonNull(dn,
"Pipeline Report is missing DatanodeDetails.");
if (LOGGER.isTraceEnabled()) {
LOGGER.trace("Processing pipeline report for dn: {}", dn);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerImpl.java
index ad76f41c2df..5b71b2526b6 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerImpl.java
@@ -17,11 +17,11 @@
package org.apache.hadoop.hdds.scm.pipeline;
-import com.google.common.base.Preconditions;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.NavigableSet;
+import java.util.Objects;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.hdds.client.ReplicationConfig;
@@ -363,7 +363,7 @@ public Builder setPipelineStore(
}
public PipelineStateManager build() throws IOException {
- Preconditions.checkNotNull(pipelineStore);
+ Objects.requireNonNull(pipelineStore, "pipelineStore == null");
final PipelineStateManager pipelineStateManager =
new PipelineStateManagerImpl(
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java
index a1d86bd7071..9696cffabce 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java
@@ -29,6 +29,7 @@
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
+import java.util.Objects;
import java.util.Set;
import java.util.TreeSet;
import org.apache.hadoop.hdds.client.ReplicationConfig;
@@ -64,7 +65,7 @@ class PipelineStateMap {
* @throws IOException if pipeline with provided pipelineID already exists
*/
void addPipeline(Pipeline pipeline) throws IOException {
- Preconditions.checkNotNull(pipeline, "Pipeline cannot be null");
+ Objects.requireNonNull(pipeline, "Pipeline cannot be null");
Preconditions.checkArgument(
pipeline.getNodes().size() == pipeline.getReplicationConfig()
.getRequiredNodes(),
@@ -93,9 +94,9 @@ void addPipeline(Pipeline pipeline) throws IOException {
*/
void addContainerToPipeline(PipelineID pipelineID, ContainerID containerID)
throws IOException {
- Preconditions.checkNotNull(pipelineID,
+ Objects.requireNonNull(pipelineID,
"Pipeline Id cannot be null");
- Preconditions.checkNotNull(containerID,
+ Objects.requireNonNull(containerID,
"Container Id cannot be null");
Pipeline pipeline = getPipeline(pipelineID);
@@ -114,9 +115,9 @@ void addContainerToPipeline(PipelineID pipelineID, ContainerID containerID)
*/
void addContainerToPipelineSCMStart(PipelineID pipelineID,
ContainerID containerID) throws IOException {
- Preconditions.checkNotNull(pipelineID,
+ Objects.requireNonNull(pipelineID,
"Pipeline Id cannot be null");
- Preconditions.checkNotNull(containerID,
+ Objects.requireNonNull(containerID,
"Container Id cannot be null");
Pipeline pipeline = getPipeline(pipelineID);
@@ -141,7 +142,7 @@ void addContainerToPipelineSCMStart(PipelineID pipelineID,
* @throws PipelineNotFoundException if pipeline is not found
*/
Pipeline getPipeline(PipelineID pipelineID) throws PipelineNotFoundException {
- Preconditions.checkNotNull(pipelineID,
+ Objects.requireNonNull(pipelineID,
"Pipeline Id cannot be null");
Pipeline pipeline = pipelineMap.get(pipelineID);
@@ -191,9 +192,8 @@ List<Pipeline> getPipelines(ReplicationConfig replicationConfig) {
*/
List<Pipeline> getPipelines(ReplicationConfig replicationConfig,
PipelineState state) {
- Preconditions
- .checkNotNull(replicationConfig, "ReplicationConfig cannot be null");
- Preconditions.checkNotNull(state, "Pipeline state cannot be null");
+ Objects.requireNonNull(replicationConfig, "ReplicationConfig cannot be null");
+ Objects.requireNonNull(state, "Pipeline state cannot be null");
if (state == PipelineState.OPEN) {
return new ArrayList<>(
@@ -223,9 +223,8 @@ List<Pipeline> getPipelines(ReplicationConfig
replicationConfig,
*/
int getPipelineCount(ReplicationConfig replicationConfig,
PipelineState state) {
- Preconditions
- .checkNotNull(replicationConfig, "ReplicationConfig cannot be null");
- Preconditions.checkNotNull(state, "Pipeline state cannot be null");
+ Objects.requireNonNull(replicationConfig, "ReplicationConfig cannot be null");
+ Objects.requireNonNull(state, "Pipeline state cannot be null");
if (state == PipelineState.OPEN) {
return query2OpenPipelines.getOrDefault(
@@ -256,9 +255,8 @@ int getPipelineCount(ReplicationConfig replicationConfig,
List<Pipeline> getPipelines(ReplicationConfig replicationConfig,
PipelineState state, Collection<DatanodeDetails> excludeDns,
Collection<PipelineID> excludePipelines) {
- Preconditions
- .checkNotNull(replicationConfig, "ReplicationConfig cannot be null");
- Preconditions.checkNotNull(state, "Pipeline state cannot be null");
+ Objects.requireNonNull(replicationConfig, "ReplicationConfig cannot be null");
+ Objects.requireNonNull(state, "Pipeline state cannot be null");
Preconditions
.checkNotNull(excludeDns, "Datanode exclude list cannot be null");
Preconditions
@@ -304,7 +302,7 @@ List<Pipeline> getPipelines(ReplicationConfig replicationConfig,
*/
NavigableSet<ContainerID> getContainers(PipelineID pipelineID)
throws PipelineNotFoundException {
- Preconditions.checkNotNull(pipelineID,
+ Objects.requireNonNull(pipelineID,
"Pipeline Id cannot be null");
NavigableSet<ContainerID> containerIDs =
pipeline2container.get(pipelineID);
@@ -324,7 +322,7 @@ NavigableSet<ContainerID> getContainers(PipelineID pipelineID)
*/
int getNumberOfContainers(PipelineID pipelineID)
throws PipelineNotFoundException {
- Preconditions.checkNotNull(pipelineID,
+ Objects.requireNonNull(pipelineID,
"Pipeline Id cannot be null");
Set<ContainerID> containerIDs = pipeline2container.get(pipelineID);
@@ -342,7 +340,7 @@ int getNumberOfContainers(PipelineID pipelineID)
* @throws IOException if the pipeline is not empty or does not exist
*/
Pipeline removePipeline(PipelineID pipelineID) throws IOException {
- Preconditions.checkNotNull(pipelineID, "Pipeline Id cannot be null");
+ Objects.requireNonNull(pipelineID, "Pipeline Id cannot be null");
Pipeline pipeline = getPipeline(pipelineID);
if (!pipeline.isClosed()) {
@@ -365,9 +363,9 @@ Pipeline removePipeline(PipelineID pipelineID) throws IOException {
*/
void removeContainerFromPipeline(PipelineID pipelineID,
ContainerID containerID) throws IOException {
- Preconditions.checkNotNull(pipelineID,
+ Objects.requireNonNull(pipelineID,
"Pipeline Id cannot be null");
- Preconditions.checkNotNull(containerID,
+ Objects.requireNonNull(containerID,
"container Id cannot be null");
Set<ContainerID> containerIDs = pipeline2container.get(pipelineID);
@@ -389,8 +387,8 @@ void removeContainerFromPipeline(PipelineID pipelineID,
*/
Pipeline updatePipelineState(PipelineID pipelineID, PipelineState state)
throws PipelineNotFoundException {
- Preconditions.checkNotNull(pipelineID, "Pipeline Id cannot be null");
- Preconditions.checkNotNull(state, "Pipeline LifeCycleState cannot be null");
+ Objects.requireNonNull(pipelineID, "Pipeline Id cannot be null");
+ Objects.requireNonNull(state, "Pipeline LifeCycleState cannot be null");
final Pipeline pipeline = getPipeline(pipelineID);
// Return the old pipeline if updating same state
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java
index e1882cb133f..94964df73a9 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java
@@ -23,6 +23,7 @@
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.hadoop.hdds.HddsConfigKeys;
@@ -129,7 +130,7 @@ protected synchronized boolean validate() {
@Override
protected synchronized void process(Pipeline pipeline) {
- Preconditions.checkNotNull(pipeline);
+ Objects.requireNonNull(pipeline, "pipeline == null");
// When SCM is in safe mode for long time, already registered
// datanode can send pipeline report again, or SCMPipelineManager will
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java
index 293d3f573b4..4567e0126fc 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java
@@ -20,6 +20,7 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import java.util.HashSet;
+import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.hadoop.hdds.HddsConfigKeys;
@@ -89,7 +90,7 @@ protected synchronized boolean validate() {
@Override
protected synchronized void process(PipelineReportFromDatanode report) {
- Preconditions.checkNotNull(report);
+ Objects.requireNonNull(report, "report == null");
for (PipelineReport report1 : report.getReport().getPipelineReportList()) {
Pipeline pipeline;
try {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMCertStore.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMCertStore.java
index 58b7e315a5b..e597e19a4cc 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMCertStore.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMCertStore.java
@@ -19,13 +19,13 @@
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType.SCM;
-import com.google.common.base.Preconditions;
import java.io.IOException;
import java.math.BigInteger;
import java.security.cert.X509Certificate;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
+import java.util.Objects;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.Collectors;
@@ -159,7 +159,7 @@ public X509Certificate getCertificateByID(BigInteger serialID)
public List<X509Certificate> listCertificate(NodeType role,
BigInteger startSerialID, int count)
throws IOException {
- Preconditions.checkNotNull(startSerialID);
+ Objects.requireNonNull(startSerialID, "startSerialID == null");
if (startSerialID.longValue() == 0) {
startSerialID = null;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
index 9617d12e253..1641a5353a3 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
@@ -27,10 +27,10 @@
import static org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature.INITIAL_VERSION;
import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.toLayoutVersionProto;
-import com.google.common.base.Preconditions;
import com.google.protobuf.Message;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.DatanodeID;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandQueueReportProto;
@@ -67,8 +67,8 @@ public final class SCMDatanodeHeartbeatDispatcher {
public SCMDatanodeHeartbeatDispatcher(NodeManager nodeManager,
EventPublisher eventPublisher) {
- Preconditions.checkNotNull(nodeManager);
- Preconditions.checkNotNull(eventPublisher);
+ Objects.requireNonNull(nodeManager, "nodeManager == null");
+ Objects.requireNonNull(eventPublisher, "eventPublisher == null");
this.nodeManager = nodeManager;
this.eventPublisher = eventPublisher;
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index 879fb953667..ece56c094d1 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -41,7 +41,6 @@
import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress;
import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.google.protobuf.BlockingService;
import com.google.protobuf.ProtocolMessageEnum;
@@ -51,6 +50,7 @@
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import java.util.OptionalLong;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -142,8 +142,8 @@ public SCMDatanodeProtocolServer(final OzoneConfiguration conf,
// This constructor has broken down to smaller methods so that Recon's
// passive SCM server can override them.
- Preconditions.checkNotNull(scm, "SCM cannot be null");
- Preconditions.checkNotNull(eventPublisher, "EventPublisher cannot be null");
+ Objects.requireNonNull(scm, "SCM cannot be null");
+ Objects.requireNonNull(eventPublisher, "EventPublisher cannot be null");
this.scm = scm;
this.eventPublisher = eventPublisher;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 54dbcde5896..2a9c5f0ae49 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -1173,9 +1173,9 @@ public static boolean scmBootstrap(OzoneConfiguration conf)
scmhaNodeDetails.getLocalNodeDetails().getNodeId());
final ScmInfo scmInfo = HAUtils.getScmInfo(config);
final String fetchedId = scmInfo.getClusterId();
- Preconditions.checkNotNull(fetchedId);
+ Objects.requireNonNull(fetchedId, "fetchedId == null");
if (state == StorageState.INITIALIZED) {
- Preconditions.checkNotNull(scmStorageConfig.getScmId());
+ Objects.requireNonNull(scmStorageConfig.getScmId(), "scmId == null");
if (!fetchedId.equals(persistedClusterId)) {
LOG.error(
"Could not bootstrap as SCM is already initialized with cluster "
@@ -1260,7 +1260,7 @@ public static boolean scmInit(OzoneConfiguration conf,
try {
if (clusterId != null && !clusterId.isEmpty()) {
// clusterId must be an UUID
- Preconditions.checkNotNull(UUID.fromString(clusterId));
+ Objects.requireNonNull(UUID.fromString(clusterId), "clusterId UUID == null");
scmStorageConfig.setClusterId(clusterId);
}
@@ -1344,7 +1344,7 @@ private static InetSocketAddress getScmAddress(SCMHANodeDetails haDetails,
ConfigurationSource conf) throws IOException {
List<SCMNodeInfo> scmNodeInfoList = SCMNodeInfo.buildNodeInfo(
conf);
- Preconditions.checkNotNull(scmNodeInfoList, "scmNodeInfoList is null");
+ Objects.requireNonNull(scmNodeInfoList, "scmNodeInfoList is null");
InetSocketAddress scmAddress = null;
if (HddsUtils.getScmServiceId(conf) != null) {
@@ -2224,8 +2224,8 @@ public boolean removePeerFromHARing(String scmId)
checkIfCertSignRequestAllowed(rootCARotationManager, false, configuration,
"removePeerFromHARing");
- Preconditions.checkNotNull(getScmHAManager().getRatisServer()
- .getDivision().getGroup());
+ Objects.requireNonNull(getScmHAManager().getRatisServer()
+ .getDivision().getGroup(), "Group == null");
// check valid scmid in ratis peers list
if (getScmHAManager().getRatisServer().getDivision()
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/FinalizationManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/FinalizationManagerImpl.java
index 99a0cb7dc15..8d717e225f4 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/FinalizationManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/FinalizationManagerImpl.java
@@ -18,10 +18,10 @@
package org.apache.hadoop.hdds.scm.server.upgrade;
import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.io.IOException;
import java.util.Collections;
+import java.util.Objects;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -110,7 +110,7 @@ public void buildUpgradeContext(NodeManager nodeManager,
public UpgradeFinalization.StatusAndMessages finalizeUpgrade(
String upgradeClientID)
throws IOException {
- Preconditions.checkNotNull(context, "Cannot finalize upgrade without " +
+ Objects.requireNonNull(context, "Cannot finalize upgrade without " +
"first building the upgrade context.");
return upgradeFinalizer.finalize(upgradeClientID, context);
}
@@ -227,12 +227,12 @@ public Builder setFinalizationExecutor(
}
public FinalizationManagerImpl build() throws IOException {
- Preconditions.checkNotNull(conf);
- Preconditions.checkNotNull(versionManager);
- Preconditions.checkNotNull(storage);
- Preconditions.checkNotNull(scmHAManager);
- Preconditions.checkNotNull(finalizationStore);
- Preconditions.checkNotNull(executor);
+ Objects.requireNonNull(conf, "conf == null");
+ Objects.requireNonNull(versionManager, "versionManager == null");
+ Objects.requireNonNull(storage, "storage == null");
+ Objects.requireNonNull(scmHAManager, "scmHAManager == null");
+ Objects.requireNonNull(finalizationStore, "finalizationStore == null");
+ Objects.requireNonNull(executor, "executor == null");
return new FinalizationManagerImpl(this);
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/FinalizationStateManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/FinalizationStateManagerImpl.java
index 9e77e172311..77bc3499d9a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/FinalizationStateManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/FinalizationStateManagerImpl.java
@@ -17,8 +17,8 @@
package org.apache.hadoop.hdds.scm.server.upgrade;
-import com.google.common.base.Preconditions;
import java.io.IOException;
+import java.util.Objects;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol;
@@ -323,9 +323,9 @@ public Builder setTransactionBuffer(DBTransactionBuffer transactionBuffer) {
}
public FinalizationStateManager build() throws IOException {
- Preconditions.checkNotNull(finalizationStore);
- Preconditions.checkNotNull(transactionBuffer);
- Preconditions.checkNotNull(upgradeFinalizer);
+ Objects.requireNonNull(finalizationStore, "finalizationStore == null");
+ Objects.requireNonNull(transactionBuffer, "transactionBuffer == null");
+ Objects.requireNonNull(upgradeFinalizer, "upgradeFinalizer == null");
return scmRatisServer.getProxyHandler(SCMRatisProtocol.RequestType.FINALIZE,
FinalizationStateManager.class, new FinalizationStateManagerImpl(this));
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeFinalizationContext.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeFinalizationContext.java
index 0582254b0a6..312f71cb74c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeFinalizationContext.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeFinalizationContext.java
@@ -17,7 +17,7 @@
package org.apache.hadoop.hdds.scm.server.upgrade;
-import com.google.common.base.Preconditions;
+import java.util.Objects;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ha.SCMContext;
import org.apache.hadoop.hdds.scm.node.NodeManager;
@@ -129,13 +129,13 @@ public Builder setConfiguration(OzoneConfiguration configuration) {
}
public SCMUpgradeFinalizationContext build() {
- Preconditions.checkNotNull(scmContext);
- Preconditions.checkNotNull(pipelineManager);
- Preconditions.checkNotNull(nodeManager);
- Preconditions.checkNotNull(storage);
- Preconditions.checkNotNull(versionManager);
- Preconditions.checkNotNull(conf);
- Preconditions.checkNotNull(finalizationStateManager);
+ Objects.requireNonNull(scmContext, "scmContext == null");
+ Objects.requireNonNull(pipelineManager, "pipelineManager == null");
+ Objects.requireNonNull(nodeManager, "nodeManager == null");
+ Objects.requireNonNull(storage, "storage == null");
+ Objects.requireNonNull(versionManager, "versionManager == null");
+ Objects.requireNonNull(conf, "conf == null");
+ Objects.requireNonNull(finalizationStateManager, "finalizationStateManager == null");
return new SCMUpgradeFinalizationContext(this);
}
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]