This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 18b27411128 HDDS-14087. Replace Preconditions.checkNotNull in hdds-client and -common (#9452)
18b27411128 is described below

commit 18b2741112868dac742956878a6c967b6683bac3
Author: ChenChen Lai <[email protected]>
AuthorDate: Sun Dec 7 21:25:12 2025 +0800

    HDDS-14087. Replace Preconditions.checkNotNull in hdds-client and -common (#9452)
---
 .../org/apache/hadoop/hdds/scm/XceiverClientCreator.java    |  4 ++--
 .../java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java  |  7 +++----
 .../org/apache/hadoop/hdds/scm/XceiverClientManager.java    |  9 +++++----
 .../java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java |  3 +--
 .../org/apache/hadoop/hdds/scm/client/HddsClientUtils.java  |  4 ++--
 .../hadoop/hdds/scm/storage/AbstractDataStreamOutput.java   |  2 +-
 .../hadoop/hdds/scm/storage/BlockDataStreamOutput.java      |  9 +++++----
 .../apache/hadoop/hdds/scm/storage/BlockOutputStream.java   |  9 +++++----
 .../src/main/java/org/apache/hadoop/hdds/HddsUtils.java     | 12 ++++--------
 .../org/apache/hadoop/hdds/conf/OzoneConfiguration.java     |  4 ++--
 .../org/apache/hadoop/hdds/protocol/DatanodeDetails.java    |  3 +--
 .../java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java  |  3 ++-
 .../scm/protocolPB/ContainerCommandResponseBuilders.java    |  6 +++---
 .../org/apache/hadoop/hdds/security/SecurityConfig.java     | 13 +++++--------
 .../x509/certificate/utils/CertificateSignRequest.java      | 10 +++++-----
 .../x509/certificate/utils/SelfSignedCertificate.java       |  7 ++++---
 .../java/org/apache/hadoop/hdds/utils/ResourceCache.java    | 12 ++++++------
 .../src/main/java/org/apache/hadoop/ipc/RetryCache.java     |  4 ++--
 .../hadoop/ozone/container/common/helpers/ChunkInfo.java    |  4 ++--
 .../java/org/apache/hadoop/ozone/util/YamlSerializer.java   |  6 +++---
 20 files changed, 63 insertions(+), 68 deletions(-)
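
For reference, a minimal sketch (not part of this commit) of the substitution
applied throughout: both calls throw NullPointerException on null and return
the argument otherwise, but Objects.requireNonNull is JDK-native, carries an
explicit message, and drops the Guava dependency. Class and variable names
below are illustrative only:

    import java.util.Objects;

    final class NullCheckSketch {
      static Object accept(Object pipeline) {
        // Before (Guava): Preconditions.checkNotNull(pipeline)
        //   throws a message-less NullPointerException.
        // After (JDK): throws NullPointerException("pipeline == null")
        //   and, like checkNotNull, returns the non-null argument.
        return Objects.requireNonNull(pipeline, "pipeline == null");
      }
    }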

diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientCreator.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientCreator.java
index 49ca3a09e11..ce3404ae5b7 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientCreator.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientCreator.java
@@ -17,8 +17,8 @@
 
 package org.apache.hadoop.hdds.scm;
 
-import com.google.common.base.Preconditions;
 import java.io.IOException;
+import java.util.Objects;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.scm.client.ClientTrustManager;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
@@ -49,7 +49,7 @@ public XceiverClientCreator(ConfigurationSource conf, ClientTrustManager trustMa
         OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_DEFAULT);
     this.trustManager = trustManager;
     if (securityEnabled) {
-      Preconditions.checkNotNull(trustManager);
+      Objects.requireNonNull(trustManager, "trustManager == null");
     }
   }
 
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index d53cc957cbf..6cd258c5ba3 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -20,7 +20,6 @@
 import static org.apache.hadoop.hdds.HddsUtils.processForDebug;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.ArrayList;
@@ -114,8 +113,8 @@ public class XceiverClientGrpc extends XceiverClientSpi {
   public XceiverClientGrpc(Pipeline pipeline, ConfigurationSource config,
       ClientTrustManager trustManager) {
     super();
-    Preconditions.checkNotNull(pipeline);
-    Preconditions.checkNotNull(config);
+    Objects.requireNonNull(pipeline, "pipeline == null");
+    Objects.requireNonNull(config, "config == null");
     setTimeout(config.getTimeDuration(OzoneConfigKeys.
         OZONE_CLIENT_READ_TIMEOUT, OzoneConfigKeys
         .OZONE_CLIENT_READ_TIMEOUT_DEFAULT, TimeUnit.SECONDS));
@@ -508,7 +507,7 @@ private XceiverClientReply sendCommandWithRetry(
       reply.setResponse(CompletableFuture.completedFuture(responseProto));
       return reply;
     } else {
-      Objects.requireNonNull(ioException);
+      Objects.requireNonNull(ioException, "ioException == null");
       String message = "Failed to execute command {}";
       if (LOG.isDebugEnabled()) {
         LOG.debug(message + " on the pipeline {}.",
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
index 7e38e7d9c2c..2c662f16aaf 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
@@ -29,6 +29,7 @@
 import com.google.common.cache.RemovalListener;
 import com.google.common.cache.RemovalNotification;
 import java.io.IOException;
+import java.util.Objects;
 import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.hdds.conf.Config;
 import org.apache.hadoop.hdds.conf.ConfigGroup;
@@ -79,8 +80,8 @@ public XceiverClientManager(ConfigurationSource conf,
       ScmClientConfig clientConf,
       ClientTrustManager trustManager) throws IOException {
     super(conf, trustManager);
-    Preconditions.checkNotNull(clientConf);
-    Preconditions.checkNotNull(conf);
+    Objects.requireNonNull(clientConf, "clientConf == null");
+    Objects.requireNonNull(conf, "conf == null");
     long staleThresholdMs = clientConf.getStaleThreshold(MILLISECONDS);
 
     this.clientCache = CacheBuilder.newBuilder()
@@ -118,7 +119,7 @@ public Cache<String, XceiverClientSpi> getClientCache() {
   @Override
   public XceiverClientSpi acquireClient(Pipeline pipeline,
       boolean topologyAware) throws IOException {
-    Preconditions.checkNotNull(pipeline);
+    Objects.requireNonNull(pipeline, "pipeline == null");
     Preconditions.checkArgument(pipeline.getNodes() != null);
     Preconditions.checkArgument(!pipeline.getNodes().isEmpty(),
         NO_REPLICA_FOUND);
@@ -133,7 +134,7 @@ public XceiverClientSpi acquireClient(Pipeline pipeline,
   @Override
   public void releaseClient(XceiverClientSpi client, boolean invalidateClient,
       boolean topologyAware) {
-    Preconditions.checkNotNull(client);
+    Objects.requireNonNull(client, "client == null");
     synchronized (clientCache) {
       client.decrementReference();
       if (invalidateClient) {
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index 4accbdbaec4..08ad7298e2a 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -392,8 +392,7 @@ public XceiverClientReply sendCommandAsync(
               // able to connect to leader in the pipeline, though the
               // pipeline can still be functional.
               RaftException exception = reply.getException();
-              Preconditions.checkNotNull(exception, "Raft reply failure but " +
-                  "no exception propagated.");
+              Objects.requireNonNull(exception, "Raft reply failure but no exception propagated.");
               throw new CompletionException(exception);
             }
             ContainerCommandResponseProto response =
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
index ea5fc908a97..8a7abef0926 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
@@ -17,11 +17,11 @@
 
 package org.apache.hadoop.hdds.scm.client;
 
-import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
@@ -219,7 +219,7 @@ public static void verifyKeyName(String keyName) {
    */
   public static <T> void checkNotNull(T... references) {
     for (T ref: references) {
-      Preconditions.checkNotNull(ref);
+      Objects.requireNonNull(ref, "ref == null");
     }
   }
 
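
A usage sketch of the varargs helper above (call-site names hypothetical); note
that every failure reports the same fixed message "ref == null", since the loop
cannot name the failing parameter:

    // Throws NullPointerException("ref == null") if any argument is null.
    HddsClientUtils.checkNotNull(volumeName, bucketName, keyName);
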
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractDataStreamOutput.java
index 377c6e80f8c..6dc2f150a24 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractDataStreamOutput.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractDataStreamOutput.java
@@ -105,7 +105,7 @@ protected void handleRetry(IOException exception, RetryPolicy retryPolicy)
     if (Thread.currentThread().isInterrupted()) {
       setExceptionAndThrow(exception);
     }
-    Objects.requireNonNull(action);
+    Objects.requireNonNull(action, "action == null");
     Preconditions.checkArgument(
         action.action == RetryPolicy.RetryAction.RetryDecision.RETRY);
     if (action.delayMillis > 0) {
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
index 77518b7533b..e398c79ce8d 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
@@ -25,6 +25,7 @@
 import java.util.ArrayList;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Objects;
 import java.util.Queue;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.CompletionException;
@@ -215,11 +216,11 @@ private DataStreamOutput setupStream(Pipeline pipeline) throws IOException {
         ContainerCommandRequestMessage.toMessage(builder.build(), null);
 
     if (isDatastreamPipelineMode) {
-      return Preconditions.checkNotNull(xceiverClient.getDataStreamApi())
+      return Objects.requireNonNull(xceiverClient.getDataStreamApi(), "xceiverClient.getDataStreamApi() == null")
           .stream(message.getContent().asReadOnlyByteBuffer(),
               RatisHelper.getRoutingTable(pipeline));
     } else {
-      return Preconditions.checkNotNull(xceiverClient.getDataStreamApi())
+      return Objects.requireNonNull(xceiverClient.getDataStreamApi(), "xceiverClient.getDataStreamApi() == null")
           .stream(message.getContent().asReadOnlyByteBuffer());
     }
   }
@@ -398,10 +399,10 @@ public void executePutBlock(boolean close,
     long flushPos = totalDataFlushedLength;
     final List<StreamBuffer> byteBufferList;
     if (!force) {
-      Preconditions.checkNotNull(bufferList);
+      Objects.requireNonNull(bufferList, "bufferList == null");
       byteBufferList = buffersForPutBlock;
       buffersForPutBlock = null;
-      Preconditions.checkNotNull(byteBufferList);
+      Objects.requireNonNull(byteBufferList, "byteBufferList == null");
     } else {
       byteBufferList = null;
     }
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
index a03fd037bda..d7076df3ba0 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
@@ -31,6 +31,7 @@
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Objects;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.CompletionException;
 import java.util.concurrent.CopyOnWriteArrayList;
@@ -573,10 +574,10 @@ CompletableFuture<PutBlockResult> executePutBlock(boolean close,
     long flushPos = totalWriteChunkLength;
     final List<ChunkBuffer> byteBufferList;
     if (!force) {
-      Preconditions.checkNotNull(bufferList);
+      Objects.requireNonNull(bufferList, "bufferList == null");
       byteBufferList = bufferList;
       bufferList = null;
-      Preconditions.checkNotNull(byteBufferList);
+      Objects.requireNonNull(byteBufferList, "byteBufferList == null");
     } else {
       byteBufferList = null;
     }
@@ -945,10 +946,10 @@ private CompletableFuture<PutBlockResult> writeChunkToContainer(
         containerBlockData.addChunks(chunkInfo);
       }
       if (putBlockPiggybacking) {
-        Preconditions.checkNotNull(bufferList);
+        Objects.requireNonNull(bufferList, "bufferList == null");
         byteBufferList = bufferList;
         bufferList = null;
-        Preconditions.checkNotNull(byteBufferList);
+        Objects.requireNonNull(byteBufferList, "byteBufferList == null");
 
         blockData = containerBlockData.build();
         LOG.debug("piggyback chunk list {}", blockData);
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index 383403cb509..9170f61a3f7 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -635,10 +635,8 @@ public static long getTime() {
    *     ancestor of {@code path}
    */
   public static void validatePath(Path path, Path ancestor) {
-    Preconditions.checkNotNull(path,
-        "Path should not be null");
-    Preconditions.checkNotNull(ancestor,
-        "Ancestor should not be null");
+    Objects.requireNonNull(path, "Path should not be null");
+    Objects.requireNonNull(ancestor, "Ancestor should not be null");
     Preconditions.checkArgument(
         path.normalize().startsWith(ancestor.normalize()),
         "Path %s should be a descendant of %s", path, ancestor);
@@ -852,8 +850,7 @@ public static String threadNamePrefix(@Nullable Object id) {
    * Transform a protobuf UUID to Java UUID.
    */
   public static UUID fromProtobuf(HddsProtos.UUID uuid) {
-    Objects.requireNonNull(uuid,
-        "HddsProtos.UUID can't be null to transform to java UUID.");
+    Objects.requireNonNull(uuid, "HddsProtos.UUID can't be null to transform to java UUID.");
     return new UUID(uuid.getMostSigBits(), uuid.getLeastSigBits());
   }
 
@@ -861,8 +858,7 @@ public static UUID fromProtobuf(HddsProtos.UUID uuid) {
    * Transform a Java UUID to protobuf UUID.
    */
   public static HddsProtos.UUID toProtobuf(UUID uuid) {
-    Objects.requireNonNull(uuid,
-        "UUID can't be null to transform to protobuf UUID.");
+    Objects.requireNonNull(uuid, "UUID can't be null to transform to protobuf UUID.");
     return HddsProtos.UUID.newBuilder()
         .setMostSigBits(uuid.getMostSignificantBits())
         .setLeastSigBits(uuid.getLeastSignificantBits())
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
index fc09a5a5933..384aded25b5 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
@@ -22,7 +22,6 @@
 import static org.apache.hadoop.hdds.ratis.RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CONTAINER_COPY_WORKDIR;
 
-import com.google.common.base.Preconditions;
 import java.net.URL;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -33,6 +32,7 @@
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Properties;
 import java.util.SortedSet;
 import java.util.TreeSet;
@@ -87,7 +87,7 @@ public static OzoneConfiguration of(OzoneConfiguration source) {
   }
 
   public static OzoneConfiguration of(Configuration conf) {
-    Preconditions.checkNotNull(conf);
+    Objects.requireNonNull(conf, "conf == null");
 
     return conf instanceof OzoneConfiguration
         ? (OzoneConfiguration) conf
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
index a780a96637d..2585f593611 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
@@ -23,7 +23,6 @@
 import static org.apache.hadoop.ozone.ClientVersion.VERSION_HANDLES_UNKNOWN_DN_PORTS;
 
 import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 import com.google.common.collect.ImmutableSet;
 import com.google.protobuf.ByteString;
@@ -960,7 +959,7 @@ public Builder setCurrentVersion(int v) {
      * @return DatanodeDetails
      */
     public DatanodeDetails build() {
-      Preconditions.checkNotNull(id);
+      Objects.requireNonNull(id, "id == null");
       if (networkLocation == null || networkLocation.getString().isEmpty()) {
         networkLocation = NetConstants.BYTE_STRING_DEFAULT_RACK;
       }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java
index 682026fdb1d..c55646ad1ab 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java
@@ -29,6 +29,7 @@
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -278,7 +279,7 @@ public void remove(Node node) {
       // find the next ancestor node
       String ancestorName = getNextLevelAncestorName(node);
       InnerNodeImpl childNode = (InnerNodeImpl)childrenMap.get(ancestorName);
-      Preconditions.checkNotNull(childNode, "InnerNode is deleted before leaf");
+      Objects.requireNonNull(childNode, "InnerNode is deleted before leaf");
       // remove node from the parent node
       childNode.remove(node);
       // if the parent node has no children, remove the parent node too
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java
index 3fc97e97ffa..c60f3d7449a 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java
@@ -19,9 +19,9 @@
 
 import static org.apache.hadoop.hdds.scm.utils.ClientCommandsUtils.getReadChunkVersion;
 
-import com.google.common.base.Preconditions;
 import java.nio.ByteBuffer;
 import java.util.List;
+import java.util.Objects;
 import java.util.function.Function;
 import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -248,7 +248,7 @@ public static ContainerCommandResponseProto getGetSmallFileResponseSuccess(
       ContainerCommandRequestProto request, List<ByteString> dataBuffers,
       ChunkInfo info) {
 
-    Preconditions.checkNotNull(request);
+    Objects.requireNonNull(request, "request == null");
 
     boolean isReadChunkV0 = getReadChunkVersion(request.getGetSmallFile())
         .equals(ContainerProtos.ReadChunkVersion.V0);
@@ -294,7 +294,7 @@ public static ContainerCommandResponseProto getGetSmallFileResponseSuccess(
   public static ContainerCommandResponseProto getReadContainerResponse(
       ContainerCommandRequestProto request, ContainerDataProto containerData) {
 
-    Preconditions.checkNotNull(containerData);
+    Objects.requireNonNull(containerData, "containerData == null");
 
     ReadContainerResponseProto.Builder response =
         ReadContainerResponseProto.newBuilder()
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/SecurityConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/SecurityConfig.java
index ec8bfdff585..645b3e0b663 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/SecurityConfig.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/SecurityConfig.java
@@ -74,7 +74,6 @@
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_DEFAULT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
 
-import com.google.common.base.Preconditions;
 import java.io.IOException;
 import java.nio.file.Path;
 import java.nio.file.Paths;
@@ -82,6 +81,7 @@
 import java.security.Provider;
 import java.security.Security;
 import java.time.Duration;
+import java.util.Objects;
 import java.util.concurrent.TimeUnit;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -143,7 +143,7 @@ public class SecurityConfig {
    * @param configuration - HDDS Configuration
    */
   public SecurityConfig(ConfigurationSource configuration) {
-    Preconditions.checkNotNull(configuration, "Configuration cannot be null");
+    Objects.requireNonNull(configuration, "Configuration cannot be null");
     this.size = configuration.getInt(HDDS_KEY_LEN, HDDS_DEFAULT_KEY_LEN);
     this.keyAlgo = configuration.get(HDDS_KEY_ALGORITHM,
         HDDS_DEFAULT_KEY_ALGORITHM);
@@ -425,8 +425,7 @@ public String getPrivateKeyFileName() {
    * @return Path Key location.
    */
   public Path getKeyLocation(String component) {
-    Preconditions.checkNotNull(this.metadataDir, "Metadata directory can't be"
-        + " null. Please check configs.");
+    Objects.requireNonNull(this.metadataDir, "Metadata directory can't be null. Please check configs.");
     return Paths.get(metadataDir, component, keyDir);
   }
 
@@ -438,8 +437,7 @@ public Path getKeyLocation(String component) {
    * @return Path location.
    */
   public Path getCertificateLocation(String component) {
-    Preconditions.checkNotNull(this.metadataDir, "Metadata directory can't be"
-        + " null. Please check configs.");
+    Objects.requireNonNull(this.metadataDir, "Metadata directory can't be null. Please check configs.");
     return Paths.get(metadataDir, component, certificateDir);
   }
 
@@ -450,8 +448,7 @@ public Path getCertificateLocation(String component) {
    * @return Path location.
    */
   public Path getLocation(String component) {
-    Preconditions.checkNotNull(this.metadataDir, "Metadata directory can't be"
-        + " null. Please check configs.");
+    Objects.requireNonNull(this.metadataDir, "Metadata directory can't be null. Please check configs.");
     return Paths.get(metadataDir, component);
   }
 
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateSignRequest.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateSignRequest.java
index 5177e016791..2902c43713d 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateSignRequest.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateSignRequest.java
@@ -28,6 +28,7 @@
 import java.security.KeyPair;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Objects;
 import java.util.Optional;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.validator.routines.DomainValidator;
@@ -264,7 +265,7 @@ public Builder setDigitalEncryption(boolean dEncryption) {
     // Support SAN extension with DNS and RFC822 Name
     // other name type will be added as needed.
     public CertificateSignRequest.Builder addDnsName(String dnsName) {
-      Preconditions.checkNotNull(dnsName, "dnsName cannot be null");
+      Objects.requireNonNull(dnsName, "dnsName cannot be null");
       this.addAltName(GeneralName.dNSName, dnsName);
       return this;
     }
@@ -283,7 +284,7 @@ public boolean hasDnsName() {
 
     // IP address is subject to change which is optional for now.
     public CertificateSignRequest.Builder addIpAddress(String ip) {
-      Preconditions.checkNotNull(ip, "Ip address cannot be null");
+      Objects.requireNonNull(ip, "Ip address cannot be null");
       this.addAltName(GeneralName.iPAddress, ip);
       return this;
     }
@@ -321,8 +322,7 @@ public CertificateSignRequest.Builder addInetAddresses(
 
     public CertificateSignRequest.Builder addServiceName(
         String serviceName) {
-      Preconditions.checkNotNull(
-          serviceName, "Service Name cannot be null");
+      Objects.requireNonNull(serviceName, "Service Name cannot be null");
 
       this.addAltName(GeneralName.otherName, serviceName);
       return this;
@@ -418,7 +418,7 @@ private Extensions createExtensions() throws IOException {
     }
 
     public CertificateSignRequest build() throws SCMSecurityException {
-      Preconditions.checkNotNull(key, "KeyPair cannot be null");
+      Objects.requireNonNull(key, "KeyPair cannot be null");
       Preconditions.checkArgument(StringUtils.isNotBlank(subject), "Subject " +
           "cannot be blank");
 
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/SelfSignedCertificate.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/SelfSignedCertificate.java
index 01a1c05b426..c4a7a01f5a8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/SelfSignedCertificate.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/SelfSignedCertificate.java
@@ -32,6 +32,7 @@
 import java.util.ArrayList;
 import java.util.Date;
 import java.util.List;
+import java.util.Objects;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.validator.routines.DomainValidator;
 import org.apache.hadoop.hdds.security.SecurityConfig;
@@ -247,14 +248,14 @@ public Builder addInetAddresses(List<InetAddress> addresses,
     // Support SAN extension with DNS and RFC822 Name
     // other name type will be added as needed.
     public Builder addDnsName(String dnsName) {
-      Preconditions.checkNotNull(dnsName, "dnsName cannot be null");
+      Objects.requireNonNull(dnsName, "dnsName cannot be null");
       this.addAltName(GeneralName.dNSName, dnsName);
       return this;
     }
 
     // IP address is subject to change which is optional for now.
     public Builder addIpAddress(String ip) {
-      Preconditions.checkNotNull(ip, "Ip address cannot be null");
+      Objects.requireNonNull(ip, "Ip address cannot be null");
       this.addAltName(GeneralName.iPAddress, ip);
       return this;
     }
@@ -293,7 +294,7 @@ private ASN1Object addOtherNameAsn1Object(String name) {
 
     public X509Certificate build()
         throws SCMSecurityException, IOException {
-      Preconditions.checkNotNull(key, "Key cannot be null");
+      Objects.requireNonNull(key, "Key cannot be null");
       Preconditions.checkArgument(StringUtils.isNotBlank(subject),
           "Subject " + "cannot be blank");
       Preconditions.checkArgument(StringUtils.isNotBlank(clusterID),
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/ResourceCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/ResourceCache.java
index dbe165dbd49..091467215c0 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/ResourceCache.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/ResourceCache.java
@@ -34,7 +34,7 @@ public class ResourceCache<K, V> implements Cache<K, V> {
   public ResourceCache(
       Weigher<K, V> weigher, long limits,
       RemovalListener<K, V> listener) {
-    Objects.requireNonNull(weigher);
+    Objects.requireNonNull(weigher, "weigher == null");
     if (listener == null) {
       cache = CacheBuilder.newBuilder()
           .maximumWeight(limits).weigher(weigher).build();
@@ -47,26 +47,26 @@ public ResourceCache(
 
   @Override
   public V get(K key) {
-    Objects.requireNonNull(key);
+    Objects.requireNonNull(key, "key == null");
     return cache.getIfPresent(key);
   }
 
   @Override
   public void put(K key, V value) throws InterruptedException {
-    Objects.requireNonNull(key);
-    Objects.requireNonNull(value);
+    Objects.requireNonNull(key, "key == null");
+    Objects.requireNonNull(value, "value == null");
     cache.put(key, value);
   }
 
   @Override
   public void remove(K key) {
-    Objects.requireNonNull(key);
+    Objects.requireNonNull(key, "key == null");
     cache.invalidate(key);
   }
 
   @Override
   public void removeIf(Predicate<K> predicate) {
-    Objects.requireNonNull(predicate);
+    Objects.requireNonNull(predicate, "predicate == null");
     for (K key : cache.asMap().keySet()) {
       if (predicate.test(key)) {
         remove(key);
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ipc/RetryCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ipc/RetryCache.java
index 6467ed56ae6..47edb5c26fb 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ipc/RetryCache.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ipc/RetryCache.java
@@ -19,6 +19,7 @@
 
 
 import java.util.Arrays;
+import java.util.Objects;
 import java.util.UUID;
 import java.util.concurrent.locks.ReentrantLock;
 
@@ -273,8 +274,7 @@ private CacheEntry waitForCompletion(CacheEntry newEntry) {
       lock.unlock();
     }
     // Entry already exists in cache. Wait for completion and return its state
-    Preconditions.checkNotNull(mapEntry,
-        "Entry from the cache should not be null");
+    Objects.requireNonNull(mapEntry, "Entry from the cache should not be null");
     // Wait for in progress request to complete
     synchronized (mapEntry) {
       while (mapEntry.state == CacheEntry.INPROGRESS) {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java
index 586fe76a49a..f507ab15029 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java
@@ -17,9 +17,9 @@
 
 package org.apache.hadoop.ozone.container.common.helpers;
 
-import com.google.common.base.Preconditions;
 import java.io.IOException;
 import java.util.Map;
+import java.util.Objects;
 import java.util.TreeMap;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.ozone.common.Checksum;
@@ -80,7 +80,7 @@ public void addMetadata(String key, String value) throws IOException {
    */
   public static ChunkInfo getFromProtoBuf(ContainerProtos.ChunkInfo info)
       throws IOException {
-    Preconditions.checkNotNull(info);
+    Objects.requireNonNull(info, "info == null");
 
     ChunkInfo chunkInfo = new ChunkInfo(info.getChunkName(), info.getOffset(),
         info.getLen());
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/YamlSerializer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/YamlSerializer.java
index 11e43383f8e..edb015068a2 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/YamlSerializer.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/YamlSerializer.java
@@ -17,11 +17,11 @@
 
 package org.apache.hadoop.ozone.util;
 
-import com.google.common.base.Preconditions;
 import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.nio.file.Files;
+import java.util.Objects;
 import org.apache.commons.pool2.BasePooledObjectFactory;
 import org.apache.commons.pool2.impl.GenericObjectPool;
 import org.apache.hadoop.hdds.server.YamlUtils;
@@ -70,7 +70,7 @@ public Yaml get() {
    */
   @Override
   public T load(File yamlFile) throws IOException {
-    Preconditions.checkNotNull(yamlFile, "yamlFile cannot be null");
+    Objects.requireNonNull(yamlFile, "yamlFile cannot be null");
     try (InputStream inputFileStream = Files.newInputStream(yamlFile.toPath())) {
       return load(inputFileStream);
     }
@@ -103,7 +103,7 @@ public T load(InputStream input) throws IOException {
    */
   @Override
   public boolean verifyChecksum(T data) throws IOException {
-    Preconditions.checkNotNull(data, "data cannot be null");
+    Objects.requireNonNull(data, "data cannot be null");
 
     // Get the stored checksum
     String storedChecksum = data.getChecksum();


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
