This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 48facbdc391 HDDS-14086. Replace Preconditions.checkNotNull in hdds-server-framework (#9449)
48facbdc391 is described below

commit 48facbdc391bf0138be074d08a65aecef36e4ae5
Author: ChenChen Lai <[email protected]>
AuthorDate: Sun Dec 7 20:05:03 2025 +0800

    HDDS-14086. Replace Preconditions.checkNotNull in hdds-server-framework (#9449)
---
 ...orageContainerLocationProtocolClientSideTranslatorPB.java |  9 +++++----
 .../java/org/apache/hadoop/hdds/scm/proxy/SCMProxyInfo.java  |  4 ++--
 .../java/org/apache/hadoop/hdds/security/OzoneSecretKey.java |  4 ++--
 .../hadoop/hdds/security/token/BlockTokenVerifier.java       |  5 ++---
 .../security/x509/certificate/authority/DefaultCAServer.java |  3 ++-
 .../x509/certificate/authority/profile/DefaultProfile.java   |  4 ++--
 .../x509/certificate/client/DefaultCertificateClient.java    |  2 +-
 .../org/apache/hadoop/hdds/server/events/EventQueue.java     |  3 ++-
 .../org/apache/hadoop/hdds/server/events/EventWatcher.java   |  4 ++--
 .../java/org/apache/hadoop/hdds/server/http/HttpServer2.java |  5 +++--
 .../java/org/apache/hadoop/hdds/utils/db/CodecRegistry.java  |  3 +--
 .../org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java    |  4 ++--
 .../java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java |  5 ++---
 .../main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java  | 11 ++++++-----
 .../java/org/apache/hadoop/ozone/common/StorageInfo.java     | 12 ++++++------
 15 files changed, 40 insertions(+), 38 deletions(-)
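
For readers skimming the diff: the change is mechanical. Guava's Preconditions.checkNotNull is swapped for the JDK's java.util.Objects.requireNonNull, which throws the same NullPointerException but drops the Guava dependency and attaches an explicit message. A minimal sketch of the pattern, using a hypothetical Example class and value field (not part of this patch):

    import java.util.Objects;

    public final class Example {
      private final String value;

      Example(String value) {
        // JDK replacement for Guava's Preconditions.checkNotNull(value):
        // throws NullPointerException with the given message when value is null.
        this.value = Objects.requireNonNull(value, "value == null");
      }
    }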

diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
index 25cfe742f1c..94b2230e68b 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
@@ -32,6 +32,7 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Optional;
 import java.util.UUID;
 import java.util.function.Consumer;
@@ -169,7 +170,7 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
    */
   public StorageContainerLocationProtocolClientSideTranslatorPB(
       SCMContainerLocationFailoverProxyProvider proxyProvider) {
-    Preconditions.checkNotNull(proxyProvider);
+    Objects.requireNonNull(proxyProvider, "proxyProvider == null");
     this.fpp = proxyProvider;
     this.rpcProxy = (StorageContainerLocationProtocolPB) RetryProxy.create(
         StorageContainerLocationProtocolPB.class,
@@ -555,7 +556,7 @@ public HddsProtos.Node queryNode(UUID uuid) throws IOException {
   @Override
   public List<DatanodeAdminError> decommissionNodes(List<String> nodes, boolean force)
       throws IOException {
-    Preconditions.checkNotNull(nodes);
+    Objects.requireNonNull(nodes, "nodes == null");
     DecommissionNodesRequestProto request =
         DecommissionNodesRequestProto.newBuilder()
         .addAllHosts(nodes).setForce(force)
@@ -579,7 +580,7 @@ public List<DatanodeAdminError> decommissionNodes(List<String> nodes, boolean fo
   @Override
   public List<DatanodeAdminError> recommissionNodes(List<String> nodes)
       throws IOException {
-    Preconditions.checkNotNull(nodes);
+    Objects.requireNonNull(nodes, "nodes == null");
     RecommissionNodesRequestProto request =
         RecommissionNodesRequestProto.newBuilder()
             .addAllHosts(nodes)
@@ -608,7 +609,7 @@ public List<DatanodeAdminError> recommissionNodes(List<String> nodes)
   @Override
   public List<DatanodeAdminError> startMaintenanceNodes(
       List<String> nodes, int endInHours, boolean force) throws IOException {
-    Preconditions.checkNotNull(nodes);
+    Objects.requireNonNull(nodes, "nodes == null");
     StartMaintenanceNodesRequestProto request =
         StartMaintenanceNodesRequestProto.newBuilder()
             .addAllHosts(nodes)
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMProxyInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMProxyInfo.java
index 5090c149675..5d1ecbd438e 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMProxyInfo.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMProxyInfo.java
@@ -17,8 +17,8 @@
 
 package org.apache.hadoop.hdds.scm.proxy;
 
-import com.google.common.base.Preconditions;
 import java.net.InetSocketAddress;
+import java.util.Objects;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -36,7 +36,7 @@ public class SCMProxyInfo {
 
   public SCMProxyInfo(String serviceID, String nodeID,
                       InetSocketAddress rpcAddress) {
-    Preconditions.checkNotNull(rpcAddress);
+    Objects.requireNonNull(rpcAddress, "rpcAddress == null");
     this.serviceId = serviceID;
     this.nodeId = nodeID;
     this.rpcAddrStr = rpcAddress.toString();
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/OzoneSecretKey.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/OzoneSecretKey.java
index 171f693c993..3f9bc7da839 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/OzoneSecretKey.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/OzoneSecretKey.java
@@ -17,10 +17,10 @@
 
 package org.apache.hadoop.hdds.security;
 
-import com.google.common.base.Preconditions;
 import java.security.KeyPair;
 import java.security.PrivateKey;
 import java.security.PublicKey;
+import java.util.Objects;
 import org.apache.commons.lang3.builder.EqualsBuilder;
 import org.apache.commons.lang3.builder.HashCodeBuilder;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
@@ -42,7 +42,7 @@ public class OzoneSecretKey {
 
   public OzoneSecretKey(int keyId, long expiryDate, KeyPair keyPair,
       String certificateSerialId) {
-    Preconditions.checkNotNull(keyId);
+    Objects.requireNonNull(keyPair, "keyPair == null");
     this.keyId = keyId;
     this.expiryDate = expiryDate;
     this.privateKey = keyPair.getPrivate();
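
Note that this hunk also removes a latent no-op: the old checkNotNull(keyId) could never fail, because the primitive int is autoboxed into a non-null Integer, so the replacement instead guards keyPair, the reference actually dereferenced just below. A small sketch of why the old check always passed (values are illustrative):

    int keyId = 42;
    // Autoboxing yields Integer.valueOf(42), which is never null,
    // so a null check on a primitive int argument cannot fire.
    Integer boxed = keyId;
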
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenVerifier.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenVerifier.java
index 5c69d8dc81d..0543390cfee 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenVerifier.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenVerifier.java
@@ -23,7 +23,7 @@
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.READ;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.WRITE;
 
-import com.google.common.base.Preconditions;
+import java.util.Objects;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.client.ContainerBlockID;
@@ -71,8 +71,7 @@ protected OzoneBlockTokenIdentifier createTokenIdentifier() {
   @Override
   protected Object getService(ContainerCommandRequestProtoOrBuilder cmd) {
     BlockID blockID = HddsUtils.getBlockID(cmd);
-    Preconditions.checkNotNull(blockID,
-        "no blockID in %s command", cmd.getCmdType());
+    Objects.requireNonNull(blockID, () -> "blockID == null in command " + cmd.getCmdType());
     return getTokenService(blockID);
   }
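
The replacement above uses the Supplier overload of requireNonNull, so the message is only concatenated when the check actually fails. A self-contained sketch, with LazyMessageDemo and its parameters as illustrative names (not from this patch):

    import java.util.Objects;

    public final class LazyMessageDemo {
      static Object check(Object blockID, String cmdType) {
        // The lambda defers the string concatenation until blockID is null,
        // avoiding allocation on the common non-null path.
        return Objects.requireNonNull(blockID,
            () -> "blockID == null in command " + cmdType);
      }
    }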
 
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java
index 5430a0f6ce4..5fe79918b9e 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java
@@ -38,6 +38,7 @@
 import java.time.ZonedDateTime;
 import java.util.Date;
 import java.util.List;
+import java.util.Objects;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.Future;
 import java.util.concurrent.locks.Lock;
@@ -487,7 +488,7 @@ private KeyPair generateKeys(SecurityConfig securityConfig)
   private void generateRootCertificate(
       SecurityConfig securityConfig, KeyPair key)
       throws IOException, SCMSecurityException {
-    Preconditions.checkNotNull(this.config);
+    Objects.requireNonNull(this.config, "this.config == null");
     ZonedDateTime beginDate = ZonedDateTime.now();
     ZonedDateTime endDate = beginDate.plus(securityConfig.getMaxCertificateDuration());
     SelfSignedCertificate.Builder builder = SelfSignedCertificate.newBuilder()
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/profile/DefaultProfile.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/profile/DefaultProfile.java
index 6d5c2d21b1e..b4c7ad5d4e1 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/profile/DefaultProfile.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/profile/DefaultProfile.java
@@ -20,7 +20,6 @@
 import static org.bouncycastle.asn1.x509.KeyPurposeId.id_kp_clientAuth;
 import static org.bouncycastle.asn1.x509.KeyPurposeId.id_kp_serverAuth;
 
-import com.google.common.base.Preconditions;
 import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.util.AbstractMap.SimpleEntry;
@@ -28,6 +27,7 @@
 import java.util.BitSet;
 import java.util.HashSet;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 import java.util.function.BiPredicate;
 import java.util.stream.Collectors;
@@ -261,7 +261,7 @@ public boolean isSupportedExtension(Extension extension) {
 
   @Override
   public boolean validateExtension(Extension extension) {
-    Preconditions.checkNotNull(extension, "Extension cannot be null");
+    Objects.requireNonNull(extension, "Extension cannot be null");
 
     if (!isSupportedExtension(extension)) {
       LOG.error("Unsupported Extension found: {} ",
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
index a4ae1a79413..2ddc18b55f5 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
@@ -1354,7 +1354,7 @@ public synchronized CompletableFuture<Void> getRootCaRotationListener(
   }
 
   public synchronized void startCertificateRenewerService() {
-    Preconditions.checkNotNull(getCertificate(),
+    Objects.requireNonNull(getCertificate(),
         "Component certificate should not be empty");
     // Schedule task to refresh certificate before it expires
     Duration gracePeriod = securityConfig.getRenewalGracePeriod();
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
index 8ea8f6136c3..0ac09e68c1a 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
@@ -38,6 +38,7 @@
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.stream.Collectors;
@@ -182,7 +183,7 @@ abstract static class DatanodeDetailsJacksonMixIn {
    */
   public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
       EVENT_TYPE event, EventHandler<PAYLOAD> handler) {
-    Preconditions.checkNotNull(handler, "Handler should not be null.");
+    Objects.requireNonNull(handler, "Handler should not be null.");
     validateEvent(event);
     String executorName = getExecutorName(event, handler);
     SingleThreadExecutor<PAYLOAD> executor =
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
index 17d8ec3e942..757ef9e4689 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
@@ -18,11 +18,11 @@
 package org.apache.hadoop.hdds.server.events;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.function.Predicate;
@@ -77,7 +77,7 @@ public EventWatcher(String name, Event<TIMEOUT_PAYLOAD> startEvent,
     this.completionEvent = completionEvent;
     this.leaseManager = leaseManager;
     this.metrics = new EventWatcherMetrics();
-    Preconditions.checkNotNull(name);
+    Objects.requireNonNull(name, "name == null");
     if (name.equals("")) {
       name = getClass().getSimpleName();
     }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java
index 44afe93fb43..0b908bbf6ff 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java
@@ -44,6 +44,7 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Properties;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -455,7 +456,7 @@ public Builder withoutDefaultApps() {
     }
 
     public HttpServer2 build() throws IOException {
-      Preconditions.checkNotNull(name, "name is not set");
+      Objects.requireNonNull(name, "name is not set");
       Preconditions.checkState(!endpoints.isEmpty(), "No endpoints specified");
 
       if (hostName == null) {
@@ -604,7 +605,7 @@ private HttpServer2(final Builder b) throws IOException {
   }
 
   private void initializeWebServer(Builder builder) throws IOException {
-    Preconditions.checkNotNull(webAppContext);
+    Objects.requireNonNull(webAppContext, "webAppContext == null");
 
     int maxThreads = builder.conf.getInt(HTTP_MAX_THREADS_KEY, -1);
     // If HTTP_MAX_THREADS is not configured, QueueThreadPool() will use the
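
Only the null checks migrate to the JDK in this file; Preconditions.checkState(!endpoints.isEmpty(), ...) stays on Guava, presumably because requireNonNull signals NullPointerException while a state violation calls for IllegalStateException. A plain-JDK sketch of the distinction (BuildChecks and its parameters are illustrative):

    import java.util.List;
    import java.util.Objects;

    final class BuildChecks {
      static void validate(String name, List<String> endpoints) {
        // Missing reference: NullPointerException, via the JDK.
        Objects.requireNonNull(name, "name is not set");
        // Invalid state: IllegalStateException; the JDK has no one-call
        // equivalent of Guava's checkState, hence Guava remains above.
        if (endpoints.isEmpty()) {
          throw new IllegalStateException("No endpoints specified");
        }
      }
    }
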
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/CodecRegistry.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/CodecRegistry.java
index 7d7d7ad7710..e79a619beb2 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/CodecRegistry.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/CodecRegistry.java
@@ -17,7 +17,6 @@
 
 package org.apache.hadoop.hdds.utils.db;
 
-import com.google.common.base.Preconditions;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.HashMap;
@@ -123,7 +122,7 @@ public <T> T copyObject(T object, Class<T> format) {
   * @return byte array to store it in the kv store.
    */
   public <T> byte[] asRawData(T object) throws IOException {
-    Preconditions.checkNotNull(object,
+    Objects.requireNonNull(object,
         "Null value shouldn't be persisted in the database");
     Codec<T> codec = getCodec(object);
     return codec.toPersistedFormat(object);
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java
index b2ebf627ee5..16dde4dad2a 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java
@@ -19,13 +19,13 @@
 
 import static org.rocksdb.RocksDB.DEFAULT_COLUMN_FAMILY;
 
-import com.google.common.base.Preconditions;
 import java.io.File;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Objects;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedConfigOptions;
@@ -81,7 +81,7 @@ public static File getConfigLocation() {
    * @return Name of the DB File options
    */
   public static String getOptionsFileNameFromDB(String dbFileName) {
-    Preconditions.checkNotNull(dbFileName);
+    Objects.requireNonNull(dbFileName, "dbFileName == null");
     return dbFileName.isEmpty() ? "" : dbFileName + ".ini";
   }
 
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
index f935f6d58f8..c92dd5c01de 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
@@ -28,7 +28,6 @@
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_DELTA_UPDATE_DATA_SIZE_MAX_LIMIT_DEFAULT;
 import static org.rocksdb.RocksDB.DEFAULT_COLUMN_FAMILY;
 
-import com.google.common.base.Preconditions;
 import java.io.File;
 import java.nio.file.Path;
 import java.nio.file.Paths;
@@ -278,13 +277,13 @@ public DBStoreBuilder setDefaultCFOptions(
   }
 
   public DBStoreBuilder setPath(Path path) {
-    Preconditions.checkNotNull(path);
+    Objects.requireNonNull(path, "path == null");
     dbPath = path;
     return this;
   }
 
   public DBStoreBuilder setOptionsPath(Path optionsPath) {
-    Preconditions.checkNotNull(optionsPath);
+    Objects.requireNonNull(optionsPath, "optionsPath == null");
     this.optionsPath = optionsPath;
     return this;
   }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
index cbd1c32c438..c833fed6ab4 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
@@ -36,6 +36,7 @@
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 import java.util.function.Function;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
@@ -89,8 +90,8 @@ public class RDBStore implements DBStore {
                   ConfigurationSource configuration,
                   boolean enableRocksDBMetrics)
       throws RocksDatabaseException {
-    Preconditions.checkNotNull(dbFile, "DB file location cannot be null");
-    Preconditions.checkNotNull(families);
+    Objects.requireNonNull(dbFile, "DB file location cannot be null");
+    Objects.requireNonNull(families, "families == null");
     Preconditions.checkArgument(!families.isEmpty());
     this.maxDbUpdatesSizeThreshold = maxDbUpdatesSizeThreshold;
     dbLocation = dbFile;
@@ -99,7 +100,7 @@ public class RDBStore implements DBStore {
 
     try {
       if (enableCompactionDag) {
-        Preconditions.checkNotNull(differLockSupplier, "Differ Lock supplier cannot be null when " +
+        Objects.requireNonNull(differLockSupplier, "Differ Lock supplier cannot be null when " +
             "compaction dag is enabled");
         rocksDBCheckpointDiffer = RocksDBCheckpointDifferHolder.getInstance(
             getSnapshotMetadataDir(),
@@ -154,7 +155,7 @@ public class RDBStore implements DBStore {
 
       if (enableCompactionDag) {
         ColumnFamily ssInfoTableCF = db.getColumnFamily(SNAPSHOT_INFO_TABLE);
-        Preconditions.checkNotNull(ssInfoTableCF,
+        Objects.requireNonNull(ssInfoTableCF,
             "SnapshotInfoTable column family handle should not be null");
         // Set CF handle in differ to be used in DB listener
         rocksDBCheckpointDiffer.setSnapshotInfoTableCFHandle(
@@ -162,7 +163,7 @@ public class RDBStore implements DBStore {
         // Set CF handle in differ to be store compaction log entry.
         ColumnFamily compactionLogTableCF =
             db.getColumnFamily(COMPACTION_LOG_TABLE);
-        Preconditions.checkNotNull(compactionLogTableCF,
+        Objects.requireNonNull(compactionLogTableCF,
             "CompactionLogTable column family handle should not be null.");
         rocksDBCheckpointDiffer.setCompactionLogTableCFHandle(
             compactionLogTableCF.getHandle());
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
index ebb3eab5c45..7a74258936f 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
@@ -19,9 +19,9 @@
 
 import static org.apache.hadoop.ozone.common.Storage.STORAGE_FILE_VERSION;
 
-import com.google.common.base.Preconditions;
 import java.io.File;
 import java.io.IOException;
+import java.util.Objects;
 import java.util.Properties;
 import java.util.UUID;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
@@ -79,8 +79,8 @@ public class StorageInfo {
    */
   public StorageInfo(NodeType type, String cid, long cT, int layout)
       throws IOException {
-    Preconditions.checkNotNull(type);
-    Preconditions.checkNotNull(cid);
+    Objects.requireNonNull(type, "type == null");
+    Objects.requireNonNull(cid, "cid == null");
     properties.setProperty(NODE_TYPE, type.name());
     properties.setProperty(CLUSTER_ID, cid);
     properties.setProperty(CREATION_TIME, String.valueOf(cT));
@@ -166,7 +166,7 @@ public void setLayoutVersion(int version) {
   private void verifyNodeType(NodeType type)
       throws InconsistentStorageStateException {
     NodeType nodeType = getNodeType();
-    Preconditions.checkNotNull(nodeType);
+    Objects.requireNonNull(nodeType, "nodeType == null");
     if (type != nodeType) {
       throw new InconsistentStorageStateException("Expected NodeType: " + type +
           ", but found: " + nodeType);
@@ -176,7 +176,7 @@ private void verifyNodeType(NodeType type)
   private void verifyClusterId()
       throws InconsistentStorageStateException {
     String clusterId = getClusterID();
-    Preconditions.checkNotNull(clusterId);
+    Objects.requireNonNull(clusterId, "clusterId == null");
     if (clusterId.isEmpty()) {
       throw new InconsistentStorageStateException("Cluster ID not found");
     }
@@ -184,7 +184,7 @@ private void verifyClusterId()
 
   private void verifyCreationTime() {
     Long creationTime = getCreationTime();
-    Preconditions.checkNotNull(creationTime);
+    Objects.requireNonNull(creationTime, "creationTime == null");
   }
 
   public void writeTo(File to)

