This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new aa21f1141f0 HDDS-14089. Replace Preconditions.checkNotNull in ozone-client and -common (#9453)
aa21f1141f0 is described below
commit aa21f1141f035cdf432d6651ff6f743872e10539
Author: ChenChen Lai <[email protected]>
AuthorDate: Sun Dec 7 22:49:00 2025 +0800
HDDS-14089. Replace Preconditions.checkNotNull in ozone-client and -common (#9453)
---
.../apache/hadoop/ozone/client/OzoneBucket.java | 4 +-
.../hadoop/ozone/client/OzoneClientFactory.java | 16 ++++----
.../ozone/client/OzoneMultipartUploadList.java | 4 +-
.../apache/hadoop/ozone/client/OzoneVolume.java | 4 +-
.../org/apache/hadoop/ozone/client/TenantArgs.java | 4 +-
.../client/checksum/ECBlockChecksumComputer.java | 5 ++-
.../client/io/BlockDataStreamOutputEntryPool.java | 5 ++-
.../client/io/BlockOutputStreamEntryPool.java | 5 ++-
.../hadoop/ozone/client/io/ECKeyOutputStream.java | 3 +-
.../ozone/client/io/KeyDataStreamOutput.java | 5 ++-
.../hadoop/ozone/client/io/KeyOutputStream.java | 5 ++-
.../apache/hadoop/ozone/client/rpc/RpcClient.java | 47 +++++++++++-----------
.../main/java/org/apache/hadoop/ozone/OFSPath.java | 6 +--
.../ozone/om/ha/OMFailoverProxyProviderBase.java | 8 ++--
.../hadoop/ozone/om/helpers/OmBucketArgs.java | 6 +--
.../hadoop/ozone/om/helpers/OmBucketInfo.java | 9 ++---
.../hadoop/ozone/om/helpers/OmRangerSyncArgs.java | 4 +-
.../hadoop/ozone/om/helpers/OmTenantArgs.java | 6 +--
.../hadoop/ozone/om/helpers/OmVolumeArgs.java | 6 +--
.../hadoop/ozone/om/helpers/ServiceInfo.java | 6 +--
.../hadoop/ozone/om/helpers/SnapshotInfo.java | 6 +--
...OzoneManagerProtocolClientSideTranslatorPB.java | 5 ++-
.../hadoop/ozone/security/GDPRSymmetricKey.java | 5 ++-
.../apache/hadoop/ozone/security/acl/OzoneObj.java | 6 +--
.../apache/hadoop/ozone/web/utils/OzoneUtils.java | 4 +-
25 files changed, 95 insertions(+), 89 deletions(-)
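For readers skimming the diff: every change below follows the same pattern — Guava's Preconditions.checkNotNull(obj, message) is swapped for the JDK's java.util.Objects.requireNonNull(obj, message), which throws the same NullPointerException with the supplied message. The snippet below is only an illustrative sketch of that pattern (the class and field names are invented, not taken from this commit):

    import java.util.Objects;

    public final class Example {
      private final Object proxy;

      Example(Object proxy) {
        // Before: Preconditions.checkNotNull(proxy, "Client proxy is not set.");
        // After: Objects.requireNonNull throws the same NullPointerException and
        // also returns its argument, so the check can be folded into the assignment.
        this.proxy = Objects.requireNonNull(proxy, "Client proxy is not set.");
      }
    }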
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index b9649f09bb1..56642742422 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -23,7 +23,6 @@
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND;
import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.google.common.base.Preconditions;
import java.io.IOException;
import java.time.Instant;
import java.util.ArrayList;
@@ -32,6 +31,7 @@
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
+import java.util.Objects;
import java.util.Stack;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
@@ -1101,7 +1101,7 @@ public boolean isSourcePathExist() {
public static Builder newBuilder(ConfigurationSource conf,
ClientProtocol proxy) {
- Preconditions.checkNotNull(proxy, "Client proxy is not set.");
+ Objects.requireNonNull(proxy, "Client proxy is not set.");
return new Builder(conf, proxy);
}
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
index f67fb181aca..2e4edee5c6b 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
@@ -20,10 +20,10 @@
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY;
-import com.google.common.base.Preconditions;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
+import java.util.Objects;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.HddsUtils;
@@ -96,9 +96,9 @@ public static OzoneClient getRpcClient() throws IOException {
public static OzoneClient getRpcClient(String omHost, Integer omRpcPort,
MutableConfigurationSource config)
throws IOException {
- Preconditions.checkNotNull(omHost);
- Preconditions.checkNotNull(omRpcPort);
- Preconditions.checkNotNull(config);
+ Objects.requireNonNull(omHost, "omHost == null");
+ Objects.requireNonNull(omRpcPort, "omRpcPort == null");
+ Objects.requireNonNull(config, "config == null");
OmUtils.resolveOmHost(omHost, omRpcPort);
config.set(OZONE_OM_ADDRESS_KEY, omHost + ":" + omRpcPort);
return getRpcClient(getClientProtocol(config), config);
@@ -119,8 +119,8 @@ public static OzoneClient getRpcClient(String omHost, Integer omRpcPort,
*/
public static OzoneClient getRpcClient(String omServiceId,
ConfigurationSource config) throws IOException {
- Preconditions.checkNotNull(omServiceId);
- Preconditions.checkNotNull(config);
+ Objects.requireNonNull(omServiceId, "omServiceId == null");
+ Objects.requireNonNull(config, "config == null");
if (OmUtils.isOmHAServiceId(config, omServiceId)) {
return getRpcClient(getClientProtocol(config, omServiceId), config);
} else {
@@ -143,7 +143,7 @@ public static OzoneClient getRpcClient(String omServiceId,
*/
public static OzoneClient getRpcClient(ConfigurationSource config)
throws IOException {
- Preconditions.checkNotNull(config);
+ Objects.requireNonNull(config, "config == null");
// Doing this explicitly so that when service ids are defined in the
// configuration, we don't fall back to default ozone.om.address defined
@@ -185,7 +185,7 @@ private static OzoneClient getRpcClient(ClientProtocol clientProtocol,
*/
public static OzoneClient getOzoneClient(Configuration conf,
Token<OzoneTokenIdentifier> token) throws IOException {
- Preconditions.checkNotNull(token, "Null token is not allowed");
+ Objects.requireNonNull(token, "Null token is not allowed");
OzoneTokenIdentifier tokenId = new OzoneTokenIdentifier();
ByteArrayInputStream buf = new ByteArrayInputStream(
token.getIdentifier());
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadList.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadList.java
index 1a71182fc13..2d3be9d79fe 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadList.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadList.java
@@ -17,8 +17,8 @@
package org.apache.hadoop.ozone.client;
-import com.google.common.base.Preconditions;
import java.util.List;
+import java.util.Objects;
/**
* List of in-flight MPU upoads.
@@ -35,7 +35,7 @@ public OzoneMultipartUploadList(
String nextKeyMarker,
String nextUploadIdMarker,
boolean isTruncated) {
- Preconditions.checkNotNull(uploads);
+ Objects.requireNonNull(uploads, "uploads == null");
this.uploads = uploads;
this.nextKeyMarker = nextKeyMarker;
this.nextUploadIdMarker = nextUploadIdMarker;
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
index d742ebe8d18..333b8f1de5e 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
@@ -19,7 +19,6 @@
import static org.apache.hadoop.ozone.OzoneConsts.QUOTA_RESET;
-import com.google.common.base.Preconditions;
import java.io.IOException;
import java.time.Instant;
import java.util.ArrayList;
@@ -28,6 +27,7 @@
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
+import java.util.Objects;
import org.apache.hadoop.hdds.client.OzoneQuota;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
@@ -401,7 +401,7 @@ public long getRefCount() {
public static Builder newBuilder(ConfigurationSource conf,
ClientProtocol proxy) {
- Preconditions.checkNotNull(proxy, "Client proxy is not set.");
+ Objects.requireNonNull(proxy, "Client proxy is not set.");
return new Builder(conf, proxy);
}
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/TenantArgs.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/TenantArgs.java
index 941678a332f..e3903fd15cc 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/TenantArgs.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/TenantArgs.java
@@ -17,7 +17,7 @@
package org.apache.hadoop.ozone.client;
-import com.google.common.base.Preconditions;
+import java.util.Objects;
/**
* This class encapsulates the arguments for creating a tenant.
@@ -91,7 +91,7 @@ public Builder setForceCreationWhenVolumeExists(
* @return TenantArgs.
*/
public TenantArgs build() {
- Preconditions.checkNotNull(volumeName);
+ Objects.requireNonNull(volumeName, "volumeName == null");
return new TenantArgs(volumeName, forceCreationWhenVolumeExists);
}
}
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java
index 0453b2734d1..7293c7e29be 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java
@@ -22,6 +22,7 @@
import java.nio.ByteBuffer;
import java.security.MessageDigest;
import java.util.List;
+import java.util.Objects;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.OzoneClientConfig;
@@ -80,7 +81,7 @@ private void computeMd5Crc() {
int parityBytes = getParityBytes(chunkSize, bytesPerCrc);
ByteString stripeChecksum = chunkInfo.getStripeChecksum();
- Preconditions.checkNotNull(stripeChecksum);
+ Objects.requireNonNull(stripeChecksum, "stripeChecksum == null");
final int checksumSize = stripeChecksum.size();
Preconditions.checkArgument(checksumSize % 4 == 0,
"Checksum Bytes size does not match");
@@ -130,7 +131,7 @@ private void computeCompositeCrc() throws IOException {
// Total parity checksum bytes per stripe to remove
int parityBytes = getParityBytes(chunkSize, bytesPerCrc);
- Preconditions.checkNotNull(stripeChecksum);
+ Objects.requireNonNull(stripeChecksum, "stripeChecksum == null");
final int checksumSize = stripeChecksum.size();
Preconditions.checkArgument(checksumSize % 4 == 0,
"Checksum Bytes size does not match");
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
index 77d1fc6fce3..e5ffc2211e7 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
@@ -25,6 +25,7 @@
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
+import java.util.Objects;
import org.apache.hadoop.hdds.client.ContainerBlockID;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.scm.OzoneClientConfig;
@@ -112,7 +113,7 @@ public void addPreallocateBlocks(OmKeyLocationInfoGroup version,
}
private void addKeyLocationInfo(OmKeyLocationInfo subKeyInfo) {
- Preconditions.checkNotNull(subKeyInfo.getPipeline());
+ Objects.requireNonNull(subKeyInfo.getPipeline(), "subKeyInfo.getPipeline() == null");
BlockDataStreamOutputEntry.Builder builder =
new BlockDataStreamOutputEntry.Builder()
.setBlockID(subKeyInfo.getBlockID())
@@ -276,7 +277,7 @@ BlockDataStreamOutputEntry allocateBlockIfNeeded() throws IOException {
currentStreamIndex++;
}
if (streamEntries.size() <= currentStreamIndex) {
- Preconditions.checkNotNull(omClient);
+ Objects.requireNonNull(omClient, "omClient == null");
// allocate a new block, if a exception happens, log an error and
// throw exception to the caller directly, and the write fails.
allocateNewBlock();
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java
index 3ebf5133139..d36e922c447 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java
@@ -26,6 +26,7 @@
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
+import java.util.Objects;
import java.util.concurrent.ExecutorService;
import java.util.function.Supplier;
import org.apache.hadoop.hdds.client.ContainerBlockID;
@@ -170,7 +171,7 @@ BlockOutputStreamEntry createStreamEntry(OmKeyLocationInfo subKeyInfo, boolean f
}
private synchronized void addKeyLocationInfo(OmKeyLocationInfo subKeyInfo,
boolean forRetry) {
- Preconditions.checkNotNull(subKeyInfo.getPipeline());
+ Objects.requireNonNull(subKeyInfo.getPipeline(), "subKeyInfo.getPipeline() == null");
streamEntries.add(createStreamEntry(subKeyInfo, forRetry));
}
@@ -387,7 +388,7 @@ synchronized BlockOutputStreamEntry allocateBlockIfNeeded(boolean forRetry) thro
currentStreamIndex++;
}
if (streamEntries.size() <= currentStreamIndex) {
- Preconditions.checkNotNull(omClient);
+ Objects.requireNonNull(omClient, "omClient == null");
// allocate a new block, if a exception happens, log an error and
// throw exception to the caller directly, and the write fails.
allocateNewBlock(forRetry);
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java
index 193b4d078f5..a3b6e128132 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java
@@ -24,6 +24,7 @@
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
+import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
@@ -394,7 +395,7 @@ private void writeToOutputStream(ECBlockOutputStreamEntry current,
private void handleException(BlockOutputStreamEntry streamEntry,
IOException exception) throws IOException {
Throwable t = HddsClientUtils.checkForException(exception);
- Preconditions.checkNotNull(t);
+ Objects.requireNonNull(t, "t == null");
boolean containerExclusionException = checkIfContainerToExclude(t);
if (containerExclusionException) {
getBlockOutputStreamEntryPool().getExcludeList().addPipeline(streamEntry.getPipeline().getId());
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java
index dedc36af919..f9a47a9f55e 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java
@@ -24,6 +24,7 @@
import java.util.Collection;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import java.util.UUID;
import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.hdds.client.ReplicationConfig;
@@ -253,7 +254,7 @@ public void hsync() throws IOException {
private void handleException(BlockDataStreamOutputEntry streamEntry,
IOException exception) throws IOException {
Throwable t = HddsClientUtils.checkForException(exception);
- Preconditions.checkNotNull(t);
+ Objects.requireNonNull(t, "t == null");
boolean retryFailure = checkForRetryFailure(t);
boolean containerExclusionException = false;
if (!retryFailure) {
@@ -266,7 +267,7 @@ private void handleException(BlockDataStreamOutputEntry streamEntry,
streamEntry.setCurrentPosition(totalSuccessfulFlushedData);
long containerId = streamEntry.getBlockID().getContainerID();
Collection<DatanodeDetails> failedServers = streamEntry.getFailedServers();
- Preconditions.checkNotNull(failedServers);
+ Objects.requireNonNull(failedServers, "failedServers == null");
if (!containerExclusionException) {
BlockDataStreamOutputEntry currentStreamEntry =
blockDataStreamOutputEntryPool.getCurrentStreamEntry();
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
index c9e5a312ca9..778ac7e2f4f 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
@@ -25,6 +25,7 @@
import java.util.Collection;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import java.util.UUID;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.locks.Condition;
@@ -357,7 +358,7 @@ private void handleExceptionInternal(BlockOutputStreamEntry streamEntry, IOExcep
}
Throwable t = HddsClientUtils.checkForException(exception);
- Preconditions.checkNotNull(t);
+ Objects.requireNonNull(t, "t == null");
boolean retryFailure = checkForRetryFailure(t);
boolean containerExclusionException = false;
if (!retryFailure) {
@@ -384,7 +385,7 @@ private void handleExceptionInternal(BlockOutputStreamEntry streamEntry, IOExcep
bufferedDataLen <= streamBufferArgs.getStreamBufferMaxSize());
long containerId = streamEntry.getBlockID().getContainerID();
Collection<DatanodeDetails> failedServers = streamEntry.getFailedServers();
- Preconditions.checkNotNull(failedServers);
+ Objects.requireNonNull(failedServers, "failedServers == null");
ExcludeList excludeList = blockOutputStreamEntryPool.getExcludeList();
if (!failedServers.isEmpty()) {
excludeList.addDatanodes(failedServers);
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 4f6ddd76baf..2cd7f11b3df 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -49,6 +49,7 @@
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.SynchronousQueue;
@@ -235,7 +236,7 @@ public class RpcClient implements ClientProtocol {
*/
public RpcClient(ConfigurationSource conf, String omServiceId)
throws IOException {
- Preconditions.checkNotNull(conf);
+ Objects.requireNonNull(conf, "conf == null");
this.conf = conf;
this.ugi = UserGroupInformation.getCurrentUser();
replicationConfigValidator =
@@ -433,7 +434,7 @@ public void createVolume(String volumeName) throws IOException {
public void createVolume(String volumeName, VolumeArgs volArgs)
throws IOException {
verifyVolumeName(volumeName);
- Preconditions.checkNotNull(volArgs);
+ Objects.requireNonNull(volArgs, "volArgs == null");
verifyCountsQuota(volArgs.getQuotaInNamespace());
verifySpaceQuota(volArgs.getQuotaInBytes());
@@ -476,7 +477,7 @@ public void createVolume(String volumeName, VolumeArgs volArgs)
public boolean setVolumeOwner(String volumeName, String owner)
throws IOException {
verifyVolumeName(volumeName);
- Preconditions.checkNotNull(owner);
+ Objects.requireNonNull(owner, "owner == null");
return ozoneManagerClient.setOwner(volumeName, owner);
}
@@ -614,7 +615,7 @@ public void createBucket(
throws IOException {
verifyVolumeName(volumeName);
verifyBucketName(bucketName);
- Preconditions.checkNotNull(bucketArgs);
+ Objects.requireNonNull(bucketArgs, "bucketArgs == null");
verifyCountsQuota(bucketArgs.getQuotaInNamespace());
verifySpaceQuota(bucketArgs.getQuotaInBytes());
if (omVersion
@@ -871,7 +872,7 @@ public void createTenant(String tenantId, TenantArgs tenantArgs)
throws IOException {
Preconditions.checkArgument(StringUtils.isNotBlank(tenantId),
"tenantId cannot be null or empty.");
- Preconditions.checkNotNull(tenantArgs);
+ Objects.requireNonNull(tenantArgs, "tenantArgs == null");
final String volumeName = tenantArgs.getVolumeName();
verifyVolumeName(volumeName);
@@ -1182,7 +1183,7 @@ public void setBucketVersioning(
throws IOException {
verifyVolumeName(volumeName);
verifyBucketName(bucketName);
- Preconditions.checkNotNull(versioning);
+ Objects.requireNonNull(versioning, "versioning == null");
OmBucketArgs.Builder builder = OmBucketArgs.newBuilder();
builder.setVolumeName(volumeName)
.setBucketName(bucketName)
@@ -1196,7 +1197,7 @@ public void setBucketStorageType(
throws IOException {
verifyVolumeName(volumeName);
verifyBucketName(bucketName);
- Preconditions.checkNotNull(storageType);
+ Objects.requireNonNull(storageType, "storageType == null");
OmBucketArgs.Builder builder = OmBucketArgs.newBuilder();
builder.setVolumeName(volumeName)
.setBucketName(bucketName)
@@ -1252,7 +1253,7 @@ public void setReplicationConfig(
throws IOException {
verifyVolumeName(volumeName);
verifyBucketName(bucketName);
- Preconditions.checkNotNull(replicationConfig);
+ Objects.requireNonNull(replicationConfig, "replicationConfig == null");
if (omVersion
.compareTo(OzoneManagerVersion.ERASURE_CODED_STORAGE_SUPPORT) < 0) {
if (replicationConfig.getReplicationType()
@@ -1517,7 +1518,7 @@ private KeyProvider.KeyVersion getDEK(FileEncryptionInfo feInfo)
UserGroupInformation proxyUser;
if (getThreadLocalS3Auth() != null) {
String userPrincipal = getThreadLocalS3Auth().getUserPrincipal();
- Preconditions.checkNotNull(userPrincipal);
+ Objects.requireNonNull(userPrincipal, "userPrincipal == null");
UserGroupInformation s3gUGI = UserGroupInformation.createRemoteUser(
userPrincipal);
proxyUser = UserGroupInformation.createProxyUser(
@@ -1544,7 +1545,7 @@ public OzoneInputStream getKey(
throws IOException {
verifyVolumeName(volumeName);
verifyBucketName(bucketName);
- Preconditions.checkNotNull(keyName);
+ Objects.requireNonNull(keyName, "keyName == null");
OmKeyInfo keyInfo = getKeyInfo(volumeName, bucketName, keyName, false);
return getInputStreamWithRetryFunction(keyInfo);
}
@@ -1644,7 +1645,7 @@ public void deleteKey(
throws IOException {
verifyVolumeName(volumeName);
verifyBucketName(bucketName);
- Preconditions.checkNotNull(keyName);
+ Objects.requireNonNull(keyName, "keyName == null");
OmKeyArgs keyArgs = new OmKeyArgs.Builder()
.setVolumeName(volumeName)
.setBucketName(bucketName)
@@ -1660,7 +1661,7 @@ public void deleteKeys(
throws IOException {
verifyVolumeName(volumeName);
verifyBucketName(bucketName);
- Preconditions.checkNotNull(keyNameList);
+ Objects.requireNonNull(keyNameList, "keyNameList == null");
OmDeleteKeys omDeleteKeys = new OmDeleteKeys(volumeName, bucketName,
keyNameList);
ozoneManagerClient.deleteKeys(omDeleteKeys);
@@ -1672,7 +1673,7 @@ public Map<String, ErrorInfo> deleteKeys(
throws IOException {
verifyVolumeName(volumeName);
verifyBucketName(bucketName);
- Preconditions.checkNotNull(keyNameList);
+ Objects.requireNonNull(keyNameList, "keyNameList == null");
OmDeleteKeys omDeleteKeys = new OmDeleteKeys(volumeName, bucketName,
keyNameList);
return ozoneManagerClient.deleteKeys(omDeleteKeys, quiet);
@@ -1813,7 +1814,7 @@ public OzoneKeyDetails getS3KeyDetails(String bucketName, String keyName,
private OmKeyInfo getS3KeyInfo(
String bucketName, String keyName, boolean isHeadOp) throws IOException {
verifyBucketName(bucketName);
- Preconditions.checkNotNull(keyName);
+ Objects.requireNonNull(keyName, "keyName == null");
OmKeyArgs keyArgs = new OmKeyArgs.Builder()
// Volume name is not important, as we call GetKeyInfo with
@@ -1836,7 +1837,7 @@ private OmKeyInfo getS3KeyInfo(
private OmKeyInfo getS3PartKeyInfo(
String bucketName, String keyName, int partNumber) throws IOException {
verifyBucketName(bucketName);
- Preconditions.checkNotNull(keyName);
+ Objects.requireNonNull(keyName, "keyName == null");
OmKeyArgs keyArgs = new OmKeyArgs.Builder()
// Volume name is not important, as we call GetKeyInfo with
@@ -1859,9 +1860,9 @@ private OmKeyInfo getS3PartKeyInfo(
public OmKeyInfo getKeyInfo(
String volumeName, String bucketName, String keyName,
boolean forceUpdateContainerCache) throws IOException {
- Preconditions.checkNotNull(volumeName);
- Preconditions.checkNotNull(bucketName);
- Preconditions.checkNotNull(keyName);
+ Objects.requireNonNull(volumeName, "volumeName == null");
+ Objects.requireNonNull(bucketName, "bucketName == null");
+ Objects.requireNonNull(keyName, "keyName == null");
OmKeyArgs keyArgs = new OmKeyArgs.Builder()
.setVolumeName(volumeName)
.setBucketName(bucketName)
@@ -2642,7 +2643,7 @@ public OzoneKey headObject(String volumeName, String bucketName,
String keyName) throws IOException {
verifyVolumeName(volumeName);
verifyBucketName(bucketName);
- Preconditions.checkNotNull(keyName);
+ Objects.requireNonNull(keyName, "keyName == null");
OmKeyArgs keyArgs = new OmKeyArgs.Builder()
.setVolumeName(volumeName)
.setBucketName(bucketName)
@@ -2695,7 +2696,7 @@ public boolean setBucketOwner(String volumeName, String bucketName,
String owner) throws IOException {
verifyVolumeName(volumeName);
verifyBucketName(bucketName);
- Preconditions.checkNotNull(owner);
+ Objects.requireNonNull(owner, "owner == null");
OmBucketArgs.Builder builder = OmBucketArgs.newBuilder();
builder.setVolumeName(volumeName)
.setBucketName(bucketName)
@@ -2744,7 +2745,7 @@ public Map<String, String> getObjectTagging(String volumeName, String bucketName
verifyVolumeName(volumeName);
verifyBucketName(bucketName);
- Preconditions.checkNotNull(keyName);
+ Objects.requireNonNull(keyName, "keyName == null");
OmKeyArgs keyArgs = new OmKeyArgs.Builder()
.setVolumeName(volumeName)
.setBucketName(bucketName)
@@ -2762,7 +2763,7 @@ public void putObjectTagging(String volumeName, String bucketName,
verifyVolumeName(volumeName);
verifyBucketName(bucketName);
- Preconditions.checkNotNull(keyName);
+ Objects.requireNonNull(keyName, "keyName == null");
OmKeyArgs keyArgs = new OmKeyArgs.Builder()
.setVolumeName(volumeName)
.setBucketName(bucketName)
@@ -2781,7 +2782,7 @@ public void deleteObjectTagging(String volumeName, String bucketName,
verifyVolumeName(volumeName);
verifyBucketName(bucketName);
- Preconditions.checkNotNull(keyName);
+ Objects.requireNonNull(keyName, "keyName == null");
OmKeyArgs keyArgs = new OmKeyArgs.Builder()
.setVolumeName(volumeName)
.setBucketName(bucketName)
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java
index bf123c07ba8..55f54c0c32f 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java
@@ -25,13 +25,13 @@
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_OFS_SHARED_TMP_DIR_DEFAULT;
import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
import java.io.IOException;
import java.math.BigInteger;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
+import java.util.Objects;
import java.util.StringTokenizer;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.Path;
@@ -185,7 +185,7 @@ public String toString() {
if (isEmpty()) {
return "";
}
- Preconditions.checkNotNull(authority);
+ Objects.requireNonNull(authority, "authority == null");
StringBuilder sb = new StringBuilder();
if (!isMount()) {
sb.append(volumeName);
@@ -343,7 +343,7 @@ private static String md5Hex(String input) {
*/
@VisibleForTesting
static String getTempMountBucketName(String username) {
- Preconditions.checkNotNull(username);
+ Objects.requireNonNull(username, "username == null");
// TODO: Improve this to "slugify(username)-md5(username)" for better
// readability?
return md5Hex(username);
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProviderBase.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProviderBase.java
index f885f414566..d58bbaf02e4 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProviderBase.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProviderBase.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.ozone.om.ha;
import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
import com.google.protobuf.ServiceException;
import java.io.Closeable;
import java.io.IOException;
@@ -28,6 +27,7 @@
import java.util.HashSet;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.HddsUtils;
@@ -107,9 +107,9 @@ public OMFailoverProxyProviderBase(ConfigurationSource configuration,
OzoneConfigKeys.OZONE_CLIENT_WAIT_BETWEEN_RETRIES_MILLIS_DEFAULT);
loadOMClientConfigs(conf, omServiceId);
- Preconditions.checkNotNull(omProxies);
- Preconditions.checkNotNull(omNodeIDList);
- Preconditions.checkNotNull(omNodeAddressMap);
+ Objects.requireNonNull(omProxies, "omProxies == null");
+ Objects.requireNonNull(omNodeIDList, "omNodeIDList == null");
+ Objects.requireNonNull(omNodeAddressMap, "omNodeAddressMap == null");
nextProxyIndex = 0;
nextProxyOMNodeId = omNodeIDList.get(nextProxyIndex);
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
index 356d9a3fcd8..6491a2ec146 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
@@ -17,9 +17,9 @@
package org.apache.hadoop.ozone.om.helpers;
-import com.google.common.base.Preconditions;
import java.util.LinkedHashMap;
import java.util.Map;
+import java.util.Objects;
import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.ozone.OzoneConsts;
@@ -293,8 +293,8 @@ public Builder setOwnerName(String owner) {
* @return instance of OmBucketArgs.
*/
public OmBucketArgs build() {
- Preconditions.checkNotNull(volumeName);
- Preconditions.checkNotNull(bucketName);
+ Objects.requireNonNull(volumeName, "volumeName == null");
+ Objects.requireNonNull(bucketName, "bucketName == null");
return new OmBucketArgs(this);
}
}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
index 605e9e10a63..8b91b55859d 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
@@ -17,7 +17,6 @@
package org.apache.hadoop.ozone.om.helpers;
-import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import java.util.ArrayList;
import java.util.LinkedHashMap;
@@ -600,10 +599,10 @@ public Builder setDefaultReplicationConfig(
@Override
protected void validate() {
super.validate();
- Preconditions.checkNotNull(volumeName);
- Preconditions.checkNotNull(bucketName);
- Preconditions.checkNotNull(acls);
- Preconditions.checkNotNull(storageType);
+ Objects.requireNonNull(volumeName, "volumeName == null");
+ Objects.requireNonNull(bucketName, "bucketName == null");
+ Objects.requireNonNull(acls, "acls == null");
+ Objects.requireNonNull(storageType, "storageType == null");
}
@Override
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmRangerSyncArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmRangerSyncArgs.java
index 95125182c9f..8d91ec3c04b 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmRangerSyncArgs.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmRangerSyncArgs.java
@@ -17,7 +17,7 @@
package org.apache.hadoop.ozone.om.helpers;
-import com.google.common.base.Preconditions;
+import java.util.Objects;
/**
* This class is used for storing Ranger Sync request args.
@@ -59,7 +59,7 @@ public Builder setNewSyncServiceVersion(long version) {
}
public OmRangerSyncArgs build() {
- Preconditions.checkNotNull(newServiceVersion);
+ Objects.requireNonNull(newServiceVersion, "newServiceVersion == null");
return new OmRangerSyncArgs(newServiceVersion);
}
}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmTenantArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmTenantArgs.java
index df1ce94fa43..049a06f7a12 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmTenantArgs.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmTenantArgs.java
@@ -17,7 +17,7 @@
package org.apache.hadoop.ozone.om.helpers;
-import com.google.common.base.Preconditions;
+import java.util.Objects;
/**
* This class is used for storing Ozone tenant arguments.
@@ -104,8 +104,8 @@ public Builder setForceCreationWhenVolumeExists(
}
public OmTenantArgs build() {
- Preconditions.checkNotNull(tenantId);
- Preconditions.checkNotNull(volumeName);
+ Objects.requireNonNull(tenantId, "tenantId == null");
+ Objects.requireNonNull(volumeName, "volumeName == null");
return new OmTenantArgs(tenantId, volumeName,
forceCreationWhenVolumeExists);
}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
index 0390b922edd..3b028d616e8 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
@@ -375,9 +375,9 @@ public Builder decRefCount() {
@Override
protected void validate() {
super.validate();
- Preconditions.checkNotNull(adminName);
- Preconditions.checkNotNull(ownerName);
- Preconditions.checkNotNull(volume);
+ Objects.requireNonNull(adminName, "adminName == null");
+ Objects.requireNonNull(ownerName, "ownerName == null");
+ Objects.requireNonNull(volume, "volume == null");
Preconditions.checkState(refCount >= 0L, "refCount should not be negative, but was: " + refCount);
}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java
index 728f8bda06f..a94e0025401 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java
@@ -18,11 +18,11 @@
package org.apache.hadoop.ozone.om.helpers;
import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.google.common.base.Preconditions;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
import org.apache.hadoop.ozone.OzoneFsServerDefaults;
import org.apache.hadoop.ozone.OzoneManagerVersion;
@@ -91,8 +91,8 @@ private ServiceInfo(NodeType nodeType,
OzoneManagerVersion omVersion,
OMRoleInfo omRole,
OzoneFsServerDefaults serverDefaults) {
- Preconditions.checkNotNull(nodeType);
- Preconditions.checkNotNull(hostname);
+ Objects.requireNonNull(nodeType, "nodeType == null");
+ Objects.requireNonNull(hostname, "hostname == null");
this.nodeType = nodeType;
this.hostname = hostname;
this.omVersion = omVersion;
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java
index 0e2a4b6ee79..27b29871786 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java
@@ -23,7 +23,6 @@
import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_SEPARATOR;
import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
import com.google.protobuf.ByteString;
import java.time.Instant;
import java.time.ZoneId;
@@ -384,7 +383,7 @@ public Builder setLastTransactionInfo(ByteString lastTransactionInfo) {
}
public SnapshotInfo build() {
- Preconditions.checkNotNull(name);
+ Objects.requireNonNull(name, "name == null");
return new SnapshotInfo(this);
}
}
@@ -537,8 +536,7 @@ public Map<String, String> toAuditMap() {
* Get the name of the checkpoint directory.
*/
public static String getCheckpointDirName(UUID snapshotId, int version) {
- Objects.requireNonNull(snapshotId,
- "SnapshotId is needed to create checkpoint directory");
+ Objects.requireNonNull(snapshotId, "SnapshotId is needed to create
checkpoint directory");
if (version == 0) {
return OM_SNAPSHOT_SEPARATOR + snapshotId;
}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index 802cf967ab5..8e1f036d4f2 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -37,6 +37,7 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import java.util.UUID;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
@@ -833,7 +834,7 @@ private void updateKey(OmKeyArgs args, long clientId, boolean hsync, boolean rec
throws IOException {
CommitKeyRequest.Builder req = CommitKeyRequest.newBuilder();
List<OmKeyLocationInfo> locationInfoList = args.getLocationInfoList();
- Preconditions.checkNotNull(locationInfoList);
+ Objects.requireNonNull(locationInfoList, "locationInfoList == null");
KeyArgs.Builder keyArgsBuilder = KeyArgs.newBuilder()
.setVolumeName(args.getVolumeName())
.setBucketName(args.getBucketName())
@@ -1663,7 +1664,7 @@ public OmMultipartCommitUploadPartInfo commitMultipartUploadPart(
OmKeyArgs omKeyArgs, long clientId) throws IOException {
List<OmKeyLocationInfo> locationInfoList = omKeyArgs.getLocationInfoList();
- Preconditions.checkNotNull(locationInfoList);
+ Objects.requireNonNull(locationInfoList, "locationInfoList == null");
MultipartCommitUploadPartRequest.Builder multipartCommitUploadPartRequest
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/GDPRSymmetricKey.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/GDPRSymmetricKey.java
index 83146d3aaed..d0687ffdbd9 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/GDPRSymmetricKey.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/GDPRSymmetricKey.java
@@ -20,6 +20,7 @@
import com.google.common.base.Preconditions;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;
+import java.util.Objects;
import java.util.function.BiConsumer;
import javax.crypto.Cipher;
import javax.crypto.NoSuchPaddingException;
@@ -76,12 +77,12 @@ public GDPRSymmetricKey(SecureRandom secureRandom)
*/
public GDPRSymmetricKey(String secret, String algorithm)
throws NoSuchPaddingException, NoSuchAlgorithmException {
- Preconditions.checkNotNull(secret, "Secret cannot be null");
+ Objects.requireNonNull(secret, "Secret cannot be null");
//TODO: When we add feature to allow users to customize the secret length,
// we need to update this length check Precondition
Preconditions.checkArgument(secret.length() == 16,
"Secret must be exactly 16 characters");
- Preconditions.checkNotNull(algorithm, "Algorithm cannot be null");
+ Objects.requireNonNull(algorithm, "Algorithm cannot be null");
this.secret = secret;
this.algorithm = algorithm;
this.secretKey = new SecretKeySpec(
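Note that only the null checks move to the JDK here; value checks such as Preconditions.checkArgument (which throws IllegalArgumentException rather than NullPointerException) stay on Guava, as in the GDPRSymmetricKey hunk above. A minimal sketch of that combination, using an invented SecretHolder class rather than code from this commit:

    import com.google.common.base.Preconditions;
    import java.util.Objects;

    class SecretHolder {
      private String secret;

      void setSecret(String secret) {
        // Null check: JDK Objects.requireNonNull -> NullPointerException.
        Objects.requireNonNull(secret, "Secret cannot be null");
        // Value check: Guava checkArgument -> IllegalArgumentException.
        Preconditions.checkArgument(secret.length() == 16,
            "Secret must be exactly 16 characters");
        this.secret = secret;
      }
    }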
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java
index ae3a8ad1592..1aa90da71fb 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java
@@ -19,9 +19,9 @@
import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.StoreType.valueOf;
-import com.google.common.base.Preconditions;
import java.util.LinkedHashMap;
import java.util.Map;
+import java.util.Objects;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
@@ -37,8 +37,8 @@ public abstract class OzoneObj implements IOzoneObj {
OzoneObj(ResourceType resType, StoreType storeType) {
- Preconditions.checkNotNull(resType);
- Preconditions.checkNotNull(storeType);
+ Objects.requireNonNull(resType, "resType == null");
+ Objects.requireNonNull(storeType, "storeType == null");
this.resType = resType;
this.storeType = storeType;
}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java
index 18a280563d8..da3ec46ef71 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java
@@ -17,7 +17,6 @@
package org.apache.hadoop.ozone.web.utils;
-import com.google.common.base.Preconditions;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.nio.charset.Charset;
@@ -25,6 +24,7 @@
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Locale;
+import java.util.Objects;
import java.util.TimeZone;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
@@ -125,7 +125,7 @@ public static String formatTime(long millis) {
* @return time in milliseconds
*/
public static long formatDate(String date) throws ParseException {
- Preconditions.checkNotNull(date, "Date string should not be null.");
+ Objects.requireNonNull(date, "Date string should not be null.");
return DATE_FORMAT.get().parse(date).getTime();
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]