This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new bba8a67831 HDDS-12750. Move StorageTypeProto from ScmServerDatanodeHeartbeatProtocol.proto to hdds.proto (#8208)
bba8a67831 is described below
commit bba8a6783114bb2e9bd05755232e561274b74714
Author: Tsz-Wo Nicholas Sze <[email protected]>
AuthorDate: Thu Apr 3 10:47:24 2025 -0700
HDDS-12750. Move StorageTypeProto from ScmServerDatanodeHeartbeatProtocol.proto to hdds.proto (#8208)
---
.../common/impl/StorageLocationReport.java | 32 +---------------------
.../interface-client/src/main/proto/hdds.proto | 11 ++++++++
.../interface-client/src/main/resources/proto.lock | 25 +++++++++++++++++
.../proto/ScmServerDatanodeHeartbeatProtocol.proto | 10 -------
.../interface-server/src/main/resources/proto.lock | 25 -----------------
.../hadoop/hdds/scm/node/SCMNodeManager.java | 8 ++----
.../org/apache/hadoop/hdds/scm/HddsTestUtils.java | 2 +-
.../hdds/scm/TestSCMCommonPlacementPolicy.java | 2 +-
.../apache/hadoop/hdds/protocol/StorageType.java | 2 +-
.../ozone/om/TestBucketLayoutWithOlderClient.java | 3 +-
.../src/main/proto/OmClientProtocol.proto | 11 ++------
.../interface-client/src/main/resources/proto.lock | 27 ++----------------
...TestOzoneManagerDoubleBufferWithOMResponse.java | 3 +-
.../ozone/om/request/OMRequestTestUtils.java | 2 +-
.../request/TestOMClientRequestWithUserInfo.java | 3 +-
.../request/bucket/TestOMBucketCreateRequest.java | 2 +-
.../hadoop/ozone/recon/api/TestEndpoints.java | 2 +-
.../ozone/recon/api/TestOpenContainerCount.java | 2 +-
.../hadoop/ozone/freon/SCMThroughputBenchmark.java | 2 +-
19 files changed, 59 insertions(+), 115 deletions(-)
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
index 022863c2a7..9fd094c6a6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
@@ -20,9 +20,9 @@
import java.io.IOException;
import net.jcip.annotations.Immutable;
import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto;
import org.apache.hadoop.ozone.container.common.interfaces.StorageLocationReportMXBean;
/**
@@ -227,36 +227,6 @@ public static StorageLocationReport getFromProtobuf(StorageReportProto report)
return builder.build();
}
- /**
- * Returns the StorageLocationReport from the protoBuf message.
- * @param report MetadataStorageReportProto
- * @return StorageLocationReport
- * @throws IOException in case of invalid storage type
- */
-
- public static StorageLocationReport getMetadataFromProtobuf(
- MetadataStorageReportProto report) throws IOException {
- StorageLocationReport.Builder builder = StorageLocationReport.newBuilder();
- builder.setStorageLocation(report.getStorageLocation());
- if (report.hasCapacity()) {
- builder.setCapacity(report.getCapacity());
- }
- if (report.hasScmUsed()) {
- builder.setScmUsed(report.getScmUsed());
- }
- if (report.hasStorageType()) {
- builder.setStorageType(getStorageType(report.getStorageType()));
- }
- if (report.hasRemaining()) {
- builder.setRemaining(report.getRemaining());
- }
-
- if (report.hasFailed()) {
- builder.setFailed(report.getFailed());
- }
- return builder.build();
- }
-
/**
* Returns StorageLocation.Builder instance.
*
diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
index 6cf58d3b1e..45dd22e49f 100644
--- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto
+++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
@@ -150,6 +150,17 @@ message KeyValue {
optional string value = 2;
}
+/**
+ * Types of storage media.
+ */
+enum StorageTypeProto {
+ DISK = 1;
+ SSD = 2;
+ ARCHIVE = 3;
+ RAM_DISK = 4;
+ PROVIDED = 5;
+}
+
/**
* Type of the node.
*/
diff --git a/hadoop-hdds/interface-client/src/main/resources/proto.lock b/hadoop-hdds/interface-client/src/main/resources/proto.lock
index e97243906e..2c027a3175 100644
--- a/hadoop-hdds/interface-client/src/main/resources/proto.lock
+++ b/hadoop-hdds/interface-client/src/main/resources/proto.lock
@@ -1827,6 +1827,31 @@
}
]
},
+ {
+ "name": "StorageTypeProto",
+ "enum_fields": [
+ {
+ "name": "DISK",
+ "integer": 1
+ },
+ {
+ "name": "SSD",
+ "integer": 2
+ },
+ {
+ "name": "ARCHIVE",
+ "integer": 3
+ },
+ {
+ "name": "RAM_DISK",
+ "integer": 4
+ },
+ {
+ "name": "PROVIDED",
+ "integer": 5
+ }
+ ]
+ },
{
"name": "NodeType",
"enum_fields": [
diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
index 648cf77883..edefc23ce5 100644
--- a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
+++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
@@ -193,16 +193,6 @@ message MetadataStorageReportProto {
optional bool failed = 6 [default = false];
}
-/**
- * Types of recognized storage media.
- */
-enum StorageTypeProto {
- DISK = 1;
- SSD = 2;
- ARCHIVE = 3;
- RAM_DISK = 4;
- PROVIDED = 5;
-}
message ContainerReportsProto {
repeated ContainerReplicaProto reports = 1;
diff --git a/hadoop-hdds/interface-server/src/main/resources/proto.lock b/hadoop-hdds/interface-server/src/main/resources/proto.lock
index 10b246418d..3ed18f25cc 100644
--- a/hadoop-hdds/interface-server/src/main/resources/proto.lock
+++ b/hadoop-hdds/interface-server/src/main/resources/proto.lock
@@ -805,31 +805,6 @@
}
]
},
- {
- "name": "StorageTypeProto",
- "enum_fields": [
- {
- "name": "DISK",
- "integer": 1
- },
- {
- "name": "SSD",
- "integer": 2
- },
- {
- "name": "ARCHIVE",
- "integer": 3
- },
- {
- "name": "RAM_DISK",
- "integer": 4
- },
- {
- "name": "PROVIDED",
- "integer": 5
- }
- ]
- },
{
"name": "ContainerReplicaProto.State",
"enum_fields": [
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 2da48d175b..5d72d7428f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -56,7 +56,7 @@
import org.apache.hadoop.hdds.protocol.DatanodeID;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandQueueReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
@@ -1098,16 +1098,14 @@ public Map<String, Long> getNodeInfo() {
}
List<StorageReportProto> storageReportProtos = node.getStorageReports();
for (StorageReportProto reportProto : storageReportProtos) {
- if (reportProto.getStorageType() ==
- StorageContainerDatanodeProtocolProtos.StorageTypeProto.DISK) {
+ if (reportProto.getStorageType() == StorageTypeProto.DISK) {
nodeInfo.compute(keyPrefix + UsageMetrics.DiskCapacity.name(),
(k, v) -> v + reportProto.getCapacity());
nodeInfo.compute(keyPrefix + UsageMetrics.DiskRemaining.name(),
(k, v) -> v + reportProto.getRemaining());
nodeInfo.compute(keyPrefix + UsageMetrics.DiskUsed.name(),
(k, v) -> v + reportProto.getScmUsed());
- } else if (reportProto.getStorageType() ==
- StorageContainerDatanodeProtocolProtos.StorageTypeProto.SSD) {
+ } else if (reportProto.getStorageType() == StorageTypeProto.SSD) {
nodeInfo.compute(keyPrefix + UsageMetrics.SSDCapacity.name(),
(k, v) -> v + reportProto.getCapacity());
nodeInfo.compute(keyPrefix + UsageMetrics.SSDRemaining.name(),
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
index 9551ca63b4..76356c2282 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
@@ -39,6 +39,7 @@
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ClosePipelineInfo;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
@@ -51,7 +52,6 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java
index 920fd46d8c..818a4eda15 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java
@@ -17,8 +17,8 @@
package org.apache.hadoop.hdds.scm;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto.DISK;
import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED;
-import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto.DISK;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/StorageType.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/StorageType.java
index 1180c33ccc..4f99e699ae 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/StorageType.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/StorageType.java
@@ -17,7 +17,7 @@
package org.apache.hadoop.hdds.protocol;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.StorageTypeProto;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;
/**
* Ozone specific storage types.
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketLayoutWithOlderClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketLayoutWithOlderClient.java
index 9b9bef045c..e3f41555ab 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketLayoutWithOlderClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketLayoutWithOlderClient.java
@@ -21,6 +21,7 @@
import static org.junit.jupiter.api.Assertions.assertNotNull;
import java.util.UUID;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.ozone.ClientVersion;
import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -88,7 +89,7 @@ public void testCreateBucketWithOlderClient() throws Exception {
OzoneManagerProtocolProtos.BucketInfo.newBuilder()
.setVolumeName(volumeName).setBucketName(buckName)
.setIsVersionEnabled(false).setStorageType(
- OzoneManagerProtocolProtos.StorageTypeProto.DISK)
+ StorageTypeProto.DISK)
.build())
.build()).build();
createBucketReq = createBucketReq.toBuilder()
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index 7c6afa0407..d3412725ca 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -754,7 +754,7 @@ message BucketInfo {
required string bucketName = 2;
repeated OzoneAclInfo acls = 3;
required bool isVersionEnabled = 4 [default = false];
- required StorageTypeProto storageType = 5 [default = DISK];
+ required hadoop.hdds.StorageTypeProto storageType = 5 [default = DISK];
optional uint64 creationTime = 6;
repeated hadoop.hdds.KeyValue metadata = 7;
optional BucketEncryptionInfoProto beinfo = 8;
@@ -772,13 +772,6 @@ message BucketInfo {
optional hadoop.hdds.DefaultReplicationConfig defaultReplicationConfig = 20;
}
-enum StorageTypeProto {
- DISK = 1;
- SSD = 2;
- ARCHIVE = 3;
- RAM_DISK = 4;
-}
-
enum BucketLayoutProto {
LEGACY = 1;
FILE_SYSTEM_OPTIMIZED = 2;
@@ -843,7 +836,7 @@ message BucketArgs {
required string volumeName = 1;
required string bucketName = 2;
optional bool isVersionEnabled = 5;
- optional StorageTypeProto storageType = 6;
+ optional hadoop.hdds.StorageTypeProto storageType = 6;
repeated hadoop.hdds.KeyValue metadata = 7;
optional uint64 quotaInBytes = 8;
optional uint64 quotaInNamespace = 9;
diff --git a/hadoop-ozone/interface-client/src/main/resources/proto.lock b/hadoop-ozone/interface-client/src/main/resources/proto.lock
index 7fe80ee381..59e91e74d2 100644
--- a/hadoop-ozone/interface-client/src/main/resources/proto.lock
+++ b/hadoop-ozone/interface-client/src/main/resources/proto.lock
@@ -982,27 +982,6 @@
}
]
},
- {
- "name": "StorageTypeProto",
- "enum_fields": [
- {
- "name": "DISK",
- "integer": 1
- },
- {
- "name": "SSD",
- "integer": 2
- },
- {
- "name": "ARCHIVE",
- "integer": 3
- },
- {
- "name": "RAM_DISK",
- "integer": 4
- }
- ]
- },
{
"name": "BucketLayoutProto",
"enum_fields": [
@@ -3071,7 +3050,7 @@
{
"id": 5,
"name": "storageType",
- "type": "StorageTypeProto",
+ "type": "hadoop.hdds.StorageTypeProto",
"required": true,
"options": [
{
@@ -3330,7 +3309,7 @@
{
"id": 6,
"name": "storageType",
- "type": "StorageTypeProto",
+ "type": "hadoop.hdds.StorageTypeProto",
"optional": true
},
{
@@ -8330,4 +8309,4 @@
}
}
]
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java
index f6587010c0..cae1a33b70 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java
@@ -38,6 +38,7 @@
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;
import org.apache.hadoop.hdds.utils.TransactionInfo;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.OzoneConsts;
@@ -468,7 +469,7 @@ private OMBucketCreateResponse createBucket(String volumeName,
BucketInfo.Builder bucketInfo =
newBucketInfoBuilder(bucketName, volumeName)
- .setStorageType(OzoneManagerProtocolProtos.StorageTypeProto.DISK);
+ .setStorageType(StorageTypeProto.DISK);
OzoneManagerProtocolProtos.OMRequest omRequest =
OMRequestTestUtils.newCreateBucketRequest(bucketInfo).build();
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
index 1ce42bacd9..021a0a16d1 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
@@ -741,7 +741,7 @@ public static BucketInfo.Builder newBucketInfoBuilder(
return BucketInfo.newBuilder()
.setBucketName(bucketName)
.setVolumeName(volumeName)
- .setStorageType(OzoneManagerProtocolProtos.StorageTypeProto.SSD)
+ .setStorageType(HddsProtos.StorageTypeProto.SSD)
.setIsVersionEnabled(false)
.addAllMetadata(getMetadataList());
}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java
index 49bfe7555c..138f6f2812 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java
@@ -33,6 +33,7 @@
import java.nio.file.Path;
import java.util.UUID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -95,7 +96,7 @@ public void testUserInfoInCaseOfHadoopTransport() throws Exception {
BucketInfo.Builder bucketInfo =
newBucketInfoBuilder(bucketName, volumeName)
.setIsVersionEnabled(true)
- .setStorageType(OzoneManagerProtocolProtos.StorageTypeProto.DISK);
+ .setStorageType(StorageTypeProto.DISK);
OMRequest omRequest = newCreateBucketRequest(bucketInfo).build();
OMBucketCreateRequest omBucketCreateRequest =
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java
index 512664ad8d..f8f73c0979 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java
@@ -30,6 +30,7 @@
import java.util.UUID;
import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.exceptions.OMException;
@@ -41,7 +42,6 @@
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.StorageTypeProto;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Time;
import org.junit.jupiter.api.Test;
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
index 9bdd5d82b2..e2d3775f36 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
@@ -78,6 +78,7 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.PipelineID;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
@@ -86,7 +87,6 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java
index 5537d6b5ce..c7d3f91e44 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java
@@ -49,6 +49,7 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.PipelineID;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto.Builder;
@@ -57,7 +58,6 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java
index 455413d10e..5ee00a4239 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java
@@ -51,6 +51,7 @@
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
@@ -59,7 +60,6 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]