This is an automated email from the ASF dual-hosted git repository.
sumitagrawal pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new db4b4367617 HDDS-13613. Remove calculatePipelineBytesWritten from DN code path. (#8978)
db4b4367617 is described below
commit db4b43676172ef03e73ad9a4e2ddf7e9490ac824
Author: Aswin Shakil Balasubramanian <[email protected]>
AuthorDate: Wed Sep 10 23:24:25 2025 -0700
HDDS-13613. Remove calculatePipelineBytesWritten from DN code path. (#8978)
---
.../transport/server/ratis/XceiverServerRatis.java | 15 ---
.../proto/ScmServerDatanodeHeartbeatProtocol.proto | 2 +-
.../hdds/scm/pipeline/PipelineReportHandler.java | 3 -
.../hdds/scm/pipeline/SCMPipelineMetrics.java | 27 -----
.../TestOneReplicaPipelineSafeModeRule.java | 1 -
.../hdds/scm/safemode/TestSCMSafeModeManager.java | 1 -
.../TestSCMPipelineBytesWrittenMetrics.java | 135 ---------------------
7 files changed, 1 insertion(+), 183 deletions(-)
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index eca17f46d71..6661823f9a1 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -77,8 +77,6 @@
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi;
@@ -778,18 +776,6 @@ public boolean isExist(HddsProtos.PipelineID pipelineId) {
RaftGroupId.valueOf(PipelineID.getFromProtobuf(pipelineId).getId()));
}
- private long calculatePipelineBytesWritten(HddsProtos.PipelineID pipelineID) {
- long bytesWritten = 0;
- for (Container<?> container : containerController.getContainers()) {
- ContainerData containerData = container.getContainerData();
- if (containerData.getOriginPipelineId()
- .compareTo(pipelineID.getId()) == 0) {
- bytesWritten += containerData.getStatistics().getWriteBytes();
- }
- }
- return bytesWritten;
- }
-
@Override
public List<PipelineReport> getPipelineReport() {
try {
@@ -803,7 +789,6 @@ public List<PipelineReport> getPipelineReport() {
reports.add(PipelineReport.newBuilder()
.setPipelineID(pipelineID)
.setIsLeader(isLeader)
- .setBytesWritten(calculatePipelineBytesWritten(pipelineID))
.build());
}
return reports;
diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
index f7608b28496..752a62bd339 100644
--- a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
+++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
@@ -269,7 +269,7 @@ message ContainerAction {
message PipelineReport {
required PipelineID pipelineID = 1;
required bool isLeader = 2;
- optional uint64 bytesWritten = 3;
+ optional uint64 bytesWritten = 3 [deprecated = true];
}
message PipelineReportsProto {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
index 91c58c754c8..50fbc5e492b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
@@ -51,7 +51,6 @@ public class PipelineReportHandler implements
private final PipelineManager pipelineManager;
private final SafeModeManager scmSafeModeManager;
private final SCMContext scmContext;
- private final SCMPipelineMetrics metrics;
public PipelineReportHandler(SafeModeManager scmSafeModeManager,
PipelineManager pipelineManager,
@@ -61,7 +60,6 @@ public PipelineReportHandler(SafeModeManager scmSafeModeManager,
this.scmSafeModeManager = scmSafeModeManager;
this.pipelineManager = pipelineManager;
this.scmContext = scmContext;
- this.metrics = SCMPipelineMetrics.create();
}
@Override
@@ -154,7 +152,6 @@ protected void setPipelineLeaderId(PipelineReport report,
RatisReplicationConfig.hasFactor(pipeline.getReplicationConfig(),
ReplicationFactor.ONE)) {
pipeline.setLeaderId(dn.getID());
- metrics.incNumPipelineBytesWritten(pipeline, report.getBytesWritten());
}
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineMetrics.java
index 65d781943b8..3ba1df1a806 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineMetrics.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineMetrics.java
@@ -58,13 +58,11 @@ public final class SCMPipelineMetrics implements MetricsSource {
private @Metric MutableCounterLong numPipelineContainSameDatanodes;
private @Metric MutableRate pipelineCreationLatencyNs;
private final Map<PipelineID, MutableCounterLong> numBlocksAllocated;
- private final Map<PipelineID, MutableCounterLong> numBytesWritten;
/** Private constructor. */
private SCMPipelineMetrics() {
this.registry = new MetricsRegistry(SOURCE_NAME);
numBlocksAllocated = new ConcurrentHashMap<>();
- numBytesWritten = new ConcurrentHashMap<>();
}
/**
@@ -104,8 +102,6 @@ public void getMetrics(MetricsCollector collector, boolean all) {
numPipelineReportProcessingFailed.snapshot(recordBuilder, true);
numPipelineContainSameDatanodes.snapshot(recordBuilder, true);
pipelineCreationLatencyNs.snapshot(recordBuilder, true);
- numBytesWritten
- .forEach((pid, metric) -> metric.snapshot(recordBuilder, true));
numBlocksAllocated
.forEach((pid, metric) -> metric.snapshot(recordBuilder, true));
}
@@ -114,7 +110,6 @@ void createPerPipelineMetrics(Pipeline pipeline) {
numBlocksAllocated.put(pipeline.getId(), new MutableCounterLong(Interns
.info(getBlockAllocationMetricName(pipeline),
"Number of blocks allocated in pipeline " + pipeline.getId()),
0L));
- numBytesWritten.put(pipeline.getId(), bytesWrittenCounter(pipeline, 0L));
}
public static String getBlockAllocationMetricName(Pipeline pipeline) {
@@ -122,14 +117,8 @@ public static String getBlockAllocationMetricName(Pipeline pipeline) {
.getReplicationConfig().toString() + "-" + pipeline.getId().getId();
}
- public static String getBytesWrittenMetricName(Pipeline pipeline) {
- return "NumPipelineBytesWritten-" + pipeline.getType() + "-" + pipeline
- .getReplicationConfig().toString() + "-" + pipeline.getId().getId();
- }
-
void removePipelineMetrics(PipelineID pipelineID) {
numBlocksAllocated.remove(pipelineID);
- numBytesWritten.remove(pipelineID);
}
/**
@@ -155,22 +144,6 @@ void incNumPipelineCreated() {
numPipelineCreated.incr();
}
- /**
- * Increments the number of total bytes that write into the pipeline.
- */
- void incNumPipelineBytesWritten(Pipeline pipeline, long bytes) {
- numBytesWritten.computeIfPresent(pipeline.getId(),
- (k, v) -> bytesWrittenCounter(pipeline, bytes));
- }
-
- private static MutableCounterLong bytesWrittenCounter(
- Pipeline pipeline, long bytes) {
- return new MutableCounterLong(
- Interns.info(getBytesWrittenMetricName(pipeline),
- "Number of bytes written into pipeline " + pipeline.getId()),
- bytes);
- }
-
/**
* Increments number of failed pipeline creation count.
*/
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java
index 49a346660a8..2836ec358fb 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java
@@ -224,7 +224,6 @@ private void firePipelineEvent(List<Pipeline> pipelines) {
reports.add(PipelineReport.newBuilder()
.setPipelineID(pipelineID)
.setIsLeader(true)
- .setBytesWritten(0)
.build());
}
PipelineReportsProto.Builder pipelineReportsProto =
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
index ea5f81ce7da..d93a93ebb6f 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
@@ -414,7 +414,6 @@ private void firePipelineEvent(PipelineManager pipelineManager,
.PipelineReport.newBuilder()
.setPipelineID(pipelineID)
.setIsLeader(true)
- .setBytesWritten(0)
.build());
StorageContainerDatanodeProtocolProtos
.PipelineReportsProto.Builder pipelineReportsProto =
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java
deleted file mode 100644
index 3623da12411..00000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.pipeline;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE;
-import static org.apache.ozone.test.MetricsAsserts.getLongCounter;
-import static org.apache.ozone.test.MetricsAsserts.getMetrics;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.utils.IOUtils;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneKeyDetails;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.ozone.test.GenericTestUtils;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
-/**
- * Test cases to verify the SCM pipeline bytesWritten metrics.
- */
-public class TestSCMPipelineBytesWrittenMetrics {
-
- private MiniOzoneCluster cluster;
- private OzoneClient client;
-
- @BeforeEach
- public void setup() throws Exception {
- OzoneConfiguration conf = new OzoneConfiguration();
- conf.setBoolean(OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE, false);
- conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 1);
- conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 10, TimeUnit.SECONDS);
-
- cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(3)
- .build();
- cluster.waitForClusterToBeReady();
- client = cluster.newClient();
- }
-
- private void writeNumBytes(int numBytes) throws Exception {
- ObjectStore store = client.getObjectStore();
-
- String volumeName = UUID.randomUUID().toString();
- String bucketName = UUID.randomUUID().toString();
-
- String value = RandomStringUtils.secure().nextAlphabetic(numBytes);
- store.createVolume(volumeName);
- OzoneVolume volume = store.getVolume(volumeName);
- volume.createBucket(bucketName);
- OzoneBucket bucket = volume.getBucket(bucketName);
-
- String keyName = UUID.randomUUID().toString();
-
- OzoneOutputStream out = bucket
- .createKey(keyName, value.getBytes(UTF_8).length, ReplicationType.RATIS,
- ReplicationFactor.THREE, new HashMap<>());
- out.write(value.getBytes(UTF_8));
- out.close();
-
- OmKeyArgs.Builder builder = new OmKeyArgs.Builder();
- builder.setVolumeName(volumeName).setBucketName(bucketName)
- .setKeyName(keyName);
-
- OzoneKeyDetails keyDetails = bucket.getKey(keyName);
- assertEquals(keyName, keyDetails.getName());
- assertEquals(value.getBytes(UTF_8).length, keyDetails
- .getOzoneKeyLocations().get(0).getLength());
- }
-
- @Test
- public void testNumBytesWritten() throws Exception {
- checkBytesWritten(0);
- int bytesWritten = 1000;
- writeNumBytes(bytesWritten);
- checkBytesWritten(bytesWritten);
-
- }
-
- private void checkBytesWritten(long expectedBytesWritten) throws Exception {
- // As there are only 3 datanodes and ozone.scm.pipeline.creation.auto.factor.one
- // is false, there is only one pipeline in the system.
- List<Pipeline> pipelines = cluster.getStorageContainerManager()
- .getPipelineManager().getPipelines();
-
- assertEquals(1, pipelines.size());
- Pipeline pipeline = pipelines.get(0);
-
- final String metricName =
- SCMPipelineMetrics.getBytesWrittenMetricName(pipeline);
- GenericTestUtils.waitFor(() -> {
- MetricsRecordBuilder metrics = getMetrics(
- SCMPipelineMetrics.class.getSimpleName());
- return expectedBytesWritten == getLongCounter(metricName, metrics);
- }, 500, 300000);
- }
-
- @AfterEach
- public void teardown() {
- IOUtils.closeQuietly(client);
- cluster.shutdown();
- }
-}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]