This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 7dfd8c1a78 HDDS-12060. Replace System.currentTimeMillis() with Time.monotonicNow() for duration calculation (#8096)
7dfd8c1a78 is described below

commit 7dfd8c1a7889b5947c87f81d1c4d24a30bf629f6
Author: Chia-Chuan Yu <[email protected]>
AuthorDate: Fri Apr 11 23:17:42 2025 +0800

    HDDS-12060. Replace System.currentTimeMillis() with Time.monotonicNow() for duration calculation (#8096)
---
 .../java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java    |  5 +++--
 .../java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java   |  5 +++--
 .../org/apache/hadoop/ozone/util/ShutdownHookManager.java     |  5 +++--
 .../hadoop/ozone/container/common/utils/HddsVolumeUtil.java   |  5 +++--
 .../hadoop/ozone/container/ozoneimpl/OzoneContainer.java      |  5 +++--
 .../hadoop/ozone/container/keyvalue/TestKeyValueHandler.java  |  3 ++-
 .../hadoop/ozone/container/ozoneimpl/TestContainerReader.java |  5 +++--
 .../replication/ReplicationSupervisorScheduling.java          |  5 +++--
 .../org/apache/hadoop/hdds/server/events/EventWatcher.java    |  5 +++--
 .../org/apache/hadoop/hdds/utils/ProtocolMessageMetrics.java  |  5 +++--
 .../main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java   |  5 +++--
 .../apache/hadoop/ozone/upgrade/BasicUpgradeFinalizer.java    |  5 +++--
 .../apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java   |  5 +++--
 .../src/test/java/org/apache/ozone/test/LambdaTestUtils.java  |  5 +++--
 .../apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java    |  5 +++--
 .../hadoop/hdds/scm/cli/container/upgrade/UpgradeManager.java | 11 ++++++-----
 .../hadoop/hdds/scm/cli/container/upgrade/UpgradeTask.java    |  5 +++--
 .../ozone/lib/service/hadoop/FileSystemAccessService.java     |  5 +++--
 .../java/org/apache/hadoop/fs/contract/ContractTestUtils.java |  5 +++--
 .../common/statemachine/commandhandler/TestBlockDeletion.java |  5 +++--
 .../hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java       |  5 +++--
 .../ozone/om/ratis_snapshot/OmRatisSnapshotProvider.java      |  7 ++++---
 .../hadoop/ozone/om/request/upgrade/OMPrepareRequest.java     |  5 +++--
 .../org/apache/hadoop/ozone/om/service/QuotaRepairTask.java   |  5 +++--
 .../main/java/org/apache/hadoop/ozone/recon/ReconUtils.java   |  5 +++--
 .../hadoop/ozone/recon/tasks/ContainerKeyMapperHelper.java    |  5 +++--
 .../hadoop/ozone/recon/tasks/FileSizeCountTaskHelper.java     |  9 +++++----
 .../apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java   |  5 +++--
 .../org/apache/hadoop/ozone/admin/om/PrepareSubCommand.java   |  5 +++--
 .../org/apache/hadoop/ozone/freon/BaseFreonGenerator.java     |  7 ++++---
 30 files changed, 96 insertions(+), 66 deletions(-)
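
Background on the change: System.currentTimeMillis() reads the wall clock, which NTP corrections or manual adjustments can step backwards, so the difference of two reads is not a reliable duration and can even be negative. Time.monotonicNow() from org.apache.hadoop.util.Time (backed by System.nanoTime()) is strictly non-decreasing within a process, which makes it the right clock for elapsed-time measurement. A minimal self-contained sketch of the pattern this patch applies; DurationDemo and the sleeps are illustrative only, not taken from the patch:

    import org.apache.hadoop.util.Time;

    public class DurationDemo {
      public static void main(String[] args) throws InterruptedException {
        // Wall clock: a clock step between the two reads corrupts the result.
        long wallStart = System.currentTimeMillis();
        Thread.sleep(100);
        long wallElapsed = System.currentTimeMillis() - wallStart; // may be skewed

        // Monotonic clock: meaningless as an absolute timestamp, but safe
        // for durations because it never moves backwards.
        long monoStart = Time.monotonicNow();
        Thread.sleep(100);
        long monoElapsed = Time.monotonicNow() - monoStart; // always >= 0

        System.out.println("wall=" + wallElapsed + "ms, mono=" + monoElapsed + "ms");
      }
    }

Monotonic values are only comparable within one JVM, so they are suited to intervals and should not be logged or persisted as timestamps.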

diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index ed9ec6271e..3b774c6dfd 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -60,6 +60,7 @@
 import org.apache.hadoop.ozone.ClientVersion;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.util.Time;
 import org.apache.ratis.thirdparty.io.grpc.ManagedChannel;
 import org.apache.ratis.thirdparty.io.grpc.Status;
 import org.apache.ratis.thirdparty.io.grpc.netty.GrpcSslContexts;
@@ -571,7 +572,7 @@ public XceiverClientReply sendCommandAsync(
     final CompletableFuture<ContainerCommandResponseProto> replyFuture =
         new CompletableFuture<>();
     semaphore.acquire();
-    long requestTime = System.currentTimeMillis();
+    long requestTime = Time.monotonicNow();
     metrics.incrPendingContainerOpsMetrics(request.getCmdType());
 
     // create a new grpc message stream pair for each call.
@@ -601,7 +602,7 @@ public void onCompleted() {
 
               private void decreasePendingMetricsAndReleaseSemaphore() {
                 metrics.decrPendingContainerOpsMetrics(request.getCmdType());
-                long cost = System.currentTimeMillis() - requestTime;
+                long cost = Time.monotonicNow() - requestTime;
                 metrics.addContainerOpsLatency(request.getCmdType(), cost);
                 if (LOG.isDebugEnabled()) {
                   LOG.debug("Executed command {} on datanode {}, cost = {}, 
cmdType = {}",
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index fa2bdee0cf..f185c3e855 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -47,6 +47,7 @@
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.security.SecurityConfig;
 import org.apache.hadoop.hdds.tracing.TracingUtil;
+import org.apache.hadoop.util.Time;
 import org.apache.ratis.client.RaftClient;
 import org.apache.ratis.client.api.DataStreamApi;
 import org.apache.ratis.grpc.GrpcTlsConfig;
@@ -363,7 +364,7 @@ private XceiverClientReply handleFailedAllCommit(long index, Collection<CommitIn
   public XceiverClientReply sendCommandAsync(
       ContainerCommandRequestProto request) {
     XceiverClientReply asyncReply = new XceiverClientReply(null);
-    long requestTime = System.currentTimeMillis();
+    long requestTime = Time.monotonicNow();
     CompletableFuture<RaftClientReply> raftClientReply =
         sendRequestAsync(request);
     metrics.incrPendingContainerOpsMetrics(request.getCmdType());
@@ -376,7 +377,7 @@ public XceiverClientReply sendCommandAsync(
           }
           metrics.decrPendingContainerOpsMetrics(request.getCmdType());
           metrics.addContainerOpsLatency(request.getCmdType(),
-              System.currentTimeMillis() - requestTime);
+              Time.monotonicNow() - requestTime);
         }).thenApply(reply -> {
           try {
             if (!reply.isSuccess()) {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java
index e533b23fd5..1ce378c62e 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java
@@ -39,6 +39,7 @@
 import org.apache.hadoop.hdds.annotation.InterfaceStability;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -95,9 +96,9 @@ public void run() {
                 LOG.info("Shutdown process invoked a second time: ignoring");
                 return;
               }
-              long started = System.currentTimeMillis();
+              long started = Time.monotonicNow();
               int timeoutCount = MGR.executeShutdown();
-              long ended = System.currentTimeMillis();
+              long ended = Time.monotonicNow();
               LOG.debug(String.format(
                   "Completed shutdown in %.3f seconds; Timeouts: %d",
                   (ended - started) / 1000.0, timeoutCount));
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
index 6b4ca22ceb..6ea9e87541 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
@@ -33,6 +33,7 @@
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 
 /**
@@ -89,7 +90,7 @@ public static void loadAllHddsVolumeDbStore(MutableVolumeSet hddsVolumeSet,
     List<CompletableFuture<Void>> futures = new ArrayList<>();
     List<HddsVolume> hddsVolumes = StorageVolumeUtil.getHddsVolumesList(
         hddsVolumeSet.getVolumesList());
-    long start = System.currentTimeMillis();
+    long start = Time.monotonicNow();
     for (HddsVolume volume : hddsVolumes) {
       futures.add(CompletableFuture.runAsync(
           () -> loadVolume(volume, readOnly, logger)));
@@ -99,7 +100,7 @@ public static void loadAllHddsVolumeDbStore(MutableVolumeSet hddsVolumeSet,
     }
     if (logger != null) {
       logger.info("Load {} volumes DbStore cost: {}ms", hddsVolumes.size(),
-          System.currentTimeMillis() - start);
+          Time.monotonicNow() - start);
     }
   }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index 22d040c6e8..cde382ede8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -96,6 +96,7 @@
 import org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig;
 import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures.SchemaV3;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Timer;
 import org.apache.ratis.grpc.GrpcTlsConfig;
 import org.slf4j.Logger;
@@ -318,7 +319,7 @@ public void buildContainerSet() throws IOException {
     Iterator<StorageVolume> volumeSetIterator = volumeSet.getVolumesList()
         .iterator();
     ArrayList<Thread> volumeThreads = new ArrayList<>();
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.monotonicNow();
 
     // Load container inspectors that may be triggered at startup based on
     // system properties set. These can inspect and possibly repair
@@ -360,7 +361,7 @@ public void buildContainerSet() throws IOException {
     ContainerInspectorUtil.unload();
 
     LOG.info("Build ContainerSet costs {}s",
-        (System.currentTimeMillis() - startTime) / 1000);
+        (Time.monotonicNow() - startTime) / 1000);
   }
 
   /**
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
index de26f0fbbf..6b0e84295c 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
@@ -68,6 +68,7 @@
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
+import org.apache.hadoop.util.Time;
 import org.apache.ozone.test.GenericTestUtils.LogCapturer;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
@@ -442,7 +443,7 @@ public void testDeleteContainerTimeout() throws IOException {
     final ContainerSet containerSet = newContainerSet();
     final MutableVolumeSet volumeSet = mock(MutableVolumeSet.class);
     final Clock clock = mock(Clock.class);
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.monotonicNow();
 
     DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class);
     when(clock.millis())
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
index e1ca0866e3..4688c46214 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
@@ -68,6 +68,7 @@
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
+import org.apache.hadoop.util.Time;
 import org.apache.ozone.test.GenericTestUtils.LogCapturer;
 import org.apache.ratis.util.FileUtils;
 import org.junit.jupiter.api.AfterEach;
@@ -450,7 +451,7 @@ public void testMultipleContainerReader(ContainerTestVersionInfo versionInfo)
           (HddsVolume) volumes.get(i), containerSet, conf, true);
       threads[i] = new Thread(containerReaders[i]);
     }
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.monotonicNow();
     for (int i = 0; i < volumeNum; i++) {
       threads[i].start();
     }
@@ -458,7 +459,7 @@ public void testMultipleContainerReader(ContainerTestVersionInfo versionInfo)
       threads[i].join();
     }
     System.out.println("Open " + volumeNum + " Volume with " + containerCount +
-        " costs " + (System.currentTimeMillis() - startTime) / 1000 + "s");
+        " costs " + (Time.monotonicNow() - startTime) / 1000 + "s");
     assertEquals(containerCount,
         containerSet.getContainerMap().entrySet().size());
     assertEquals(volumeSet.getFailedVolumesList().size(), 0);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorScheduling.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorScheduling.java
index f2e6ba87cd..88620156a5 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorScheduling.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorScheduling.java
@@ -28,6 +28,7 @@
 import java.util.UUID;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
+import org.apache.hadoop.util.Time;
 import org.junit.jupiter.api.Test;
 
 /**
@@ -104,7 +105,7 @@ public void test() throws InterruptedException {
 
     ReplicationSupervisor rs = ReplicationSupervisor.newBuilder().build();
 
-    final long start = System.currentTimeMillis();
+    final long start = Time.monotonicNow();
 
     //schedule 100 container replication
     for (int i = 0; i < 100; i++) {
@@ -114,7 +115,7 @@ public void test() throws InterruptedException {
       rs.addTask(new ReplicationTask(fromSources(i, sources), replicator));
     }
     rs.shutdownAfterFinish();
-    final long executionTime = System.currentTimeMillis() - start;
+    final long executionTime = Time.monotonicNow() - start;
     System.out.println(executionTime);
     assertThat(executionTime)
         .withFailMessage("Execution was too slow : " + executionTime + " ms")
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
index 2b3e13e354..6f48181dcc 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
@@ -33,6 +33,7 @@
 import org.apache.hadoop.ozone.lease.LeaseExpiredException;
 import org.apache.hadoop.ozone.lease.LeaseManager;
 import org.apache.hadoop.ozone.lease.LeaseNotFoundException;
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -116,7 +117,7 @@ private synchronized void handleStartEvent(TIMEOUT_PAYLOAD payload,
                                              EventPublisher publisher) {
     metrics.incrementTrackedEvents();
     long identifier = payload.getId();
-    startTrackingTimes.put(identifier, System.currentTimeMillis());
+    startTrackingTimes.put(identifier, Time.monotonicNow());
 
     trackedEventsByID.put(identifier, payload);
     trackedEvents.add(payload);
@@ -139,7 +140,7 @@ protected synchronized void handleCompletion(COMPLETION_PAYLOAD
     if (trackedEvents.remove(payload)) {
       metrics.incrementCompletedEvents();
       long originalTime = startTrackingTimes.remove(id);
-      metrics.updateFinishingTime(System.currentTimeMillis() - originalTime);
+      metrics.updateFinishingTime(Time.monotonicNow() - originalTime);
       onFinished(publisher, payload);
     }
   }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/ProtocolMessageMetrics.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/ProtocolMessageMetrics.java
index f772ea8c92..09e154f9aa 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/ProtocolMessageMetrics.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/ProtocolMessageMetrics.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.metrics2.MetricsTag;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.Interns;
+import org.apache.hadoop.util.Time;
 import org.apache.ratis.util.UncheckedAutoCloseable;
 
 /**
@@ -68,12 +69,12 @@ public void increment(KEY key, long duration) {
   }
 
   public UncheckedAutoCloseable measure(KEY key) {
-    final long startTime = System.currentTimeMillis();
+    final long startTime = Time.monotonicNow();
     concurrency.incrementAndGet();
     return () -> {
       concurrency.decrementAndGet();
       counters.get(key).incrementAndGet();
-      elapsedTimes.get(key).addAndGet(System.currentTimeMillis() - startTime);
+      elapsedTimes.get(key).addAndGet(Time.monotonicNow() - startTime);
     };
   }
 
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
index 5c5247e011..9ce8e03f69 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
@@ -27,6 +27,7 @@
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
 import org.apache.hadoop.hdds.utils.db.RocksDatabase.ColumnFamily;
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -297,7 +298,7 @@ private List<KeyValue<byte[], byte[]>> getRangeKVs(byte[] startKey,
       int count, boolean sequential, byte[] prefix,
       MetadataKeyFilters.MetadataKeyFilter... filters)
       throws IOException, IllegalArgumentException {
-    long start = System.currentTimeMillis();
+    long start = Time.monotonicNow();
 
     if (count < 0) {
       throw new IllegalArgumentException(
@@ -342,7 +343,7 @@ && get(startKey) == null) {
         }
       }
     } finally {
-      long end = System.currentTimeMillis();
+      long end = Time.monotonicNow();
       long timeConsumed = end - start;
       if (LOG.isDebugEnabled()) {
         if (filters != null) {
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/upgrade/BasicUpgradeFinalizer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/upgrade/BasicUpgradeFinalizer.java
index 504bda7457..627210ca9d 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/upgrade/BasicUpgradeFinalizer.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/upgrade/BasicUpgradeFinalizer.java
@@ -50,6 +50,7 @@
 import org.apache.hadoop.ozone.upgrade.UpgradeException.ResultCodes;
 import org.apache.hadoop.ozone.upgrade.UpgradeFinalization.Status;
 import org.apache.hadoop.ozone.upgrade.UpgradeFinalization.StatusAndMessages;
+import org.apache.hadoop.util.Time;
 import org.apache.ratis.protocol.exceptions.NotLeaderException;
 
 /**
@@ -169,9 +170,9 @@ public void finalizeAndWaitForCompletion(
     }
 
     boolean success = false;
-    long endTime = System.currentTimeMillis() +
+    long endTime = Time.monotonicNow() +
         TimeUnit.SECONDS.toMillis(maxTimeToWaitInSeconds);
-    while (System.currentTimeMillis() < endTime) {
+    while (Time.monotonicNow() < endTime) {
       try {
         response = reportStatus(upgradeClientID, false);
         LOG.info("Finalization Messages : {} ", response.msgs());
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
index bae0fcd536..c09c5af85d 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
@@ -89,6 +89,7 @@
 import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReader;
 import org.apache.hadoop.ozone.lock.BootstrapStateHandler;
+import org.apache.hadoop.util.Time;
 import org.apache.ozone.compaction.log.CompactionFileInfo;
 import org.apache.ozone.compaction.log.CompactionLogEntry;
 import org.apache.ozone.rocksdb.util.RdbUtil;
@@ -758,7 +759,7 @@ void diffAllSnapshots(RocksDBCheckpointDiffer differ)
   private void createCheckpoint(ManagedRocksDB rocksDB) throws RocksDBException {
 
     LOG.trace("Current time: " + System.currentTimeMillis());
-    long t1 = System.currentTimeMillis();
+    long t1 = Time.monotonicNow();
 
     final long snapshotGeneration = rocksDB.get().getLatestSequenceNumber();
     final String cpPath = CP_PATH_PREFIX + snapshotGeneration;
@@ -780,7 +781,7 @@ private void createCheckpoint(ManagedRocksDB rocksDB) throws RocksDBException {
                 colHandle));
     this.snapshots.add(currentSnapshot);
 
-    long t2 = System.currentTimeMillis();
+    long t2 = Time.monotonicNow();
     LOG.trace("Current time: " + t2);
     LOG.debug("Time elapsed: " + (t2 - t1) + " ms");
   }
diff --git a/hadoop-hdds/test-utils/src/test/java/org/apache/ozone/test/LambdaTestUtils.java b/hadoop-hdds/test-utils/src/test/java/org/apache/ozone/test/LambdaTestUtils.java
index da5a08f615..b3529ff10b 100644
--- a/hadoop-hdds/test-utils/src/test/java/org/apache/ozone/test/LambdaTestUtils.java
+++ b/hadoop-hdds/test-utils/src/test/java/org/apache/ozone/test/LambdaTestUtils.java
@@ -20,6 +20,7 @@
 import com.google.common.base.Preconditions;
 import java.util.concurrent.Callable;
 import java.util.concurrent.TimeoutException;
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -105,7 +106,7 @@ public static int await(int timeoutMillis,
         "timeoutMillis must be >= 0");
     Preconditions.checkNotNull(timeoutHandler);
 
-    final long endTime = System.currentTimeMillis() + timeoutMillis;
+    final long endTime = Time.monotonicNow() + timeoutMillis;
     Throwable ex = null;
     boolean running = true;
     int iterations = 0;
@@ -126,7 +127,7 @@ public static int await(int timeoutMillis,
         LOG.debug("await() iteration {}", iterations, e);
         ex = e;
       }
-      running = System.currentTimeMillis() < endTime;
+      running = Time.monotonicNow() < endTime;
       if (running) {
         int sleeptime = retry.call();
         if (sleeptime >= 0) {
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java
index bb225aba58..9690bbf8d4 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java
@@ -21,6 +21,7 @@
 import java.util.concurrent.TimeoutException;
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
+import org.apache.hadoop.util.Time;
 import picocli.CommandLine.Command;
 import picocli.CommandLine.Mixin;
 import picocli.CommandLine.Option;
@@ -48,7 +49,7 @@ public class SafeModeWaitSubcommand implements Callable<Void> {
 
   @Override
   public Void call() throws Exception {
-    startTestTime = System.currentTimeMillis();
+    startTestTime = Time.monotonicNow();
 
     while (getRemainingTimeInSec() > 0) {
       try (ScmClient scmClient = scmOption.createScmClient()) {
@@ -85,6 +86,6 @@ public Void call() throws Exception {
   }
 
   private long getRemainingTimeInSec() {
-    return timeoutSeconds - (System.currentTimeMillis() - startTestTime) / 1000;
+    return timeoutSeconds - (Time.monotonicNow() - startTestTime) / 1000;
   }
 }
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeManager.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeManager.java
index ae9ec43661..6b2f7818f8 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeManager.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeManager.java
@@ -30,6 +30,7 @@
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -48,7 +49,7 @@ public List<Result> run(OzoneConfiguration configuration,
       List<HddsVolume> volumes) throws IOException {
     List<Result> results = new ArrayList<>();
     Map<HddsVolume, CompletableFuture<Result>> volumeFutures = new HashMap<>();
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.monotonicNow();
 
     LOG.info("Start to upgrade {} volume(s)", volumes.size());
     for (StorageVolume volume : volumes) {
@@ -76,7 +77,7 @@ public List<Result> run(OzoneConfiguration configuration,
     }
 
     LOG.info("It took {}ms to finish all volume upgrade.",
-        (System.currentTimeMillis() - startTime));
+        (Time.monotonicNow() - startTime));
     return results;
   }
 
@@ -91,7 +92,7 @@ public DatanodeStoreSchemaThreeImpl getDBStore(HddsVolume volume) {
   public static class Result {
     private Map<Long, UpgradeTask.UpgradeContainerResult> resultMap;
     private final HddsVolume hddsVolume;
-    private final long startTimeMs = System.currentTimeMillis();
+    private final long startTimeMs = Time.monotonicNow();
     private long endTimeMs = 0L;
     private Exception e = null;
     private Status status = Status.FAIL;
@@ -124,12 +125,12 @@ public boolean isSuccess() {
     }
 
     public void success() {
-      this.endTimeMs = System.currentTimeMillis();
+      this.endTimeMs = Time.monotonicNow();
       this.status = Status.SUCCESS;
     }
 
     public void fail(Exception exception) {
-      this.endTimeMs = System.currentTimeMillis();
+      this.endTimeMs = Time.monotonicNow();
       this.status = Status.FAIL;
       this.e = exception;
     }
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeTask.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeTask.java
index aff031a090..262abc57fd 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeTask.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeTask.java
@@ -55,6 +55,7 @@
 import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaTwoDBDefinition;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -383,7 +384,7 @@ public static class UpgradeContainerResult {
     private final ContainerData originContainerData;
     private ContainerData newContainerData;
     private long totalRow = 0L;
-    private final long startTimeMs = System.currentTimeMillis();
+    private final long startTimeMs = Time.monotonicNow();
     private long endTimeMs = 0L;
     private Status status;
 
@@ -431,7 +432,7 @@ public void setNewContainerFilePath(String newContainerFilePath) {
 
     public void success(long rowCount) {
       this.totalRow = rowCount;
-      this.endTimeMs = System.currentTimeMillis();
+      this.endTimeMs = Time.monotonicNow();
       this.status = Status.SUCCESS;
     }
 
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/hadoop/FileSystemAccessService.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/hadoop/FileSystemAccessService.java
index a8edd3908d..c4dfc28df4 100644
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/hadoop/FileSystemAccessService.java
+++ b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/hadoop/FileSystemAccessService.java
@@ -38,6 +38,7 @@
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.ozone.lib.server.BaseService;
 import org.apache.ozone.lib.server.ServiceException;
@@ -112,7 +113,7 @@ synchronized void release() throws IOException {
           fs = null;
           lastUse = -1;
         } else {
-          lastUse = System.currentTimeMillis();
+          lastUse = Time.monotonicNow();
         }
       }
     }
@@ -125,7 +126,7 @@ synchronized void release() throws IOException {
     synchronized boolean purgeIfIdle() throws IOException {
       boolean ret = false;
       if (count == 0 && lastUse != -1 &&
-          (System.currentTimeMillis() - lastUse) > timeout) {
+          (Time.monotonicNow() - lastUse) > timeout) {
         fs.close();
         fs = null;
         lastUse = -1;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
index f0e4812615..a4d7c02b4a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
@@ -53,6 +53,7 @@
 import org.apache.hadoop.fs.StreamCapabilities;
 import org.apache.hadoop.io.ByteBufferPool;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.functional.FutureIO;
 import org.apache.hadoop.util.functional.RemoteIterators;
 import org.assertj.core.api.Assertions;
@@ -1498,13 +1499,13 @@ public static boolean containsDuplicates(Collection<Path> paths) {
    */
   public static FileStatus getFileStatusEventually(FileSystem fs, Path path,
       int timeout) throws IOException, InterruptedException {
-    long endTime = System.currentTimeMillis() + timeout;
+    long endTime = Time.monotonicNow() + timeout;
     FileStatus stat = null;
     do {
       try {
         stat = fs.getFileStatus(path);
       } catch (FileNotFoundException e) {
-        if (System.currentTimeMillis() > endTime) {
+        if (Time.monotonicNow() > endTime) {
           // timeout, raise an assert with more diagnostics
          assertPathExists(fs, "Path not found after " + timeout + " mS", path);
         } else {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
index fe56a23a66..1acb1bd90c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
@@ -93,6 +93,7 @@
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
+import org.apache.hadoop.util.Time;
 import org.apache.ozone.test.GenericTestUtils;
 import org.apache.ozone.test.GenericTestUtils.LogCapturer;
 import org.apache.ozone.test.tag.Flaky;
@@ -821,7 +822,7 @@ public void testBlockDeleteCommandParallelProcess() throws Exception {
 
     // Wait for block delete command sent from OM
     OzoneTestUtils.flushAndWaitForDeletedBlockLog(scm);
-    long start = System.currentTimeMillis();
+    long start = Time.monotonicNow();
     // Wait for all blocks been deleted.
     GenericTestUtils.waitFor(() -> {
       try {
@@ -833,7 +834,7 @@ public void testBlockDeleteCommandParallelProcess() throws Exception {
       }
       return false;
     }, 100, 30000);
-    long end = System.currentTimeMillis();
+    long end = Time.monotonicNow();
     System.out.println("Block deletion costs " + (end - start) + "ms");
   }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java
index a3ad7c6a48..a837cb7851 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java
@@ -33,6 +33,7 @@
 import org.apache.hadoop.ozone.freon.TestOmBucketReadWriteFileOps.ParameterBuilder;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.lock.OMLockMetrics;
+import org.apache.hadoop.util.Time;
 import org.apache.ozone.test.NonHATests;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
@@ -115,7 +116,7 @@ void testOmBucketReadWriteKeyOps(ParameterBuilder parameterBuilder) throws Excep
         parameterBuilder.getBucketArgs().setBucketLayout(BucketLayout.OBJECT_STORE).build()
     );
 
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.monotonicNow();
     String om = cluster().getConf().get(OZONE_OM_ADDRESS_KEY);
     new Freon().getCmd().execute(
         "-D", OZONE_OM_ADDRESS_KEY + "=" + om,
@@ -132,7 +133,7 @@ void testOmBucketReadWriteKeyOps(ParameterBuilder parameterBuilder) throws Excep
         "-R", String.valueOf(parameterBuilder.getNumOfReadOperations()),
         "-W", String.valueOf(parameterBuilder.getNumOfWriteOperations()),
         "-n", String.valueOf(1));
-    long totalTime = System.currentTimeMillis() - startTime;
+    long totalTime = Time.monotonicNow() - startTime;
     LOG.info("Total Execution Time: " + totalTime);
 
     LOG.info("Started verifying OM bucket read/write ops key generation...");
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis_snapshot/OmRatisSnapshotProvider.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis_snapshot/OmRatisSnapshotProvider.java
index 8c6cdd4a74..35ae4a60fe 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis_snapshot/OmRatisSnapshotProvider.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis_snapshot/OmRatisSnapshotProvider.java
@@ -49,6 +49,7 @@
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.ozone.om.helpers.OMNodeDetails;
 import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -188,17 +189,17 @@ public static void downloadFileWithProgress(InputStream inputStream, File target
       byte[] buffer = new byte[8 * 1024];
       long totalBytesRead = 0;
       int bytesRead;
-      long lastLoggedTime = System.currentTimeMillis();
+      long lastLoggedTime = Time.monotonicNow();
 
       while ((bytesRead = inputStream.read(buffer)) != -1) {
         outputStream.write(buffer, 0, bytesRead);
         totalBytesRead += bytesRead;
 
         // Log progress every 30 seconds
-        if (System.currentTimeMillis() - lastLoggedTime >= 30000) {
+        if (Time.monotonicNow() - lastLoggedTime >= 30000) {
           LOG.info("Downloading '{}': {} KB downloaded so far...",
               targetFile.getName(), totalBytesRead / (1024));
-          lastLoggedTime = System.currentTimeMillis();
+          lastLoggedTime = Time.monotonicNow();
         }
       }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMPrepareRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMPrepareRequest.java
index 05bb957f22..ef5ee2a4ea 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMPrepareRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMPrepareRequest.java
@@ -41,6 +41,7 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareResponse;
+import org.apache.hadoop.util.Time;
 import org.apache.ratis.server.RaftServer;
 import org.apache.ratis.server.raftlog.RaftLog;
 import org.apache.ratis.statemachine.StateMachine;
@@ -173,7 +174,7 @@ private static long waitForLogIndex(long minOMDBFlushIndex,
       Duration flushTimeout, Duration flushCheckInterval)
       throws InterruptedException, IOException {
 
-    long endTime = System.currentTimeMillis() + flushTimeout.toMillis();
+    long endTime = Time.monotonicNow() + flushTimeout.toMillis();
 
     boolean omDBFlushed = false;
     boolean ratisStateMachineApplied = false;
@@ -193,7 +194,7 @@ private static long waitForLogIndex(long minOMDBFlushIndex,
             " to Ratis state machine.", om.getOMNodeId(), minOMDBFlushIndex,
         minRatisStateMachineIndex);
     while (!(omDBFlushed && ratisStateMachineApplied) &&
-        System.currentTimeMillis() < endTime) {
+        Time.monotonicNow() < endTime) {
       // Check OM DB.
       lastOMDBFlushIndex = om.getRatisSnapshotIndex();
       omDBFlushed = (lastOMDBFlushIndex >= minOMDBFlushIndex);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java
index 35ffca1009..b35a95a2e8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java
@@ -57,6 +57,7 @@
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.util.Time;
 import org.apache.ratis.protocol.ClientId;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -346,7 +347,7 @@ private <VALUE> void recalculateUsages(
           prefixUsageMap, q, isRunning, haveValue)));
     }
     int count = 0;
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.monotonicNow();
     try (TableIterator<String, ? extends Table.KeyValue<String, VALUE>>
              keyIter = table.iterator()) {
       while (keyIter.hasNext()) {
@@ -363,7 +364,7 @@ private <VALUE> void recalculateUsages(
         f.get();
       }
       LOG.info("Recalculate {} completed, count {} time {}ms", strType,
-          count, (System.currentTimeMillis() - startTime));
+          count, (Time.monotonicNow() - startTime));
     } catch (IOException ex) {
       throw new UncheckedIOException(ex);
     } catch (InterruptedException ex) {
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
index a590b834d0..ba2ebe0a77 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
@@ -83,6 +83,7 @@
 import org.apache.hadoop.ozone.recon.scm.ReconContainerReportQueue;
 import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.util.Time;
 import org.apache.ozone.recon.schema.generated.tables.daos.GlobalStatsDao;
 import org.apache.ozone.recon.schema.generated.tables.pojos.GlobalStats;
 import org.jooq.Configuration;
@@ -393,12 +394,12 @@ private static void triggerRebuild(ReconNamespaceSummaryManager reconNamespaceSu
     });
 
     executor.submit(() -> {
-      long startTime = System.currentTimeMillis();
+      long startTime = Time.monotonicNow();
       log.info("Rebuilding NSSummary tree...");
       try {
         reconNamespaceSummaryManager.rebuildNSSummaryTree(omMetadataManager);
       } finally {
-        long endTime = System.currentTimeMillis();
+        long endTime = Time.monotonicNow();
         log.info("NSSummary tree rebuild completed in {} ms.", endTime - 
startTime);
       }
     });
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperHelper.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperHelper.java
index 7e5a02ff99..e786aa282a 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperHelper.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperHelper.java
@@ -40,6 +40,7 @@
 import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
 import org.apache.hadoop.ozone.recon.api.types.KeyPrefixContainer;
 import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager;
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -161,7 +162,7 @@ public static boolean process(OMUpdateEventBatch events,
     Map<Long, Long> containerKeyCountMap = new HashMap<>();
     // List of the deleted (container, key) pair's
     List<ContainerKeyPrefix> deletedKeyCountList = new ArrayList<>();
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.monotonicNow();
 
     while (eventIterator.hasNext()) {
       OMDBUpdateEvent<String, OmKeyInfo> omdbUpdateEvent = eventIterator.next();
@@ -211,7 +212,7 @@ public static boolean process(OMUpdateEventBatch events,
       return false;
     }
     LOG.debug("{} successfully processed {} OM DB update event(s) in {} 
milliseconds.",
-        taskName, eventCount, (System.currentTimeMillis() - startTime));
+        taskName, eventCount, (Time.monotonicNow() - startTime));
     return true;
   }
 
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTaskHelper.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTaskHelper.java
index f6d26373f0..b8f5302c62 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTaskHelper.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTaskHelper.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.recon.ReconConstants;
 import org.apache.hadoop.ozone.recon.ReconUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.ozone.recon.schema.generated.tables.daos.FileCountBySizeDao;
 import org.apache.ozone.recon.schema.generated.tables.pojos.FileCountBySize;
 import org.jooq.DSLContext;
@@ -90,7 +91,7 @@ public static ReconOmTask.TaskResult reprocess(OMMetadataManager omMetadataManag
                                                  String taskName) {
     LOG.info("Starting Reprocess for {}", taskName);
     Map<FileSizeCountKey, Long> fileSizeCountMap = new HashMap<>();
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.monotonicNow();
     truncateTableIfNeeded(dslContext);
     boolean status = reprocessBucketLayout(
         bucketLayout, omMetadataManager, fileSizeCountMap, dslContext, fileCountBySizeDao, taskName);
@@ -98,7 +99,7 @@ public static ReconOmTask.TaskResult reprocess(OMMetadataManager omMetadataManag
       return buildTaskResult(taskName, false);
     }
     writeCountsToDB(fileSizeCountMap, dslContext, fileCountBySizeDao);
-    long endTime = System.currentTimeMillis();
+    long endTime = Time.monotonicNow();
     LOG.info("{} completed Reprocess in {} ms.", taskName, (endTime - 
startTime));
     return buildTaskResult(taskName, true);
   }
@@ -160,7 +161,7 @@ public static ReconOmTask.TaskResult processEvents(OMUpdateEventBatch events,
                                                      String taskName) {
     Iterator<OMDBUpdateEvent> eventIterator = events.getIterator();
     Map<FileSizeCountKey, Long> fileSizeCountMap = new HashMap<>();
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.monotonicNow();
     while (eventIterator.hasNext()) {
       OMDBUpdateEvent<String, Object> omdbUpdateEvent = eventIterator.next();
       if (!tableName.equals(omdbUpdateEvent.getTable())) {
@@ -202,7 +203,7 @@ public static ReconOmTask.TaskResult processEvents(OMUpdateEventBatch events,
     }
     writeCountsToDB(fileSizeCountMap, dslContext, fileCountBySizeDao);
     LOG.debug("{} successfully processed in {} milliseconds", taskName,
-        (System.currentTimeMillis() - startTime));
+        (Time.monotonicNow() - startTime));
     return buildTaskResult(taskName, true);
   }
 
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java
index 7fe9584fdf..34fd1af6d6 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java
@@ -41,6 +41,7 @@
 import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
+import org.apache.hadoop.util.Time;
 import org.apache.ozone.recon.schema.generated.tables.daos.GlobalStatsDao;
 import org.apache.ozone.recon.schema.generated.tables.pojos.GlobalStats;
 import org.jooq.Configuration;
@@ -172,7 +173,7 @@ public TaskResult process(OMUpdateEventBatch events,
     String tableName;
     OMDBUpdateEvent<String, Object> omdbUpdateEvent;
     // Process each update event
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.monotonicNow();
     while (eventIterator.hasNext()) {
       omdbUpdateEvent = eventIterator.next();
       tableName = omdbUpdateEvent.getTable();
@@ -215,7 +216,7 @@ public TaskResult process(OMUpdateEventBatch events,
       writeDataToDB(replicatedSizeMap);
     }
     LOG.debug("{} successfully processed in {} milliseconds",
-        getTaskName(), (System.currentTimeMillis() - startTime));
+        getTaskName(), (Time.monotonicNow() - startTime));
     return buildTaskResult(true);
   }
 
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/PrepareSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/PrepareSubCommand.java
index 4641025c18..fd6b2ec0a4 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/PrepareSubCommand.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/PrepareSubCommand.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareStatusResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareStatusResponse.PrepareStatus;
+import org.apache.hadoop.util.Time;
 import picocli.CommandLine;
 
 /**
@@ -112,10 +113,10 @@ public Void call() throws Exception {
     System.out.println();
     System.out.println("Checking individual OM instances for prepare request " 
+
         "completion...");
-    long endTime = System.currentTimeMillis() + pTimeout.toMillis();
+    long endTime = Time.monotonicNow() + pTimeout.toMillis();
     int expectedNumPreparedOms = omPreparedStatusMap.size();
     int currentNumPreparedOms = 0;
-    while (System.currentTimeMillis() < endTime &&
+    while (Time.monotonicNow() < endTime &&
         currentNumPreparedOms < expectedNumPreparedOms) {
       for (Map.Entry<String, Boolean> e : omPreparedStatusMap.entrySet()) {
         if (!e.getValue()) {
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java
index eb9be2c292..9face29009 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java
@@ -63,6 +63,7 @@
 import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
 import org.apache.hadoop.ozone.util.ShutdownHookManager;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.Time;
 import org.apache.ratis.protocol.ClientId;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -180,7 +181,7 @@ private void taskLoop(TaskProvider provider) {
     while (!completed.get()) {
       long counter = attemptCounter.getAndIncrement();
       if (timebase) {
-        if (System.currentTimeMillis()
+        if (Time.monotonicNow()
             > startTime + TimeUnit.SECONDS.toMillis(durationInSecond)) {
           completed.set(true);
           break;
@@ -339,7 +340,7 @@ public void init() {
         freonCommand.isInteractive(), realTimeStatusSupplier());
     progressBar.start();
 
-    startTime = System.currentTimeMillis();
+    startTime = Time.monotonicNow();
   }
 
   public Supplier<String> realTimeStatusSupplier() {
@@ -371,7 +372,7 @@ public void printReport() {
 
     List<String> messages = new LinkedList<>();
     messages.add("Total execution time (sec): " +
-        Math.round((System.currentTimeMillis() - startTime) / 1000.0));
+        Math.round((Time.monotonicNow() - startTime) / 1000.0));
     messages.add("Failures: " + failureCounter.get());
     messages.add("Successful executions: " + successCounter.get());
     if (failureCounter.get() > 0) {

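One caveat for the deadline-style call sites above (BasicUpgradeFinalizer, OMPrepareRequest, PrepareSubCommand, LambdaTestUtils, ContractTestUtils): they compute an end time as now-plus-timeout and poll until it passes. With the monotonic clock this stays correct across wall-clock steps, provided the deadline is both computed and checked on the same clock. A hedged sketch of that loop; DeadlineWait, waitUntilReady, and the 100 ms poll interval are hypothetical names for illustration, not code from the patch:

    import java.util.concurrent.TimeUnit;
    import java.util.function.BooleanSupplier;
    import org.apache.hadoop.util.Time;

    public final class DeadlineWait {
      private DeadlineWait() {
      }

      /** Polls condition until it returns true or timeoutSeconds elapse. */
      public static boolean waitUntilReady(BooleanSupplier condition,
          long timeoutSeconds) throws InterruptedException {
        // Deadline and checks use the same monotonic clock, mirroring the
        // endTime/while pattern in BasicUpgradeFinalizer and OMPrepareRequest.
        long endTime = Time.monotonicNow() + TimeUnit.SECONDS.toMillis(timeoutSeconds);
        while (Time.monotonicNow() < endTime) {
          if (condition.getAsBoolean()) {
            return true;
          }
          Thread.sleep(100); // arbitrary poll interval for the sketch
        }
        return false;
      }
    }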

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
