This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 403fb97d96 HDDS-12878. Move field declarations to start of class in tests (#8308)
403fb97d96 is described below

commit 403fb97d96b249c7e8d85e263b92ec0ecd5fa293
Author: Ivan Zlenko <[email protected]>
AuthorDate: Sun Apr 27 21:32:58 2025 +0500

    HDDS-12878. Move field declarations to start of class in tests (#8308)
---
 .../rawcoder/RawErasureCoderBenchmark.java         |   8 +-
 .../apache/hadoop/hdds/utils/db/TestRDBStore.java  |  21 +-
 .../rocksdiff/TestRocksDBCheckpointDiffer.java     | 319 ++++++++++-----------
 .../container/states/TestContainerAttribute.java   |   3 +-
 .../scm/pipeline/TestPipelinePlacementPolicy.java  |  36 +--
 .../org/apache/ozone/test/LambdaTestUtils.java     |   8 +-
 .../hadoop/ozone/shell/TestOzoneAddress.java       |   4 +-
 .../ozone/om/protocolPB/TestS3GrpcOmTransport.java |  22 +-
 .../hadoop/ozone/TestMiniChaosOzoneCluster.java    |   4 +-
 .../hadoop/ozone/loadgenerators/LoadGenerator.java |   5 +-
 .../fs/http/server/metrics/TestHttpFSMetrics.java  |  30 +-
 .../AbstractContractGetFileStatusTest.java         |   9 +-
 .../fs/ozone/AbstractOzoneFileSystemTest.java      |  10 +-
 .../ozone/AbstractRootedOzoneFileSystemTest.java   |  57 ++--
 .../hadoop/hdds/upgrade/TestHddsUpgradeUtils.java  |   7 +-
 .../ozone/client/rpc/read/TestInputStreamBase.java |   4 +-
 .../hadoop/ozone/om/TestOMRatisSnapshots.java      |   7 +-
 .../tools/contract/AbstractContractDistCpTest.java |  20 +-
 .../ozone/om/lock/TestOzoneLockProvider.java       |   6 +-
 .../GeneralValidatorsForTesting.java               |   4 +-
 .../ozone/om/upgrade/TestOMUpgradeFinalizer.java   |   4 +-
 .../hadoop/ozone/client/ObjectStoreStub.java       |   8 +-
 22 files changed, 298 insertions(+), 298 deletions(-)
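
The diffs below are purely mechanical: constants, fields and nested types are moved so that declarations come before constructors and methods, with no behaviour change, presumably to satisfy a declaration-order style rule. A minimal sketch of the resulting layout, using a hypothetical JUnit 5 test class (all names below are illustrative and do not appear in this commit):

    // Hypothetical example of the declaration order this commit applies:
    // static constants first, then instance fields, then constructors,
    // then methods, with nested types kept at the end of the class.
    import static org.junit.jupiter.api.Assertions.assertEquals;

    import org.junit.jupiter.api.Test;

    public class TestDeclarationOrderExample {

      private static final int DEFAULT_VALUE = 42;   // constants first
      private int current = DEFAULT_VALUE;           // then instance fields

      TestDeclarationOrderExample() {                 // then constructors
      }

      @Test
      void keepsDefaultValue() {                      // then test methods
        assertEquals(DEFAULT_VALUE, current);
      }

      enum Mode { SIMPLE, FULL }                      // nested types last
    }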

diff --git a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/RawErasureCoderBenchmark.java b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/RawErasureCoderBenchmark.java
index 89c15316ef..947f19e488 100644
--- a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/RawErasureCoderBenchmark.java
+++ b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/RawErasureCoderBenchmark.java
@@ -46,10 +46,6 @@
  */
 public final class RawErasureCoderBenchmark {
 
-  private RawErasureCoderBenchmark() {
-    // prevent instantiation
-  }
-
   // target size of input data buffer
   private static final int TARGET_BUFFER_SIZE_MB = 126;
 
@@ -61,6 +57,10 @@ private RawErasureCoderBenchmark() {
           Arrays.asList(new DummyRawErasureCoderFactory(),
               new RSRawErasureCoderFactory()));
 
+  private RawErasureCoderBenchmark() {
+    // prevent instantiation
+  }
+
   enum CODER {
     DUMMY_CODER("Dummy coder"),
     RS_CODER("Reed-Solomon Java coder");
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
index a4ee8412ba..c95fced12f 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
@@ -56,6 +56,17 @@
  * RDBStore Tests.
  */
 public class TestRDBStore {
+
+  public static final int MAX_DB_UPDATES_SIZE_THRESHOLD = 80;
+  private final List<String> families =
+      Arrays.asList(StringUtils.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
+          "First", "Second", "Third",
+          "Fourth", "Fifth",
+          "Sixth");
+  private RDBStore rdbStore = null;
+  private ManagedDBOptions options;
+  private Set<TableConfig> configSet;
+
   static ManagedDBOptions newManagedDBOptions() {
     final ManagedDBOptions options = new ManagedDBOptions();
     options.setCreateIfMissing(true);
@@ -81,16 +92,6 @@ public static RDBStore newRDBStore(File dbFile, ManagedDBOptions options,
         maxDbUpdatesSizeThreshold, true, null, true);
   }
 
-  public static final int MAX_DB_UPDATES_SIZE_THRESHOLD = 80;
-  private final List<String> families =
-      Arrays.asList(StringUtils.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
-          "First", "Second", "Third",
-          "Fourth", "Fifth",
-          "Sixth");
-  private RDBStore rdbStore = null;
-  private ManagedDBOptions options;
-  private Set<TableConfig> configSet;
-
   @BeforeEach
   public void setUp(@TempDir File tempDir) throws Exception {
     CodecBuffer.enableLeakDetection();
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
index 2bafef9818..50be6bfb70 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
@@ -118,8 +118,165 @@
  */
 public class TestRocksDBCheckpointDiffer {
 
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestRocksDBCheckpointDiffer.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestRocksDBCheckpointDiffer.class);
+
+  private static final List<List<String>> SST_FILES_BY_LEVEL = Arrays.asList(
+      Arrays.asList("000015", "000013", "000011", "000009"),
+      Arrays.asList("000018", "000016", "000017", "000026", "000024", "000022",
+          "000020"),
+      Arrays.asList("000027", "000030", "000028", "000029", "000031", "000039",
+          "000037", "000035", "000033"),
+      Arrays.asList("000040", "000044", "000042", "000043", "000045", "000041",
+          "000046", "000054", "000052", "000050", "000048"),
+      Arrays.asList("000059", "000055", "000056", "000060", "000057", "000058")
+  );
+
+  private static final List<List<CompactionNode>> COMPACTION_NODES_BY_LEVEL =
+      SST_FILES_BY_LEVEL.stream()
+          .map(sstFiles ->
+              sstFiles.stream()
+                  .map(
+                      sstFile -> new CompactionNode(sstFile,
+                          1000L,
+                          Long.parseLong(sstFile.substring(0, 6)),
+                          null, null, null
+                      ))
+                  .collect(Collectors.toList()))
+          .collect(Collectors.toList());
+
+  private final List<CompactionLogEntry> compactionLogEntryList = Arrays.asList(
+      new CompactionLogEntry(101, System.currentTimeMillis(),
+          Arrays.asList(
+              new CompactionFileInfo("000068", "/volume/bucket2",
+                  "/volume/bucket2", "bucketTable"),
+              new CompactionFileInfo("000057", "/volume/bucket1",
+                  "/volume/bucket1", "bucketTable")),
+          Collections.singletonList(
+              new CompactionFileInfo("000086", "/volume/bucket1",
+                  "/volume/bucket2", "bucketTable")),
+          null),
+      new CompactionLogEntry(178, System.currentTimeMillis(),
+          Arrays.asList(new CompactionFileInfo("000078",
+                  "/volume/bucket1/key-0000001411",
+                  "/volume/bucket2/key-0000099649",
+                  "keyTable"),
+              new CompactionFileInfo("000075",
+                  "/volume/bucket1/key-0000016536",
+                  "/volume/bucket2/key-0000098897",
+                  "keyTable"),
+              new CompactionFileInfo("000073",
+                  "/volume/bucket1/key-0000000730",
+                  "/volume/bucket2/key-0000097010",
+                  "keyTable"),
+              new CompactionFileInfo("000071",
+                  "/volume/bucket1/key-0000001820",
+                  "/volume/bucket2/key-0000097895",
+                  "keyTable"),
+              new CompactionFileInfo("000063",
+                  "/volume/bucket1/key-0000001016",
+                  "/volume/bucket1/key-0000099930",
+                  "keyTable")),
+          Collections.singletonList(new CompactionFileInfo("000081",
+              "/volume/bucket1/key-0000000730",
+              "/volume/bucket2/key-0000099649",
+              "keyTable")),
+          null
+      ),
+      new CompactionLogEntry(233, System.currentTimeMillis(),
+          Arrays.asList(
+              new CompactionFileInfo("000086", "/volume/bucket1",
+                  "/volume/bucket2", "bucketTable"),
+              new CompactionFileInfo("000088", "/volume/bucket3",
+                  "/volume/bucket3", "bucketTable")),
+          Collections.singletonList(
+              new CompactionFileInfo("000110", "/volume/bucket1",
+                  "/volume/bucket3", "bucketTable")
+          ),
+          null),
+      new CompactionLogEntry(256, System.currentTimeMillis(),
+          Arrays.asList(new CompactionFileInfo("000081",
+                  "/volume/bucket1/key-0000000730",
+                  "/volume/bucket2/key-0000099649",
+                  "keyTable"),
+              new CompactionFileInfo("000103",
+                  "/volume/bucket1/key-0000017460",
+                  "/volume/bucket3/key-0000097450",
+                  "keyTable"),
+              new CompactionFileInfo("000099",
+                  "/volume/bucket1/key-0000002310",
+                  "/volume/bucket3/key-0000098286",
+                  "keyTable"),
+              new CompactionFileInfo("000097",
+                  "/volume/bucket1/key-0000005965",
+                  "/volume/bucket3/key-0000099136",
+                  "keyTable"),
+              new CompactionFileInfo("000095",
+                  "/volume/bucket1/key-0000012424",
+                  "/volume/bucket3/key-0000083904",
+                  "keyTable")),
+          Collections.singletonList(new CompactionFileInfo("000106",
+              "/volume/bucket1/key-0000000730",
+              "/volume/bucket3/key-0000099136",
+              "keyTable")),
+          null),
+      new CompactionLogEntry(397, now(),
+          Arrays.asList(new CompactionFileInfo("000106",
+                  "/volume/bucket1/key-0000000730",
+                  "/volume/bucket3/key-0000099136",
+                  "keyTable"),
+              new CompactionFileInfo("000128",
+                  "/volume/bucket2/key-0000005031",
+                  "/volume/bucket3/key-0000084385",
+                  "keyTable"),
+              new CompactionFileInfo("000125",
+                  "/volume/bucket2/key-0000003491",
+                  "/volume/bucket3/key-0000088414",
+                  "keyTable"),
+              new CompactionFileInfo("000123",
+                  "/volume/bucket2/key-0000007390",
+                  "/volume/bucket3/key-0000094627",
+                  "keyTable"),
+              new CompactionFileInfo("000121",
+                  "/volume/bucket2/key-0000003232",
+                  "/volume/bucket3/key-0000094246",
+                  "keyTable")),
+          Collections.singletonList(new CompactionFileInfo("000131",
+              "/volume/bucket1/key-0000000730",
+              "/volume/bucket3/key-0000099136",
+              "keyTable")),
+          null
+      )
+  );
+
+  private static Map<String, String> columnFamilyToPrefixMap1 =
+      new HashMap<String, String>() {
+        {
+          put("keyTable", "/volume/bucket1/");
+          // Simply using bucketName instead of ID for the test.
+          put("directoryTable", "/volume/bucket1/");
+          put("fileTable", "/volume/bucket1/");
+        }
+      };
+
+  private static Map<String, String> columnFamilyToPrefixMap2 =
+      new HashMap<String, String>() {
+        {
+          put("keyTable", "/volume/bucket2/");
+          // Simply using bucketName instead of ID for the test.
+          put("directoryTable", "/volume/bucket2/");
+          put("fileTable", "/volume/bucket2/");
+        }
+      };
+
+  private static Map<String, String> columnFamilyToPrefixMap3 =
+      new HashMap<String, String>() {
+        {
+          put("keyTable", "/volume/bucket3/");
+          // Simply using bucketName instead of ID for the test.
+          put("directoryTable", "/volume/bucket3/");
+          put("fileTable", "/volume/bucket3/");
+        }
+      };
 
   private static final int NUM_ROW = 250000;
   private static final int SNAPSHOT_EVERY_SO_MANY_KEYS = 49999;
@@ -1005,30 +1162,6 @@ private void printMutableGraphFromAGivenNode(
     }
   }
 
-  private static final List<List<String>> SST_FILES_BY_LEVEL = Arrays.asList(
-      Arrays.asList("000015", "000013", "000011", "000009"),
-      Arrays.asList("000018", "000016", "000017", "000026", "000024", "000022",
-          "000020"),
-      Arrays.asList("000027", "000030", "000028", "000029", "000031", "000039",
-          "000037", "000035", "000033"),
-      Arrays.asList("000040", "000044", "000042", "000043", "000045", "000041",
-          "000046", "000054", "000052", "000050", "000048"),
-      Arrays.asList("000059", "000055", "000056", "000060", "000057", "000058")
-  );
-
-  private static final List<List<CompactionNode>> COMPACTION_NODES_BY_LEVEL =
-      SST_FILES_BY_LEVEL.stream()
-          .map(sstFiles ->
-              sstFiles.stream()
-                  .map(
-                      sstFile -> new CompactionNode(sstFile,
-                          1000L,
-                          Long.parseLong(sstFile.substring(0, 6)),
-                          null, null, null
-                      ))
-                  .collect(Collectors.toList()))
-          .collect(Collectors.toList());
-
   /**
    * Creates a backward compaction DAG from a list of level nodes.
    * It assumes that at each level files get compacted to the half of number
@@ -1703,140 +1836,6 @@ private void createFileWithContext(String fileName, String context)
     }
   }
 
-  private final List<CompactionLogEntry> compactionLogEntryList = Arrays.asList(
-      new CompactionLogEntry(101, System.currentTimeMillis(),
-          Arrays.asList(
-              new CompactionFileInfo("000068", "/volume/bucket2",
-                  "/volume/bucket2", "bucketTable"),
-              new CompactionFileInfo("000057", "/volume/bucket1",
-                  "/volume/bucket1", "bucketTable")),
-          Collections.singletonList(
-              new CompactionFileInfo("000086", "/volume/bucket1",
-                  "/volume/bucket2", "bucketTable")),
-          null),
-      new CompactionLogEntry(178, System.currentTimeMillis(),
-          Arrays.asList(new CompactionFileInfo("000078",
-                  "/volume/bucket1/key-0000001411",
-                  "/volume/bucket2/key-0000099649",
-                  "keyTable"),
-              new CompactionFileInfo("000075",
-                  "/volume/bucket1/key-0000016536",
-                  "/volume/bucket2/key-0000098897",
-                  "keyTable"),
-              new CompactionFileInfo("000073",
-                  "/volume/bucket1/key-0000000730",
-                  "/volume/bucket2/key-0000097010",
-                  "keyTable"),
-              new CompactionFileInfo("000071",
-                  "/volume/bucket1/key-0000001820",
-                  "/volume/bucket2/key-0000097895",
-                  "keyTable"),
-              new CompactionFileInfo("000063",
-                  "/volume/bucket1/key-0000001016",
-                  "/volume/bucket1/key-0000099930",
-                  "keyTable")),
-          Collections.singletonList(new CompactionFileInfo("000081",
-              "/volume/bucket1/key-0000000730",
-              "/volume/bucket2/key-0000099649",
-              "keyTable")),
-          null
-      ),
-      new CompactionLogEntry(233, System.currentTimeMillis(),
-          Arrays.asList(
-              new CompactionFileInfo("000086", "/volume/bucket1",
-                  "/volume/bucket2", "bucketTable"),
-              new CompactionFileInfo("000088", "/volume/bucket3",
-                  "/volume/bucket3", "bucketTable")),
-          Collections.singletonList(
-              new CompactionFileInfo("000110", "/volume/bucket1",
-                  "/volume/bucket3", "bucketTable")
-          ),
-          null),
-      new CompactionLogEntry(256, System.currentTimeMillis(),
-          Arrays.asList(new CompactionFileInfo("000081",
-                  "/volume/bucket1/key-0000000730",
-                  "/volume/bucket2/key-0000099649",
-                  "keyTable"),
-              new CompactionFileInfo("000103",
-                  "/volume/bucket1/key-0000017460",
-                  "/volume/bucket3/key-0000097450",
-                  "keyTable"),
-              new CompactionFileInfo("000099",
-                  "/volume/bucket1/key-0000002310",
-                  "/volume/bucket3/key-0000098286",
-                  "keyTable"),
-              new CompactionFileInfo("000097",
-                  "/volume/bucket1/key-0000005965",
-                  "/volume/bucket3/key-0000099136",
-                  "keyTable"),
-              new CompactionFileInfo("000095",
-                  "/volume/bucket1/key-0000012424",
-                  "/volume/bucket3/key-0000083904",
-                  "keyTable")),
-          Collections.singletonList(new CompactionFileInfo("000106",
-              "/volume/bucket1/key-0000000730",
-              "/volume/bucket3/key-0000099136",
-              "keyTable")),
-          null),
-      new CompactionLogEntry(397, now(),
-          Arrays.asList(new CompactionFileInfo("000106",
-                  "/volume/bucket1/key-0000000730",
-                  "/volume/bucket3/key-0000099136",
-                  "keyTable"),
-              new CompactionFileInfo("000128",
-                  "/volume/bucket2/key-0000005031",
-                  "/volume/bucket3/key-0000084385",
-                  "keyTable"),
-              new CompactionFileInfo("000125",
-                  "/volume/bucket2/key-0000003491",
-                  "/volume/bucket3/key-0000088414",
-                  "keyTable"),
-              new CompactionFileInfo("000123",
-                  "/volume/bucket2/key-0000007390",
-                  "/volume/bucket3/key-0000094627",
-                  "keyTable"),
-              new CompactionFileInfo("000121",
-                  "/volume/bucket2/key-0000003232",
-                  "/volume/bucket3/key-0000094246",
-                  "keyTable")),
-          Collections.singletonList(new CompactionFileInfo("000131",
-              "/volume/bucket1/key-0000000730",
-              "/volume/bucket3/key-0000099136",
-              "keyTable")),
-          null
-      )
-  );
-
-  private static Map<String, String> columnFamilyToPrefixMap1 =
-      new HashMap<String, String>() {
-        {
-          put("keyTable", "/volume/bucket1/");
-          // Simply using bucketName instead of ID for the test.
-          put("directoryTable", "/volume/bucket1/");
-          put("fileTable", "/volume/bucket1/");
-        }
-      };
-
-  private static Map<String, String> columnFamilyToPrefixMap2 =
-      new HashMap<String, String>() {
-        {
-          put("keyTable", "/volume/bucket2/");
-          // Simply using bucketName instead of ID for the test.
-          put("directoryTable", "/volume/bucket2/");
-          put("fileTable", "/volume/bucket2/");
-        }
-      };
-
-  private static Map<String, String> columnFamilyToPrefixMap3 =
-      new HashMap<String, String>() {
-        {
-          put("keyTable", "/volume/bucket3/");
-          // Simply using bucketName instead of ID for the test.
-          put("directoryTable", "/volume/bucket3/");
-          put("fileTable", "/volume/bucket3/");
-        }
-      };
-
   /**
    * Test cases for testGetSSTDiffListWithoutDB.
    */
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java
index 8f321c8574..24da5351bd 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java
@@ -33,7 +33,6 @@
  * Test ContainerAttribute management.
  */
 public class TestContainerAttribute {
-  enum Key { K1, K2, K3 }
 
   private final Key key1 = Key.K1;
   private final Key key2 = Key.K2;
@@ -122,4 +121,6 @@ public void tesUpdate() throws SCMException {
     assertThrows(SCMException.class,
         () -> containerAttribute.update(key3, key1, id));
   }
+
+  enum Key { K1, K2, K3 }
 }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
index 15dc614a32..80df75b932 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
@@ -81,6 +81,24 @@
  * Test for PipelinePlacementPolicy.
  */
 public class TestPipelinePlacementPolicy {
+  private static final Node[] NODES = new NodeImpl[] {
+      new NodeImpl("h1", "/r1", NetConstants.NODE_COST_DEFAULT),
+      new NodeImpl("h2", "/r1", NetConstants.NODE_COST_DEFAULT),
+      new NodeImpl("h3", "/r2", NetConstants.NODE_COST_DEFAULT),
+      new NodeImpl("h4", "/r2", NetConstants.NODE_COST_DEFAULT),
+      new NodeImpl("h5", "/r3", NetConstants.NODE_COST_DEFAULT),
+      new NodeImpl("h6", "/r3", NetConstants.NODE_COST_DEFAULT),
+      new NodeImpl("h7", "/r4", NetConstants.NODE_COST_DEFAULT),
+      new NodeImpl("h8", "/r4", NetConstants.NODE_COST_DEFAULT),
+  };
+
+  // 3 racks with single node.
+  private static final Node[] SINGLE_NODE_RACK = new NodeImpl[] {
+      new NodeImpl("h1", "/r1", NetConstants.NODE_COST_DEFAULT),
+      new NodeImpl("h2", "/r2", NetConstants.NODE_COST_DEFAULT),
+      new NodeImpl("h3", "/r3", NetConstants.NODE_COST_DEFAULT)
+  };
+
   private MockNodeManager nodeManager;
   private PipelineStateManager stateManager;
   private OzoneConfiguration conf;
@@ -359,24 +377,6 @@ public void testRackAwarenessNotEnabledWithFallBack() throws SCMException {
     assertEquals(results.get(0).getNetworkLocation(), results.get(2).getNetworkLocation());
   }
 
-  private static final Node[] NODES = new NodeImpl[] {
-      new NodeImpl("h1", "/r1", NetConstants.NODE_COST_DEFAULT),
-      new NodeImpl("h2", "/r1", NetConstants.NODE_COST_DEFAULT),
-      new NodeImpl("h3", "/r2", NetConstants.NODE_COST_DEFAULT),
-      new NodeImpl("h4", "/r2", NetConstants.NODE_COST_DEFAULT),
-      new NodeImpl("h5", "/r3", NetConstants.NODE_COST_DEFAULT),
-      new NodeImpl("h6", "/r3", NetConstants.NODE_COST_DEFAULT),
-      new NodeImpl("h7", "/r4", NetConstants.NODE_COST_DEFAULT),
-      new NodeImpl("h8", "/r4", NetConstants.NODE_COST_DEFAULT),
-  };
-
-  // 3 racks with single node.
-  private static final Node[] SINGLE_NODE_RACK = new NodeImpl[] {
-      new NodeImpl("h1", "/r1", NetConstants.NODE_COST_DEFAULT),
-      new NodeImpl("h2", "/r2", NetConstants.NODE_COST_DEFAULT),
-      new NodeImpl("h3", "/r3", NetConstants.NODE_COST_DEFAULT)
-  };
-
   private NetworkTopology createNetworkTopologyOnDifRacks() {
     NetworkTopology topology =
         new NetworkTopologyImpl(new OzoneConfiguration());
diff --git a/hadoop-hdds/test-utils/src/test/java/org/apache/ozone/test/LambdaTestUtils.java b/hadoop-hdds/test-utils/src/test/java/org/apache/ozone/test/LambdaTestUtils.java
index b3529ff10b..81ca218fab 100644
--- a/hadoop-hdds/test-utils/src/test/java/org/apache/ozone/test/LambdaTestUtils.java
+++ b/hadoop-hdds/test-utils/src/test/java/org/apache/ozone/test/LambdaTestUtils.java
@@ -36,14 +36,14 @@
  * with jitter: test author gets to choose).
  */
 public final class LambdaTestUtils {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(LambdaTestUtils.class);
 
-  private LambdaTestUtils() {
-  }
+  private static final Logger LOG = LoggerFactory.getLogger(LambdaTestUtils.class);
 
   public static final String NULL_RESULT = "(null)";
 
+  private LambdaTestUtils() {
+  }
+
   /**
    * Interface to implement for converting a timeout into some form
    * of exception to raise.
diff --git a/hadoop-ozone/cli-shell/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddress.java b/hadoop-ozone/cli-shell/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddress.java
index 30e4192ce6..90d35841ac 100644
--- a/hadoop-ozone/cli-shell/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddress.java
+++ b/hadoop-ozone/cli-shell/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddress.java
@@ -34,6 +34,8 @@
  */
 public class TestOzoneAddress {
 
+  private OzoneAddress address;
+
   public static Collection<Object[]> data() {
     return Arrays.asList(new Object[][] {
         {"o3://localhost:9878/"},
@@ -44,8 +46,6 @@ public static Collection<Object[]> data() {
     });
   }
 
-  private OzoneAddress address;
-
   @ParameterizedTest
   @MethodSource("data")
   public void checkRootUrlType(String prefix) throws OzoneClientException {
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java
index 69d69f7fcb..d5b8723e0e 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java
@@ -75,17 +75,6 @@ public class TestS3GrpcOmTransport {
 
   private String serverName;
 
-  private ServiceException createNotLeaderException() {
-    RaftPeerId raftPeerId = RaftPeerId.getRaftPeerId("testNodeId");
-
-    // TODO: Set suggest leaderID. Right now, client is not using suggest
-    // leaderID. Need to fix this.
-    OMNotLeaderException notLeaderException =
-        new OMNotLeaderException(raftPeerId);
-    LOG.debug(notLeaderException.getMessage());
-    return new ServiceException(notLeaderException);
-  }
-
   private final OzoneManagerServiceGrpc.OzoneManagerServiceImplBase
       serviceImpl =
       mock(OzoneManagerServiceGrpc.OzoneManagerServiceImplBase.class,
@@ -120,6 +109,17 @@ public void submitRequest(org.apache.hadoop.ozone.protocol.proto
 
   private GrpcOmTransport client;
 
+  private ServiceException createNotLeaderException() {
+    RaftPeerId raftPeerId = RaftPeerId.getRaftPeerId("testNodeId");
+
+    // TODO: Set suggest leaderID. Right now, client is not using suggest
+    // leaderID. Need to fix this.
+    OMNotLeaderException notLeaderException =
+        new OMNotLeaderException(raftPeerId);
+    LOG.debug(notLeaderException.getMessage());
+    return new ServiceException(notLeaderException);
+  }
+
   @BeforeEach
   public void setUp() throws Exception {
     // Generate a unique in-process server name.
diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java
index 1acc9386e6..a4c4ea0d32 100644
--- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java
@@ -55,8 +55,6 @@ public class TestMiniChaosOzoneCluster extends GenericCli {
   private final List<Class<? extends LoadGenerator>> loadClasses
       = new ArrayList<>();
 
-  enum AllowedBucketLayouts { FILE_SYSTEM_OPTIMIZED, OBJECT_STORE }
-
   @Option(names = {"-d", "--num-datanodes", "--numDatanodes"},
       description = "num of datanodes. Full name --numDatanodes will be" +
           " removed in later versions.")
@@ -219,4 +217,6 @@ void test() throws Exception {
     numMinutes = 2;
     startChaosCluster();
   }
+
+  enum AllowedBucketLayouts { FILE_SYSTEM_OPTIMIZED, OBJECT_STORE }
 }
diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadGenerator.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadGenerator.java
index 52cd4440c8..7e8c913806 100644
--- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadGenerator.java
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadGenerator.java
@@ -24,6 +24,9 @@
  * Interface for load generator.
  */
 public abstract class LoadGenerator {
+
+  private static final String KEY_NAME_DELIMITER = "_";
+
   public static List<Class<? extends LoadGenerator>> getClassList() {
     List<Class<? extends LoadGenerator>> classList = new ArrayList<>();
 
@@ -49,8 +52,6 @@ public static List<Class<? extends LoadGenerator>> getClassList() {
    * }
    */
 
-  private static final String KEY_NAME_DELIMITER = "_";
-
   public abstract void initialize() throws Exception;
 
   public abstract void generateLoad() throws Exception;
diff --git a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/ozone/fs/http/server/metrics/TestHttpFSMetrics.java b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/ozone/fs/http/server/metrics/TestHttpFSMetrics.java
index c34df89505..fe90963142 100644
--- a/hadoop-ozone/httpfsgateway/src/test/java/org/apache/ozone/fs/http/server/metrics/TestHttpFSMetrics.java
+++ b/hadoop-ozone/httpfsgateway/src/test/java/org/apache/ozone/fs/http/server/metrics/TestHttpFSMetrics.java
@@ -52,21 +52,6 @@ public class TestHttpFSMetrics {
   private static FileSystem mockFs = mock(FileSystem.class);
   private static FSDataOutputStream fsDataOutputStream = mock(FSDataOutputStream.class);
 
-  /**
-   * Mock FileSystemAccessService.
-   */
-  public static class MockFileSystemAccessService extends FileSystemAccessService {
-    @Override
-    protected FileSystem createFileSystem(Configuration namenodeConf) throws IOException {
-      return mockFs;
-    }
-
-    @Override
-    protected void closeFileSystem(FileSystem fs) throws IOException {
-      // do nothing
-    }
-  }
-
   private HttpFSServerWebApp webApp;
   private HttpFSServerMetrics metrics;
   private Configuration conf;
@@ -143,4 +128,19 @@ public void testFsAppend() throws Exception {
     assertEquals(initialAppendOps + 1, metrics.getOpsAppend());
     assertEquals(initialBytesWritten + 4, metrics.getBytesWritten());
   }
+
+  /**
+   * Mock FileSystemAccessService.
+   */
+  public static class MockFileSystemAccessService extends FileSystemAccessService {
+    @Override
+    protected FileSystem createFileSystem(Configuration namenodeConf) throws IOException {
+      return mockFs;
+    }
+
+    @Override
+    protected void closeFileSystem(FileSystem fs) throws IOException {
+      // do nothing
+    }
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java
index 3c62094393..6987b46167 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java
@@ -47,8 +47,10 @@
 /**
  * Test getFileStatus and related listing operations.
  */
-public abstract class AbstractContractGetFileStatusTest extends
-    AbstractFSContractTestBase {
+public abstract class AbstractContractGetFileStatusTest extends AbstractFSContractTestBase {
+
+  private static final PathFilter ALL_PATHS = new AllPathsFilter();
+  private static final PathFilter NO_PATHS = new NoPathsFilter();
 
   private Path testPath;
   private Path target;
@@ -691,9 +693,6 @@ private List<LocatedFileStatus> verifyListLocatedStatusNextCalls(
     return result;
   }
 
-  private static final PathFilter ALL_PATHS = new AllPathsFilter();
-  private static final PathFilter NO_PATHS = new NoPathsFilter();
-
   /**
    * Accept everything.
    */
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
index f3218e4c16..a4d7e9675d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
@@ -149,11 +149,6 @@ abstract class AbstractOzoneFileSystemTest {
       p -> !p.toUri().getPath().startsWith(TRASH_ROOT.toString());
   private String fsRoot;
 
-  AbstractOzoneFileSystemTest(boolean setDefaultFs, BucketLayout layout) {
-    enabledFileSystemPaths = setDefaultFs;
-    bucketLayout = layout;
-  }
-
   private static final Logger LOG =
       LoggerFactory.getLogger(AbstractOzoneFileSystemTest.class);
 
@@ -176,6 +171,11 @@ abstract class AbstractOzoneFileSystemTest {
       .createUserForTesting(USER1,  new String[] {"usergroup"});
   private OzoneFileSystem userO3fs;
 
+  AbstractOzoneFileSystemTest(boolean setDefaultFs, BucketLayout layout) {
+    enabledFileSystemPaths = setDefaultFs;
+    bucketLayout = layout;
+  }
+
   @BeforeAll
   void init() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java
index d86e2f3b11..df4913789f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java
@@ -39,6 +39,7 @@
 import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.LIST;
 import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ;
 import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE;
+import static org.apache.hadoop.security.UserGroupInformation.createUserForTesting;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertArrayEquals;
 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -146,12 +147,38 @@
 @TestInstance(TestInstance.Lifecycle.PER_CLASS)
 abstract class AbstractRootedOzoneFileSystemTest {
 
-  private static final Logger LOG =
-      LoggerFactory.getLogger(AbstractRootedOzoneFileSystemTest.class);
+  private static final Logger LOG = LoggerFactory.getLogger(AbstractRootedOzoneFileSystemTest.class);
 
   private static final float TRASH_INTERVAL = 0.05f; // 3 seconds
+
+  private static final String USER1 = "regularuser1";
+  private static final UserGroupInformation UGI_USER1 = createUserForTesting(USER1,  new String[] {"usergroup"});
+
   private OzoneClient client;
 
+  private final boolean enabledFileSystemPaths;
+  private final boolean isBucketFSOptimized;
+  private final boolean enableAcl;
+
+  private OzoneConfiguration conf;
+  private MiniOzoneCluster cluster;
+  private FileSystem fs;
+  private RootedOzoneFileSystem ofs;
+  private ObjectStore objectStore;
+  private BasicRootedOzoneClientAdapterImpl adapter;
+  private Trash trash;
+
+  private String volumeName;
+  private Path volumePath;
+  private String bucketName;
+  // Store path commonly used by tests that test functionality within a bucket
+  private Path bucketPath;
+  private String rootPath;
+  private final BucketLayout bucketLayout;
+
+  // Non-privileged OFS instance
+  private RootedOzoneFileSystem userOfs;
+
   AbstractRootedOzoneFileSystemTest(BucketLayout bucketLayout, boolean setDefaultFs,
       boolean isAclEnabled) {
     // Initialize the cluster before EACH set of parameters
@@ -195,32 +222,6 @@ public Path getBucketPath() {
     return bucketPath;
   }
 
-  private final boolean enabledFileSystemPaths;
-  private final boolean isBucketFSOptimized;
-  private final boolean enableAcl;
-
-  private OzoneConfiguration conf;
-  private MiniOzoneCluster cluster;
-  private FileSystem fs;
-  private RootedOzoneFileSystem ofs;
-  private ObjectStore objectStore;
-  private BasicRootedOzoneClientAdapterImpl adapter;
-  private Trash trash;
-
-  private String volumeName;
-  private Path volumePath;
-  private String bucketName;
-  // Store path commonly used by tests that test functionality within a bucket
-  private Path bucketPath;
-  private String rootPath;
-  private final BucketLayout bucketLayout;
-
-  private static final String USER1 = "regularuser1";
-  private static final UserGroupInformation UGI_USER1 = UserGroupInformation
-      .createUserForTesting(USER1,  new String[] {"usergroup"});
-  // Non-privileged OFS instance
-  private RootedOzoneFileSystem userOfs;
-
   @BeforeAll
   void initClusterAndEnv() throws IOException, InterruptedException, TimeoutException {
     conf = new OzoneConfiguration();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHddsUpgradeUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHddsUpgradeUtils.java
index 4531b3c738..aa7c78b2e5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHddsUpgradeUtils.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHddsUpgradeUtils.java
@@ -58,15 +58,14 @@
  */
 public final class TestHddsUpgradeUtils {
 
-  private TestHddsUpgradeUtils() { }
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestHddsUpgradeUtils.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestHddsUpgradeUtils.class);
 
   private static final ReplicationConfig RATIS_THREE =
       ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS,
           HddsProtos.ReplicationFactor.THREE);
 
+  private TestHddsUpgradeUtils() { }
+
   public static void waitForFinalizationFromClient(
       StorageContainerLocationProtocol scmClient, String clientID)
       throws Exception {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java
index 8bdcd39ab4..79ef3f50b2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java
@@ -49,6 +49,8 @@ abstract class TestInputStreamBase {
   static final int BLOCK_SIZE = 2 * MAX_FLUSH_SIZE;   // 8MB
   static final int BYTES_PER_CHECKSUM = 256 * 1024;   // 256KB
 
+  private MiniOzoneCluster cluster;
+
   protected static MiniOzoneCluster newCluster() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
 
@@ -90,8 +92,6 @@ protected void updateConfig(ContainerLayoutVersion layout) {
     closeContainers();
   }
 
-  private MiniOzoneCluster cluster;
-
   protected MiniOzoneCluster getCluster() {
     return cluster;
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
index c1b2d002e1..680aa6967e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
@@ -104,6 +104,9 @@
  */
 @Timeout(5000)
 public class TestOMRatisSnapshots {
+  // tried up to 1000 snapshots and this test works, but some of the
+  //  timeouts have to be increased.
+  private static final int SNAPSHOTS_TO_CREATE = 100;
 
   private MiniOzoneHAClusterImpl cluster = null;
   private ObjectStore objectStore;
@@ -191,10 +194,6 @@ public void shutdown() {
     }
   }
 
-  // tried up to 1000 snapshots and this test works, but some of the
-  //  timeouts have to be increased.
-  private static final int SNAPSHOTS_TO_CREATE = 100;
-
   @Test
   public void testInstallSnapshot(@TempDir Path tempDir) throws Exception {
     // Get the leader OM
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
index c7e9fa3bbc..0a1aeedc9e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
@@ -102,16 +102,6 @@ public abstract class AbstractContractDistCpTest
    */
   protected static final int DEFAULT_WIDTH = 2;
 
-  /**
-   * The timeout value is extended over the default so that large updates
-   * are allowed to take time, especially to remote stores.
-   * @return the current test timeout
-   */
-  @Override
-  protected int getTestTimeoutMillis() {
-    return 15  * 60 * 1000;
-  }
-
   private Configuration conf;
   private FileSystem localFS, remoteFS;
   private Path localDir, remoteDir;
@@ -152,6 +142,16 @@ protected int getTestTimeoutMillis() {
 
   private Path inputDirUnderOutputDir;
 
+  /**
+   * The timeout value is extended over the default so that large updates
+   * are allowed to take time, especially to remote stores.
+   * @return the current test timeout
+   */
+  @Override
+  protected int getTestTimeoutMillis() {
+    return 15  * 60 * 1000;
+  }
+
   @Override
   protected Configuration createConfiguration() {
     Configuration newConf = new Configuration();
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneLockProvider.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneLockProvider.java
index a1b46df3d7..84c75e87bc 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneLockProvider.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneLockProvider.java
@@ -42,6 +42,9 @@ public class TestOzoneLockProvider {
   private OzoneManager ozoneManager;
   private OzoneLockStrategy ozoneLockStrategy;
 
+  private boolean keyPathLockEnabled;
+  private boolean enableFileSystemPaths;
+
   public static Collection<Object[]> data() {
     return Arrays.asList(
         new Object[]{true, true},
@@ -50,9 +53,6 @@ public static Collection<Object[]> data() {
         new Object[]{false, false});
   }
 
-  private boolean keyPathLockEnabled;
-  private boolean enableFileSystemPaths;
-
   @BeforeEach
   public void setup() throws Exception {
     ozoneManager = mock(OzoneManager.class);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/testvalidatorset1/GeneralValidatorsForTesting.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/testvalidatorset1/GeneralValidatorsForTesting.java
index 7308969287..07269c9e97 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/testvalidatorset1/GeneralValidatorsForTesting.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/testvalidatorset1/GeneralValidatorsForTesting.java
@@ -52,6 +52,8 @@ public final class GeneralValidatorsForTesting {
    */
   private static boolean validatorTestsRunning = false;
 
+  private static List<ValidationListener> listeners = new ArrayList<>();
+
   public static void startValidatorTest() {
     validatorTestsRunning = true;
   }
@@ -74,8 +76,6 @@ public interface ValidationListener {
     void validationCalled(String calledMethodName);
   }
 
-  private static List<ValidationListener> listeners = new ArrayList<>();
-
   public static void addListener(ValidationListener listener) {
     listeners.add(listener);
   }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMUpgradeFinalizer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMUpgradeFinalizer.java
index 687bd9ca4a..60b192d392 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMUpgradeFinalizer.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMUpgradeFinalizer.java
@@ -65,6 +65,8 @@ public class TestOMUpgradeFinalizer {
   @Mock
   private OMLayoutVersionManager versionManager;
 
+  private int storedLayoutVersion = 0;
+
   @Test
   public void testEmitsFinalizedStatusIfAlreadyFinalized() throws Exception {
 
@@ -252,8 +254,6 @@ private Iterable<OMLayoutFeature> mockFeatures(
     return ret;
   }
 
-  private int storedLayoutVersion = 0;
-
   private OzoneManager mockOzoneManager(int initialLayoutVersion) {
     OzoneManager mock = mock(OzoneManager.class);
     OMStorage st = mock(OMStorage.class);
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java
index d96236f708..42671e2c29 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java
@@ -39,6 +39,10 @@
  */
 public class ObjectStoreStub extends ObjectStore {
 
+  private static OzoneConfiguration conf = new OzoneConfiguration();
+  private Map<String, OzoneVolumeStub> volumes = new HashMap<>();
+  private Map<String, Boolean> bucketEmptyStatus = new HashMap<>();
+
   public ObjectStoreStub() {
     super();
   }
@@ -47,10 +51,6 @@ public ObjectStoreStub(ConfigurationSource conf, ClientProtocol proxy) {
     super(conf, proxy);
   }
 
-  private Map<String, OzoneVolumeStub> volumes = new HashMap<>();
-  private Map<String, Boolean> bucketEmptyStatus = new HashMap<>();
-  private static OzoneConfiguration conf = new OzoneConfiguration();
-
   @Override
   public void createVolume(String volumeName) throws IOException {
     createVolume(volumeName,


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

