This is an automated email from the ASF dual-hosted git repository.
myskov pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 466028abad HDDS-13559. Convert redundant fields to local var in
ozone-integration-test module (#8922)
466028abad is described below
commit 466028abadb4250d57f47d2eb05e0b4b9267500e
Author: Ivan Zlenko <[email protected]>
AuthorDate: Tue Aug 26 14:11:31 2025 +0400
HDDS-13559. Convert redundant fields to local var in ozone-integration-test
module (#8922)
---
.../fs/contract/AbstractContractConcatTest.java | 3 +-
.../AbstractContractGetFileStatusTest.java | 3 +-
.../fs/contract/AbstractContractSetTimesTest.java | 3 +-
.../apache/hadoop/fs/ozone/TestLeaseRecovery.java | 3 +-
.../hadoop/fs/ozone/TestOzoneFileChecksum.java | 9 ++--
.../apache/hadoop/hdds/scm/TestCommitInRatis.java | 34 ++++++---------
.../hadoop/hdds/scm/TestFailoverWithSCMHA.java | 17 ++++----
.../hdds/scm/TestSCMDbCheckpointServlet.java | 9 ++--
.../hdds/scm/TestSCMInstallSnapshotWithHA.java | 19 ++++----
.../apache/hadoop/hdds/scm/TestSCMSnapshot.java | 3 +-
.../hadoop/hdds/scm/TestSecretKeySnapshot.java | 5 +--
.../hdds/scm/TestStorageContainerManagerHA.java | 5 +--
.../apache/hadoop/hdds/scm/TestWatchForCommit.java | 26 +++++------
.../hadoop/hdds/scm/TestXceiverClientMetrics.java | 3 +-
.../hdds/scm/pipeline/TestMultiRaftSetup.java | 6 +--
.../hdds/scm/pipeline/TestPipelineClose.java | 7 +--
.../TestSCMPipelineBytesWrittenMetrics.java | 3 +-
.../hadoop/hdds/scm/pipeline/TestSCMRestart.java | 6 +--
.../safemode/TestSCMSafeModeWithPipelineRules.java | 3 +-
.../hadoop/hdds/scm/storage/TestCommitWatcher.java | 51 ++++++++++------------
.../hdds/scm/storage/TestContainerCommandsEC.java | 3 +-
.../org/apache/hadoop/ozone/TestBlockTokens.java | 5 +--
.../apache/hadoop/ozone/TestBlockTokensCLI.java | 5 +--
.../apache/hadoop/ozone/TestOMSortDatanodes.java | 4 +-
.../ozone/client/rpc/OzoneRpcClientTests.java | 6 +--
.../rpc/TestContainerReplicationEndToEnd.java | 3 +-
.../rpc/TestContainerStateMachineFailures.java | 3 +-
.../rpc/TestContainerStateMachineFlushDelay.java | 21 ++++-----
.../client/rpc/TestDeleteWithInAdequateDN.java | 3 +-
.../rpc/TestFailureHandlingByClientFlushDelay.java | 27 +++++-------
.../client/rpc/TestHybridPipelineOnDatanode.java | 3 +-
.../rpc/TestMultiBlockWritesWithDnFailures.java | 3 +-
...estOzoneClientRetriesOnExceptionFlushDelay.java | 24 +++++-----
.../rpc/TestOzoneClientRetriesOnExceptions.java | 28 +++++-------
.../ozone/container/TestECContainerRecovery.java | 41 ++++++++---------
.../TestContainerCommandReconciliation.java | 5 +--
.../TestDatanodeHddsVolumeFailureToleration.java | 3 +-
.../freon/TestFreonWithDatanodeFastRestart.java | 3 +-
.../ozone/freon/TestFreonWithPipelineDestroy.java | 3 +-
.../hadoop/ozone/fsck/TestContainerMapper.java | 3 +-
.../apache/hadoop/ozone/om/TestKeyManagerImpl.java | 5 +--
.../hadoop/ozone/om/TestOMRatisSnapshots.java | 9 ++--
.../ozone/om/TestOmContainerLocationCache.java | 3 +-
.../ozone/om/TestOzoneManagerConfiguration.java | 22 +++++-----
.../hadoop/ozone/om/TestOzoneManagerRestart.java | 3 +-
.../ozone/om/service/TestRangerBGSyncService.java | 12 ++---
.../ozone/om/service/TestRootedDDSWithFSO.java | 6 +--
.../snapshot/TestOzoneManagerSnapshotProvider.java | 12 ++---
.../ozone/shell/TestDeletedBlocksTxnShell.java | 12 ++---
.../hadoop/ozone/shell/TestOzoneTenantShell.java | 3 +-
.../ozone/shell/TestTransferLeadershipShell.java | 23 +++++-----
.../tools/contract/AbstractContractDistCpTest.java | 8 +---
52 files changed, 217 insertions(+), 315 deletions(-)
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java
index 08659c9f9f..2a7c48acd8 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java
@@ -34,7 +34,6 @@
*/
public abstract class AbstractContractConcatTest extends
AbstractFSContractTestBase {
- private Path testPath;
private Path srcFile;
private Path zeroByteFile;
private Path target;
@@ -46,7 +45,7 @@ public void setup() throws Exception {
skipIfUnsupported(SUPPORTS_CONCAT);
//delete the test directory
- testPath = path("test");
+ Path testPath = path("test");
srcFile = new Path(testPath, "small.txt");
zeroByteFile = new Path(testPath, "zero.txt");
target = new Path(testPath, "target");
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java
index 6987b46167..6d09248a09 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java
@@ -52,7 +52,6 @@ public abstract class AbstractContractGetFileStatusTest
extends AbstractFSContra
private static final PathFilter ALL_PATHS = new AllPathsFilter();
private static final PathFilter NO_PATHS = new NoPathsFilter();
- private Path testPath;
private Path target;
// the tree parameters. Kept small to avoid killing object store test
@@ -70,7 +69,7 @@ public void setup() throws Exception {
skipIfUnsupported(SUPPORTS_GETFILESTATUS);
//delete the test directory
- testPath = path("test");
+ Path testPath = path("test");
target = new Path(testPath, "target");
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java
index 5cd71e7ecc..cc8b40add6 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java
@@ -30,7 +30,6 @@
public abstract class AbstractContractSetTimesTest extends
AbstractFSContractTestBase {
- private Path testPath;
private Path target;
@BeforeEach
@@ -40,7 +39,7 @@ public void setup() throws Exception {
skipIfUnsupported(SUPPORTS_SETTIMES);
//delete the test directory
- testPath = path("test");
+ Path testPath = path("test");
target = new Path(testPath, "target");
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java
index eaf98317c7..800cc04f47 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java
@@ -99,7 +99,6 @@ public class TestLeaseRecovery extends OzoneTestBase {
private static final AtomicInteger FILE_COUNTER = new AtomicInteger();
private MiniOzoneCluster cluster;
- private OzoneBucket bucket;
private OzoneClient client;
private final OzoneConfiguration conf = new OzoneConfiguration();
@@ -163,7 +162,7 @@ public void init() throws IOException, InterruptedException,
client = cluster.newClient();
// create a volume and a bucket to be used by OzoneFileSystem
- bucket = TestDataUtil.createVolumeAndBucket(client, layout);
+ OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, layout);
GenericTestUtils.setLogLevel(XceiverClientGrpc.class, Level.DEBUG);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileChecksum.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileChecksum.java
index 3e4c991d4b..20bc7bb44e 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileChecksum.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileChecksum.java
@@ -83,9 +83,6 @@ public class TestOzoneFileChecksum {
private OzoneConfiguration conf;
private MiniOzoneCluster cluster = null;
private FileSystem fs;
- private RootedOzoneFileSystem ofs;
- private BasicRootedOzoneClientAdapterImpl adapter;
- private String rootPath;
private OzoneClient client;
@BeforeEach
@@ -99,7 +96,7 @@ void setup() throws IOException,
.build();
cluster.waitForClusterToBeReady();
client = cluster.newClient();
- rootPath = String.format("%s://%s/",
+ String rootPath = String.format("%s://%s/",
OzoneConsts.OZONE_OFS_URI_SCHEME, conf.get(OZONE_OM_ADDRESS_KEY));
String disableCache = String.format("fs.%s.impl.disable.cache",
OzoneConsts.OZONE_OFS_URI_SCHEME);
@@ -126,8 +123,8 @@ void testEcFileChecksum(List<Integer> missingIndexes,
double checksumSizeInMB) t
conf.setInt("ozone.client.bytes.per.checksum", (int) (checksumSizeInMB *
1024 * 1024));
fs = FileSystem.get(conf);
- ofs = (RootedOzoneFileSystem) fs;
- adapter = (BasicRootedOzoneClientAdapterImpl) ofs.getAdapter();
+ RootedOzoneFileSystem ofs = (RootedOzoneFileSystem) fs;
+ BasicRootedOzoneClientAdapterImpl adapter =
(BasicRootedOzoneClientAdapterImpl) ofs.getAdapter();
String volumeName = UUID.randomUUID().toString();
String legacyBucket = UUID.randomUUID().toString();
String ecBucketName = UUID.randomUUID().toString();
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestCommitInRatis.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestCommitInRatis.java
index 44b32bd5c6..8e1ee32980 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestCommitInRatis.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestCommitInRatis.java
@@ -51,15 +51,14 @@
* This class tests the 2 way and 3 way commit in Ratis.
*/
public class TestCommitInRatis {
+ private static final String VOLUME_NAME = "watchforcommithandlingtest";
+ private static final int CHUNK_SIZE = 100;
+ private static final int FLUSH_SIZE = 2 * CHUNK_SIZE;
+ private static final int MAX_FLUSH_SIZE = 2 * FLUSH_SIZE;
+ private static final int BLOCK_SIZE = 2 * MAX_FLUSH_SIZE;
+
private MiniOzoneCluster cluster;
private OzoneClient client;
- private ObjectStore objectStore;
- private String volumeName;
- private String bucketName;
- private int chunkSize;
- private int flushSize;
- private int maxFlushSize;
- private int blockSize;
private StorageContainerLocationProtocolClientSideTranslatorPB
storageContainerLocationClient;
@@ -71,11 +70,6 @@ public class TestCommitInRatis {
* @throws IOException
*/
private void startCluster(OzoneConfiguration conf) throws Exception {
- chunkSize = 100;
- flushSize = 2 * chunkSize;
- maxFlushSize = 2 * flushSize;
- blockSize = 2 * maxFlushSize;
-
// Make sure the pipeline does not get destroyed quickly
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 60000,
TimeUnit.SECONDS);
@@ -92,10 +86,10 @@ private void startCluster(OzoneConfiguration conf) throws
Exception {
conf.setFromObject(raftClientConfig);
ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
- .setBlockSize(blockSize)
- .setChunkSize(chunkSize)
- .setStreamBufferFlushSize(flushSize)
- .setStreamBufferMaxSize(maxFlushSize)
+ .setBlockSize(BLOCK_SIZE)
+ .setChunkSize(CHUNK_SIZE)
+ .setStreamBufferFlushSize(FLUSH_SIZE)
+ .setStreamBufferMaxSize(MAX_FLUSH_SIZE)
.applyTo(conf);
conf.setQuietMode(false);
@@ -105,11 +99,9 @@ private void startCluster(OzoneConfiguration conf) throws
Exception {
cluster.waitForClusterToBeReady();
// the easiest way to create an open container is creating a key
client = OzoneClientFactory.getRpcClient(conf);
- objectStore = client.getObjectStore();
- volumeName = "watchforcommithandlingtest";
- bucketName = volumeName;
- objectStore.createVolume(volumeName);
- objectStore.getVolume(volumeName).createBucket(bucketName);
+ ObjectStore objectStore = client.getObjectStore();
+ objectStore.createVolume(VOLUME_NAME);
+ objectStore.getVolume(VOLUME_NAME).createBucket(VOLUME_NAME);
storageContainerLocationClient = cluster
.getStorageContainerLocationClient();
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestFailoverWithSCMHA.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestFailoverWithSCMHA.java
index 68617011d7..cb9baa6855 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestFailoverWithSCMHA.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestFailoverWithSCMHA.java
@@ -56,12 +56,13 @@
* Tests failover with SCM HA setup.
*/
public class TestFailoverWithSCMHA {
+ private static final String OM_SERVICE_ID = "om-service-test1";
+ private static final String SCM_SERVICE_ID = "scm-service-test1";
+ private static final int NUM_OF_OMS = 1;
+ private static final int NUM_OF_SCMS = 3;
+
private MiniOzoneHAClusterImpl cluster = null;
private OzoneConfiguration conf;
- private String omServiceId;
- private String scmServiceId;
- private int numOfOMs = 1;
- private int numOfSCMs = 3;
private static final long SNAPSHOT_THRESHOLD = 5;
@@ -73,15 +74,13 @@ public class TestFailoverWithSCMHA {
@BeforeEach
public void init() throws Exception {
conf = new OzoneConfiguration();
- omServiceId = "om-service-test1";
- scmServiceId = "scm-service-test1";
conf.setLong(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_THRESHOLD,
SNAPSHOT_THRESHOLD);
cluster = MiniOzoneCluster.newHABuilder(conf)
- .setOMServiceId(omServiceId)
- .setSCMServiceId(scmServiceId).setNumOfOzoneManagers(numOfOMs)
- .setNumOfStorageContainerManagers(numOfSCMs).setNumOfActiveSCMs(3)
+ .setOMServiceId(OM_SERVICE_ID)
+ .setSCMServiceId(SCM_SERVICE_ID).setNumOfOzoneManagers(NUM_OF_OMS)
+ .setNumOfStorageContainerManagers(NUM_OF_SCMS).setNumOfActiveSCMs(3)
.build();
cluster.waitForClusterToBeReady();
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java
index 74bbbb7f8c..4639b3cd69 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java
@@ -73,14 +73,11 @@
*/
public class TestSCMDbCheckpointServlet {
private MiniOzoneCluster cluster = null;
- private StorageContainerManager scm;
private SCMMetrics scmMetrics;
- private OzoneConfiguration conf;
private HttpServletRequest requestMock;
private HttpServletResponse responseMock;
private String method;
private SCMDBCheckpointServlet scmDbCheckpointServletMock;
- private ServletContext servletContextMock;
/**
* Create a MiniDFSCluster for testing.
@@ -91,12 +88,12 @@ public class TestSCMDbCheckpointServlet {
*/
@BeforeEach
public void init() throws Exception {
- conf = new OzoneConfiguration();
+ OzoneConfiguration conf = new OzoneConfiguration();
conf.setBoolean(OZONE_ACL_ENABLED, true);
cluster = MiniOzoneCluster.newBuilder(conf)
.build();
cluster.waitForClusterToBeReady();
- scm = cluster.getStorageContainerManager();
+ StorageContainerManager scm = cluster.getStorageContainerManager();
scmMetrics = StorageContainerManager.getMetrics();
requestMock = mock(HttpServletRequest.class);
@@ -125,7 +122,7 @@ public void init() throws Exception {
doCallRealMethod().when(scmDbCheckpointServletMock)
.processMetadataSnapshotRequest(any(), any(), anyBoolean(),
anyBoolean());
- servletContextMock = mock(ServletContext.class);
+ ServletContext servletContextMock = mock(ServletContext.class);
when(scmDbCheckpointServletMock.getServletContext())
.thenReturn(servletContextMock);
when(servletContextMock.getAttribute(OzoneConsts.SCM_CONTEXT_ATTRIBUTE))
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java
index d63e94dc04..b1a9ac4afc 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java
@@ -66,12 +66,13 @@
@Flaky("HDDS-5631")
public class TestSCMInstallSnapshotWithHA {
+ private static final String OM_SERVICE_ID = "om-service-test1";
+ private static final String SCM_SERVICE_ID = "scm-service-test1";
+ private static final int NUM_OF_OMS = 1;
+ private static final int NUM_OF_SCMS = 3;
+
private MiniOzoneHAClusterImpl cluster = null;
private OzoneConfiguration conf;
- private String omServiceId;
- private String scmServiceId;
- private int numOfOMs = 1;
- private int numOfSCMs = 3;
private static final long SNAPSHOT_THRESHOLD = 5;
private static final int LOG_PURGE_GAP = 5;
@@ -84,8 +85,6 @@ public class TestSCMInstallSnapshotWithHA {
@BeforeEach
public void init() throws Exception {
conf = new OzoneConfiguration();
- omServiceId = "om-service-test1";
- scmServiceId = "scm-service-test1";
conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_RAFT_LOG_PURGE_ENABLED, true);
conf.setInt(ScmConfigKeys.OZONE_SCM_HA_RAFT_LOG_PURGE_GAP, LOG_PURGE_GAP);
@@ -93,10 +92,10 @@ public void init() throws Exception {
SNAPSHOT_THRESHOLD);
cluster = MiniOzoneCluster.newHABuilder(conf)
- .setOMServiceId(omServiceId)
- .setSCMServiceId(scmServiceId)
- .setNumOfOzoneManagers(numOfOMs)
- .setNumOfStorageContainerManagers(numOfSCMs)
+ .setOMServiceId(OM_SERVICE_ID)
+ .setSCMServiceId(SCM_SERVICE_ID)
+ .setNumOfOzoneManagers(NUM_OF_OMS)
+ .setNumOfStorageContainerManagers(NUM_OF_SCMS)
.setNumOfActiveSCMs(2)
.build();
cluster.waitForClusterToBeReady();
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java
index f9f7a4d72e..703e6bf30c 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java
@@ -39,11 +39,10 @@
*/
public class TestSCMSnapshot {
private static MiniOzoneCluster cluster;
- private static OzoneConfiguration conf;
@BeforeAll
public static void setup() throws Exception {
- conf = new OzoneConfiguration();
+ OzoneConfiguration conf = new OzoneConfiguration();
conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, "10s");
conf.setLong(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_THRESHOLD, 1L);
cluster = MiniOzoneCluster
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java
index 57179ec3d5..30daaaf14f 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java
@@ -92,7 +92,6 @@ public final class TestSecretKeySnapshot {
private File workDir;
private File ozoneKeytab;
private File spnegoKeytab;
- private String host;
private MiniOzoneHAClusterImpl cluster;
@BeforeEach
@@ -160,8 +159,8 @@ private void startMiniKdc() throws Exception {
private void setSecureConfig() throws IOException {
conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
- host = InetAddress.getLocalHost().getCanonicalHostName()
- .toLowerCase();
+ String host = InetAddress.getLocalHost().getCanonicalHostName()
+ .toLowerCase();
conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.name());
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java
index 7de9cdd015..2825683f1a 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java
@@ -45,12 +45,11 @@ public class TestStorageContainerManagerHA {
private static final Logger LOG =
LoggerFactory.getLogger(TestStorageContainerManagerHA.class);
private MiniOzoneHAClusterImpl cluster;
- private OzoneConfiguration conf;
private static final int OM_COUNT = 3;
private static final int SCM_COUNT = 3;
public void init() throws Exception {
- conf = new OzoneConfiguration();
+ OzoneConfiguration conf = new OzoneConfiguration();
conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, "10s");
conf.set(ScmConfigKeys.OZONE_SCM_HA_DBTRANSACTIONBUFFER_FLUSH_INTERVAL,
"5s");
@@ -114,7 +113,7 @@ public void testBootStrapSCM() throws Exception {
public void testSCMLeadershipMetric() throws IOException,
InterruptedException {
// GIVEN
int scmInstancesCount = 3;
- conf = new OzoneConfiguration();
+ OzoneConfiguration conf = new OzoneConfiguration();
MiniOzoneHAClusterImpl.Builder haMiniClusterBuilder =
MiniOzoneCluster.newHABuilder(conf)
.setSCMServiceId("scm-service-id")
.setOMServiceId("om-service-id")
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestWatchForCommit.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestWatchForCommit.java
index 432818b8f2..79bdfc2f70 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestWatchForCommit.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestWatchForCommit.java
@@ -77,6 +77,10 @@
*/
@Flaky("HDDS-5818")
public class TestWatchForCommit {
+ private static final int CHUNK_SIZE = 100;
+ private static final int FLUSH_SIZE = 2 * CHUNK_SIZE;
+ private static final int MAX_FLUSH_SIZE = 2 * FLUSH_SIZE;
+ private static final int BLOCK_SIZE = 2 * MAX_FLUSH_SIZE;
private MiniOzoneCluster cluster;
private OzoneConfiguration conf;
@@ -85,10 +89,6 @@ public class TestWatchForCommit {
private String volumeName;
private String bucketName;
private String keyString;
- private int chunkSize;
- private int flushSize;
- private int maxFlushSize;
- private int blockSize;
private StorageContainerLocationProtocolClientSideTranslatorPB
storageContainerLocationClient;
@@ -102,10 +102,6 @@ public class TestWatchForCommit {
@BeforeEach
public void init() throws Exception {
conf = new OzoneConfiguration();
- chunkSize = 100;
- flushSize = 2 * chunkSize;
- maxFlushSize = 2 * flushSize;
- blockSize = 2 * maxFlushSize;
OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
clientConfig.setStreamBufferFlushDelay(false);
@@ -138,10 +134,10 @@ public void init() throws Exception {
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS);
ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
- .setBlockSize(blockSize)
- .setChunkSize(chunkSize)
- .setStreamBufferFlushSize(flushSize)
- .setStreamBufferMaxSize(maxFlushSize)
+ .setBlockSize(BLOCK_SIZE)
+ .setChunkSize(CHUNK_SIZE)
+ .setStreamBufferFlushSize(FLUSH_SIZE)
+ .setStreamBufferMaxSize(MAX_FLUSH_SIZE)
.applyTo(conf);
cluster = MiniOzoneCluster.newBuilder(conf)
@@ -181,7 +177,7 @@ private String getKeyName() {
public void testWatchForCommitWithKeyWrite() throws Exception {
String keyName = getKeyName();
OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
- int dataLength = maxFlushSize + 50;
+ int dataLength = MAX_FLUSH_SIZE + 50;
// write data more than 1 chunk
byte[] data1 =
ContainerTestHelper.getFixedLengthString(keyString, dataLength)
@@ -199,13 +195,13 @@ public void testWatchForCommitWithKeyWrite() throws
Exception {
assertEquals(4, blockOutputStream.getBufferPool().getSize());
// writtenDataLength as well flushedDataLength will be updated here
assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
- assertEquals(maxFlushSize,
+ assertEquals(MAX_FLUSH_SIZE,
blockOutputStream.getTotalDataFlushedLength());
// since data equals to maxBufferSize is written, this will be a blocking
// call and hence will wait for atleast flushSize worth of data to get
// acked by all servers right here
assertThat(blockOutputStream.getTotalAckDataLength())
- .isGreaterThanOrEqualTo(flushSize);
+ .isGreaterThanOrEqualTo(FLUSH_SIZE);
// watchForCommit will clean up atleast one entry from the map where each
// entry corresponds to flushSize worth of data
assertThat(blockOutputStream.getCommitIndex2flushedDataMap().size())
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientMetrics.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientMetrics.java
index e1003859f8..157c3dedd3 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientMetrics.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientMetrics.java
@@ -55,14 +55,13 @@ public class TestXceiverClientMetrics {
private volatile boolean breakFlag;
private CountDownLatch latch;
- private static OzoneConfiguration config;
private static MiniOzoneCluster cluster;
private static StorageContainerLocationProtocolClientSideTranslatorPB
storageContainerLocationClient;
@BeforeAll
public static void init() throws Exception {
- config = new OzoneConfiguration();
+ OzoneConfiguration config = new OzoneConfiguration();
cluster = MiniOzoneCluster.newBuilder(config).build();
cluster.waitForClusterToBeReady();
storageContainerLocationClient = cluster
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java
index a7bee7f3d8..f743ff82a2 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java
@@ -46,11 +46,9 @@
public class TestMultiRaftSetup {
private MiniOzoneCluster cluster;
- private StorageContainerManager scm;
private NodeManager nodeManager;
private PipelineManager pipelineManager;
- private long pipelineDestroyTimeoutInMillis;
private static final ReplicationConfig RATIS_THREE =
ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS,
HddsProtos.ReplicationFactor.THREE);
@@ -60,11 +58,11 @@ public void init(int dnCount, OzoneConfiguration conf)
throws Exception {
MiniOzoneCluster.newBuilder(conf).setNumDatanodes(dnCount).build();
conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, 1000,
TimeUnit.MILLISECONDS);
- pipelineDestroyTimeoutInMillis = 1000;
+ long pipelineDestroyTimeoutInMillis = 1000;
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT,
pipelineDestroyTimeoutInMillis, TimeUnit.MILLISECONDS);
cluster.waitForClusterToBeReady();
- scm = cluster.getStorageContainerManager();
+ StorageContainerManager scm = cluster.getStorageContainerManager();
nodeManager = scm.getScmNodeManager();
pipelineManager = scm.getPipelineManager();
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
index c8bd8937b5..1763d9e269 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
@@ -74,14 +74,11 @@
public class TestPipelineClose {
private MiniOzoneCluster cluster;
- private OzoneConfiguration conf;
private StorageContainerManager scm;
private ContainerWithPipeline ratisContainer;
private ContainerManager containerManager;
private PipelineManager pipelineManager;
- private long pipelineDestroyTimeoutInMillis;
-
/**
* Create a MiniDFSCluster for testing.
*
@@ -89,14 +86,14 @@ public class TestPipelineClose {
*/
@BeforeAll
public void init() throws Exception {
- conf = new OzoneConfiguration();
+ OzoneConfiguration conf = new OzoneConfiguration();
conf.set(OzoneConfigKeys.OZONE_SCM_CLOSE_CONTAINER_WAIT_DURATION, "2s");
conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_SCRUB_INTERVAL, "2s");
conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, "5s");
cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build();
conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, 1000,
TimeUnit.MILLISECONDS);
- pipelineDestroyTimeoutInMillis = 1000;
+ long pipelineDestroyTimeoutInMillis = 1000;
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT,
pipelineDestroyTimeoutInMillis, TimeUnit.MILLISECONDS);
cluster.waitForClusterToBeReady();
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java
index f45f29afb7..3623da1241 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java
@@ -54,12 +54,11 @@
public class TestSCMPipelineBytesWrittenMetrics {
private MiniOzoneCluster cluster;
- private OzoneConfiguration conf;
private OzoneClient client;
@BeforeEach
public void setup() throws Exception {
- conf = new OzoneConfiguration();
+ OzoneConfiguration conf = new OzoneConfiguration();
conf.setBoolean(OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE, false);
conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 1);
conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 10, TimeUnit.SECONDS);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java
index c6443842d3..bbfd1aee8f 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java
@@ -43,10 +43,8 @@
public class TestSCMRestart {
private static MiniOzoneCluster cluster;
- private static OzoneConfiguration conf;
private static Pipeline ratisPipeline1;
private static Pipeline ratisPipeline2;
- private static ContainerManager containerManager;
private static ContainerManager newContainerManager;
private static PipelineManager pipelineManager;
@@ -58,7 +56,7 @@ public class TestSCMRestart {
@BeforeAll
public static void init() throws Exception {
final int numOfNodes = 4;
- conf = new OzoneConfiguration();
+ OzoneConfiguration conf = new OzoneConfiguration();
conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 1000,
TimeUnit.MILLISECONDS);
conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, TimeUnit.MILLISECONDS);
@@ -71,7 +69,7 @@ public static void init() throws Exception {
.build();
cluster.waitForClusterToBeReady();
StorageContainerManager scm = cluster.getStorageContainerManager();
- containerManager = scm.getContainerManager();
+ ContainerManager containerManager = scm.getContainerManager();
pipelineManager = scm.getPipelineManager();
ratisPipeline1 = pipelineManager.getPipeline(
containerManager.allocateContainer(
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java
index 134f7f6ea8..135a8389c3 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java
@@ -54,11 +54,10 @@
public class TestSCMSafeModeWithPipelineRules {
private MiniOzoneCluster cluster;
- private OzoneConfiguration conf;
private PipelineManager pipelineManager;
public void setup(int numDatanodes) throws Exception {
- conf = new OzoneConfiguration();
+ OzoneConfiguration conf = new OzoneConfiguration();
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
100, TimeUnit.MILLISECONDS);
conf.set(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, "10s");
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java
index efd0fe25bc..af2fe2a5d0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java
@@ -19,6 +19,7 @@
import static java.util.Collections.singletonList;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.apache.hadoop.ozone.OzoneConsts.MB;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
@@ -72,17 +73,15 @@
* Class to test CommitWatcher functionality.
*/
public class TestCommitWatcher {
+ private static final int CHUNK_SIZE = (int)(1 * MB);
+ private static final long FLUSH_SIZE = (long) 2 * CHUNK_SIZE;
+ private static final long MAX_FLUSH_SIZE = 2 * FLUSH_SIZE;
+ private static final long BLOCK_SIZE = 2 * MAX_FLUSH_SIZE;
+ private static final String VOLUME_NAME = "testblockoutputstream";
private MiniOzoneCluster cluster;
private OzoneConfiguration conf = new OzoneConfiguration();
private OzoneClient client;
- private ObjectStore objectStore;
- private int chunkSize;
- private long flushSize;
- private long maxFlushSize;
- private long blockSize;
- private String volumeName;
- private String bucketName;
private StorageContainerLocationProtocolClientSideTranslatorPB
storageContainerLocationClient;
@@ -95,10 +94,6 @@ public class TestCommitWatcher {
*/
@BeforeEach
public void init() throws Exception {
- chunkSize = (int)(1 * OzoneConsts.MB);
- flushSize = (long) 2 * chunkSize;
- maxFlushSize = 2 * flushSize;
- blockSize = 2 * maxFlushSize;
// Make sure the pipeline does not get destroyed quickly
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
10, TimeUnit.SECONDS);
@@ -127,10 +122,10 @@ public void init() throws Exception {
conf.setFromObject(clientConfig);
ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
- .setBlockSize(blockSize)
- .setChunkSize(chunkSize)
- .setStreamBufferFlushSize(flushSize)
- .setStreamBufferMaxSize(maxFlushSize)
+ .setBlockSize(BLOCK_SIZE)
+ .setChunkSize(CHUNK_SIZE)
+ .setStreamBufferFlushSize(FLUSH_SIZE)
+ .setStreamBufferMaxSize(MAX_FLUSH_SIZE)
.applyTo(conf);
conf.setQuietMode(false);
@@ -142,11 +137,9 @@ public void init() throws Exception {
cluster.waitForClusterToBeReady();
//the easiest way to create an open container is creating a key
client = OzoneClientFactory.getRpcClient(conf);
- objectStore = client.getObjectStore();
- volumeName = "testblockoutputstream";
- bucketName = volumeName;
- objectStore.createVolume(volumeName);
- objectStore.getVolume(volumeName).createBucket(bucketName);
+ ObjectStore objectStore = client.getObjectStore();
+ objectStore.createVolume(VOLUME_NAME);
+ objectStore.getVolume(VOLUME_NAME).createBucket(VOLUME_NAME);
storageContainerLocationClient = cluster
.getStorageContainerLocationClient();
}
@@ -165,7 +158,7 @@ public void shutdown() {
@Test
public void testReleaseBuffers() throws Exception {
int capacity = 2;
- BufferPool bufferPool = new BufferPool(chunkSize, capacity);
+ BufferPool bufferPool = new BufferPool(CHUNK_SIZE, capacity);
try (XceiverClientManager mgr = new XceiverClientManager(conf)) {
ContainerWithPipeline container = storageContainerLocationClient
.allocateContainer(HddsProtos.ReplicationType.RATIS,
@@ -184,7 +177,7 @@ public void testReleaseBuffers() throws Exception {
for (int i = 0; i < capacity; i++) {
ContainerCommandRequestProto writeChunkRequest =
ContainerTestHelper
- .getWriteChunkRequest(pipeline, blockID, chunkSize);
+ .getWriteChunkRequest(pipeline, blockID, CHUNK_SIZE);
// add the data to the buffer pool
final ChunkBuffer byteBuffer = bufferPool.allocateBuffer(0);
byteBuffer.put(writeChunkRequest.getWriteChunk().getData());
@@ -217,10 +210,10 @@ public void testReleaseBuffers() throws Exception {
getCommitIndexMap().size());
watcher.watchOnFirstIndex();
assertThat(watcher.getCommitIndexMap()).doesNotContainKey(replies.get(0).getLogIndex());
-      assertThat(watcher.getTotalAckDataLength()).isGreaterThanOrEqualTo(chunkSize);
+      assertThat(watcher.getTotalAckDataLength()).isGreaterThanOrEqualTo(CHUNK_SIZE);
watcher.watchOnLastIndex();
assertThat(watcher.getCommitIndexMap()).doesNotContainKey(replies.get(1).getLogIndex());
- assertEquals(2 * chunkSize, watcher.getTotalAckDataLength());
+ assertEquals(2 * CHUNK_SIZE, watcher.getTotalAckDataLength());
assertThat(watcher.getCommitIndexMap()).isEmpty();
}
} finally {
@@ -231,7 +224,7 @@ public void testReleaseBuffers() throws Exception {
@Test
public void testReleaseBuffersOnException() throws Exception {
int capacity = 2;
- BufferPool bufferPool = new BufferPool(chunkSize, capacity);
+ BufferPool bufferPool = new BufferPool(CHUNK_SIZE, capacity);
try (XceiverClientManager mgr = new XceiverClientManager(conf)) {
ContainerWithPipeline container = storageContainerLocationClient
.allocateContainer(HddsProtos.ReplicationType.RATIS,
@@ -250,7 +243,7 @@ public void testReleaseBuffersOnException() throws Exception {
for (int i = 0; i < capacity; i++) {
ContainerCommandRequestProto writeChunkRequest =
ContainerTestHelper
- .getWriteChunkRequest(pipeline, blockID, chunkSize);
+ .getWriteChunkRequest(pipeline, blockID, CHUNK_SIZE);
// add the data to the buffer pool
final ChunkBuffer byteBuffer = bufferPool.allocateBuffer(0);
byteBuffer.put(writeChunkRequest.getWriteChunk().getData());
@@ -283,7 +276,7 @@ public void testReleaseBuffersOnException() throws Exception {
assertEquals(2, watcher.getCommitIndexMap().size());
watcher.watchOnFirstIndex();
assertThat(watcher.getCommitIndexMap()).doesNotContainKey(replies.get(0).getLogIndex());
-      assertThat(watcher.getTotalAckDataLength()).isGreaterThanOrEqualTo(chunkSize);
+      assertThat(watcher.getTotalAckDataLength()).isGreaterThanOrEqualTo(CHUNK_SIZE);
cluster.shutdownHddsDatanode(pipeline.getNodes().get(0));
cluster.shutdownHddsDatanode(pipeline.getNodes().get(1));
// just watch for a higher index so as to ensure, it does an actual
@@ -305,10 +298,10 @@ public void testReleaseBuffersOnException() throws Exception {
"Unexpected exception: " + t.getClass());
if (ratisClient.getReplicatedMinCommitIndex() < replies.get(1)
.getLogIndex()) {
- assertEquals(chunkSize, watcher.getTotalAckDataLength());
+ assertEquals(CHUNK_SIZE, watcher.getTotalAckDataLength());
assertEquals(1, watcher.getCommitIndexMap().size());
} else {
- assertEquals(2 * chunkSize, watcher.getTotalAckDataLength());
+ assertEquals(2 * CHUNK_SIZE, watcher.getTotalAckDataLength());
assertThat(watcher.getCommitIndexMap()).isEmpty();
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java
index 4b5847f43b..7e60477447 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java
@@ -163,7 +163,6 @@ public class TestContainerCommandsEC {
private static CertificateClient certClient;
private static OzoneBucket classBucket;
- private static OzoneVolume classVolume;
private static ReplicationConfig repConfig;
@BeforeAll
@@ -1016,7 +1015,7 @@ public static void prepareData(int[][] ranges) throws Exception {
final String volumeName = UUID.randomUUID().toString();
final String bucketName = UUID.randomUUID().toString();
store.createVolume(volumeName);
- classVolume = store.getVolume(volumeName);
+ OzoneVolume classVolume = store.getVolume(volumeName);
classVolume.createBucket(bucketName);
classBucket = classVolume.getBucket(bucketName);
repConfig =
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java
index d8d8ac08cc..90c226f907 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java
@@ -114,7 +114,6 @@ public final class TestBlockTokens {
private static File spnegoKeytab;
private static File testUserKeytab;
private static String testUserPrincipal;
- private static String host;
private static MiniOzoneHAClusterImpl cluster;
private static OzoneClient client;
private static BlockInputStreamFactory blockInputStreamFactory =
@@ -341,8 +340,8 @@ private static void startMiniKdc() throws Exception {
private static void setSecureConfig() throws IOException {
conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
-    host = InetAddress.getLocalHost().getCanonicalHostName()
-        .toLowerCase();
+    String host = InetAddress.getLocalHost().getCanonicalHostName()
+        .toLowerCase();
conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.name());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java
index 6904ad6c1e..34a1c0388e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java
@@ -89,7 +89,6 @@ public final class TestBlockTokensCLI {
private static OzoneConfiguration conf;
private static File ozoneKeytab;
private static File spnegoKeytab;
- private static String host;
private static String omServiceId;
private static String scmServiceId;
private static MiniOzoneHAClusterImpl cluster;
@@ -154,8 +153,8 @@ private static void startMiniKdc() throws Exception {
private static void setSecureConfig() throws IOException {
conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
-    host = InetAddress.getLocalHost().getCanonicalHostName()
-        .toLowerCase();
+    String host = InetAddress.getLocalHost().getCanonicalHostName()
+        .toLowerCase();
conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.name());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java
index 1a5fe8bbf8..cfce524537 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java
@@ -62,7 +62,6 @@ public class TestOMSortDatanodes {
private static StorageContainerManager scm;
private static NodeManager nodeManager;
private static KeyManagerImpl keyManager;
- private static StorageContainerLocationProtocol mockScmContainerClient;
private static OzoneManager om;
private static final int NODE_COUNT = 10;
private static final Map<String, String> EDGE_NODES = ImmutableMap.of(
@@ -100,8 +99,7 @@ public static void setup() throws Exception {
scm.exitSafeMode();
nodeManager = scm.getScmNodeManager();
datanodes.forEach(dn -> nodeManager.register(dn, null, null));
- mockScmContainerClient =
- mock(StorageContainerLocationProtocol.class);
+    StorageContainerLocationProtocol mockScmContainerClient = mock(StorageContainerLocationProtocol.class);
OmTestManagers omTestManagers
= new OmTestManagers(config, scm.getBlockProtocolServer(),
mockScmContainerClient);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java
index e282e7e6a4..5cd57f9db1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java
@@ -226,15 +226,15 @@ abstract class OzoneRpcClientTests extends OzoneTestBase {
private static StorageContainerLocationProtocolClientSideTranslatorPB
storageContainerLocationClient;
private static String remoteUserName = "remoteUser";
- private static String remoteGroupName = "remoteGroup";
+ private static final String REMOTE_GROUP_NAME = "remoteGroup";
private static OzoneAcl defaultUserAcl = OzoneAcl.of(USER, remoteUserName,
DEFAULT, READ);
- private static OzoneAcl defaultGroupAcl = OzoneAcl.of(GROUP, remoteGroupName,
+  private static OzoneAcl defaultGroupAcl = OzoneAcl.of(GROUP, REMOTE_GROUP_NAME,
DEFAULT, READ);
private static OzoneAcl inheritedUserAcl = OzoneAcl.of(USER, remoteUserName,
ACCESS, READ);
private static OzoneAcl inheritedGroupAcl = OzoneAcl.of(GROUP,
- remoteGroupName, ACCESS, READ);
+ REMOTE_GROUP_NAME, ACCESS, READ);
private static MessageDigest eTagProvider;
private static Set<OzoneClient> ozoneClients = new HashSet<>();
private static GenericTestUtils.PrintStreamCapturer output;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
index df19c69d43..ab410c2e40 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
@@ -72,7 +72,6 @@
public class TestContainerReplicationEndToEnd {
private static MiniOzoneCluster cluster;
- private static OzoneConfiguration conf;
private static OzoneClient client;
private static ObjectStore objectStore;
private static String volumeName;
@@ -87,7 +86,7 @@ public class TestContainerReplicationEndToEnd {
*/
@BeforeAll
public static void init() throws Exception {
- conf = new OzoneConfiguration();
+ OzoneConfiguration conf = new OzoneConfiguration();
containerReportInterval = 2000;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
index 6e8309e09e..a788b3aa9b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
@@ -117,7 +117,6 @@
public class TestContainerStateMachineFailures {
private static MiniOzoneCluster cluster;
- private static OzoneConfiguration conf;
private static OzoneClient client;
private static ObjectStore objectStore;
private static String volumeName;
@@ -131,7 +130,7 @@ public class TestContainerStateMachineFailures {
*/
@BeforeAll
public static void init() throws Exception {
- conf = new OzoneConfiguration();
+ OzoneConfiguration conf = new OzoneConfiguration();
OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
clientConfig.setStreamBufferFlushDelay(false);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java
index 4ca170dcd1..ea1aeee903 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java
@@ -62,16 +62,17 @@
* Tests the containerStateMachine failure handling by set flush delay.
*/
public class TestContainerStateMachineFlushDelay {
+ private static final int CHUNK_SIZE = 100;
+ private static final int FLUSH_SIZE = 2 * CHUNK_SIZE;
+ private static final int MAX_FLUSH_SIZE = 2 * FLUSH_SIZE;
+ private static final int BLOCK_SIZE = 2 * MAX_FLUSH_SIZE;
+
private MiniOzoneCluster cluster;
private OzoneConfiguration conf = new OzoneConfiguration();
private OzoneClient client;
private ObjectStore objectStore;
private String volumeName;
private String bucketName;
- private int chunkSize;
- private int flushSize;
- private int maxFlushSize;
- private int blockSize;
private String keyString;
/**
@@ -81,10 +82,6 @@ public class TestContainerStateMachineFlushDelay {
*/
@BeforeEach
public void setup() throws Exception {
- chunkSize = 100;
- flushSize = 2 * chunkSize;
- maxFlushSize = 2 * flushSize;
- blockSize = 2 * maxFlushSize;
keyString = UUID.randomUUID().toString();
conf.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true);
@@ -104,10 +101,10 @@ public void setup() throws Exception {
conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, "5s");
ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
- .setBlockSize(blockSize)
- .setChunkSize(chunkSize)
- .setStreamBufferFlushSize(flushSize)
- .setStreamBufferMaxSize(maxFlushSize)
+ .setBlockSize(BLOCK_SIZE)
+ .setChunkSize(CHUNK_SIZE)
+ .setStreamBufferFlushSize(FLUSH_SIZE)
+ .setStreamBufferMaxSize(MAX_FLUSH_SIZE)
.applyTo(conf);
cluster =
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
index 178e3db7f7..6d38fdc6c7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
@@ -84,7 +84,6 @@
public class TestDeleteWithInAdequateDN {
private static MiniOzoneCluster cluster;
- private static OzoneConfiguration conf;
private static OzoneClient client;
private static ObjectStore objectStore;
private static String volumeName;
@@ -101,7 +100,7 @@ public class TestDeleteWithInAdequateDN {
public static void init() throws Exception {
final int numOfDatanodes = 3;
- conf = new OzoneConfiguration();
+ OzoneConfiguration conf = new OzoneConfiguration();
conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 100,
TimeUnit.MILLISECONDS);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
index fdbdb04dfd..2f83af53c8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
@@ -69,15 +69,14 @@
* Tests Exception handling by Ozone Client by set flush delay.
*/
public class TestFailureHandlingByClientFlushDelay {
+ private static final int CHUNK_SIZE = 100;
+ private static final int FLUSH_SIZE = 2 * CHUNK_SIZE;
+ private static final int MAX_FLUSH_SIZE = 2 * FLUSH_SIZE;
+ private static final int BLOCK_SIZE = 4 * CHUNK_SIZE;
private MiniOzoneCluster cluster;
- private OzoneConfiguration conf;
private OzoneClient client;
private ObjectStore objectStore;
- private int chunkSize;
- private int flushSize;
- private int maxFlushSize;
- private int blockSize;
private String volumeName;
private String bucketName;
private String keyString;
@@ -90,11 +89,7 @@ public class TestFailureHandlingByClientFlushDelay {
* @throws IOException
*/
private void init() throws Exception {
- conf = new OzoneConfiguration();
- chunkSize = 100;
- flushSize = 2 * chunkSize;
- maxFlushSize = 2 * flushSize;
- blockSize = 4 * chunkSize;
+ OzoneConfiguration conf = new OzoneConfiguration();
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 100, TimeUnit.SECONDS);
RatisClientConfig ratisClientConfig =
@@ -130,10 +125,10 @@ private void init() throws Exception {
"/rack1");
ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
- .setBlockSize(blockSize)
- .setChunkSize(chunkSize)
- .setStreamBufferFlushSize(flushSize)
- .setStreamBufferMaxSize(maxFlushSize)
+ .setBlockSize(BLOCK_SIZE)
+ .setChunkSize(CHUNK_SIZE)
+ .setStreamBufferFlushSize(FLUSH_SIZE)
+ .setStreamBufferMaxSize(MAX_FLUSH_SIZE)
.applyTo(conf);
cluster = MiniOzoneCluster.newBuilder(conf)
@@ -170,9 +165,9 @@ public void testPipelineExclusionWithPipelineFailure() throws Exception {
startCluster();
String keyName = UUID.randomUUID().toString();
OzoneOutputStream key =
- createKey(keyName, ReplicationType.RATIS, blockSize);
+ createKey(keyName, ReplicationType.RATIS, BLOCK_SIZE);
String data = ContainerTestHelper
- .getFixedLengthString(keyString, chunkSize);
+ .getFixedLengthString(keyString, CHUNK_SIZE);
// get the name of a valid container
KeyOutputStream keyOutputStream =
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java
index 4a87fb4951..7628819e7e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java
@@ -56,7 +56,6 @@
*/
public class TestHybridPipelineOnDatanode {
private static MiniOzoneCluster cluster;
- private static OzoneConfiguration conf;
private static OzoneClient client;
private static ObjectStore objectStore;
@@ -69,7 +68,7 @@ public class TestHybridPipelineOnDatanode {
*/
@BeforeAll
public static void init() throws Exception {
- conf = new OzoneConfiguration();
+ OzoneConfiguration conf = new OzoneConfiguration();
conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 5);
cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3)
.build();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
index 76220aa4f3..fba29d210f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
@@ -62,7 +62,6 @@
*/
public class TestMultiBlockWritesWithDnFailures {
private MiniOzoneCluster cluster;
- private OzoneConfiguration conf;
private OzoneClient client;
private ObjectStore objectStore;
private int chunkSize;
@@ -79,7 +78,7 @@ public class TestMultiBlockWritesWithDnFailures {
* @throws IOException
*/
private void startCluster(int datanodes) throws Exception {
- conf = new OzoneConfiguration();
+ OzoneConfiguration conf = new OzoneConfiguration();
chunkSize = (int) OzoneConsts.MB;
blockSize = 4 * chunkSize;
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 100, TimeUnit.SECONDS);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java
index db82170022..6628673bb4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java
@@ -60,14 +60,15 @@
* flush delay.
*/
public class TestOzoneClientRetriesOnExceptionFlushDelay {
+ private static final int CHUNK_SIZE = 100;
+ private static final int FLUSH_SIZE = 2 * CHUNK_SIZE;
+ private static final int MAX_FLUSH_SIZE = 2 * FLUSH_SIZE;
+ private static final int BLOCK_SIZE = 2 * MAX_FLUSH_SIZE;
+
private MiniOzoneCluster cluster;
private OzoneConfiguration conf = new OzoneConfiguration();
private OzoneClient client;
private ObjectStore objectStore;
- private int chunkSize;
- private int flushSize;
- private int maxFlushSize;
- private int blockSize;
private String volumeName;
private String bucketName;
private String keyString;
@@ -82,11 +83,6 @@ public class TestOzoneClientRetriesOnExceptionFlushDelay {
*/
@BeforeEach
public void init() throws Exception {
- chunkSize = 100;
- flushSize = 2 * chunkSize;
- maxFlushSize = 2 * flushSize;
- blockSize = 2 * maxFlushSize;
-
OzoneClientConfig config = conf.getObject(OzoneClientConfig.class);
config.setChecksumType(ChecksumType.NONE);
config.setMaxRetryCount(3);
@@ -99,10 +95,10 @@ public void init() throws Exception {
conf.setQuietMode(false);
ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
- .setBlockSize(blockSize)
- .setChunkSize(chunkSize)
- .setStreamBufferFlushSize(flushSize)
- .setStreamBufferMaxSize(maxFlushSize)
+ .setBlockSize(BLOCK_SIZE)
+ .setChunkSize(CHUNK_SIZE)
+ .setStreamBufferFlushSize(FLUSH_SIZE)
+ .setStreamBufferMaxSize(MAX_FLUSH_SIZE)
.applyTo(conf);
cluster = MiniOzoneCluster.newBuilder(conf)
@@ -139,7 +135,7 @@ public void shutdown() {
public void testGroupMismatchExceptionHandling() throws Exception {
String keyName = getKeyName();
// make sure flush will sync data.
- int dataLength = maxFlushSize + chunkSize;
+ int dataLength = MAX_FLUSH_SIZE + CHUNK_SIZE;
OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS,
dataLength);
// write data more than 1 chunk
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
index 72129713e8..217b915968 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
@@ -69,14 +69,15 @@ public class TestOzoneClientRetriesOnExceptions {
private static final int MAX_RETRIES = 3;
+ private static final int CHUNK_SIZE = 100;
+ private static final int FLUSH_SIZE = 2 * CHUNK_SIZE;
+ private static final int MAX_FLUSH_SIZE = 2 * FLUSH_SIZE;
+ private static final int BLOCK_SIZE = 2 * MAX_FLUSH_SIZE;
+
private MiniOzoneCluster cluster;
private OzoneConfiguration conf = new OzoneConfiguration();
private OzoneClient client;
private ObjectStore objectStore;
- private int chunkSize;
- private int flushSize;
- private int maxFlushSize;
- private int blockSize;
private String volumeName;
private String bucketName;
private String keyString;
@@ -91,11 +92,6 @@ public class TestOzoneClientRetriesOnExceptions {
*/
@BeforeEach
public void init() throws Exception {
- chunkSize = 100;
- flushSize = 2 * chunkSize;
- maxFlushSize = 2 * flushSize;
- blockSize = 2 * maxFlushSize;
-
OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
clientConfig.setMaxRetryCount(MAX_RETRIES);
clientConfig.setChecksumType(ChecksumType.NONE);
@@ -110,10 +106,10 @@ public void init() throws Exception {
conf.setQuietMode(false);
ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
- .setBlockSize(blockSize)
- .setChunkSize(chunkSize)
- .setStreamBufferFlushSize(flushSize)
- .setStreamBufferMaxSize(maxFlushSize)
+ .setBlockSize(BLOCK_SIZE)
+ .setChunkSize(CHUNK_SIZE)
+ .setStreamBufferFlushSize(FLUSH_SIZE)
+ .setStreamBufferMaxSize(MAX_FLUSH_SIZE)
.applyTo(conf);
cluster = MiniOzoneCluster.newBuilder(conf)
@@ -149,7 +145,7 @@ public void shutdown() {
@Test
public void testGroupMismatchExceptionHandling() throws Exception {
String keyName = getKeyName();
- int dataLength = maxFlushSize + 50;
+ int dataLength = MAX_FLUSH_SIZE + 50;
OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS,
dataLength);
// write data more than 1 chunk
@@ -193,13 +189,13 @@ public void testGroupMismatchExceptionHandling() throws Exception {
void testMaxRetriesByOzoneClient() throws Exception {
String keyName = getKeyName();
try (OzoneOutputStream key = createKey(
- keyName, ReplicationType.RATIS, (MAX_RETRIES + 1) * blockSize)) {
+ keyName, ReplicationType.RATIS, (MAX_RETRIES + 1) * BLOCK_SIZE)) {
KeyOutputStream keyOutputStream =
assertInstanceOf(KeyOutputStream.class, key.getOutputStream());
List<BlockOutputStreamEntry> entries =
keyOutputStream.getStreamEntries();
assertEquals((MAX_RETRIES + 1),
keyOutputStream.getStreamEntries().size());
- int dataLength = maxFlushSize + 50;
+ int dataLength = MAX_FLUSH_SIZE + 50;
// write data more than 1 chunk
byte[] data1 =
ContainerTestHelper.getFixedLengthString(keyString, dataLength)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java
index c4af662f61..066de7608d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java
@@ -77,29 +77,24 @@
* Tests the EC recovery and over replication processing.
*/
public class TestECContainerRecovery {
+ private static final int CHUNK_SIZE = 1024 * 1024;
+ private static final int FLUSH_SIZE = 2 * CHUNK_SIZE;
+ private static final int MAX_FLUSH_SIZE = 2 * FLUSH_SIZE;
+ private static final int BLOCK_SIZE = 2 * MAX_FLUSH_SIZE;
+
private static MiniOzoneCluster cluster;
private static OzoneConfiguration conf = new OzoneConfiguration();
private static OzoneClient client;
private static ObjectStore objectStore;
- private static int chunkSize;
- private static int flushSize;
- private static int maxFlushSize;
- private static int blockSize;
private static String volumeName;
- private static String bucketName;
private static int dataBlocks = 3;
- private static byte[][] inputChunks = new byte[dataBlocks][chunkSize];
+ private static byte[][] inputChunks = new byte[dataBlocks][CHUNK_SIZE];
/**
* Create a MiniDFSCluster for testing.
*/
@BeforeAll
public static void init() throws Exception {
- chunkSize = 1024 * 1024;
- flushSize = 2 * chunkSize;
- maxFlushSize = 2 * flushSize;
- blockSize = 2 * maxFlushSize;
-
OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
clientConfig.setChecksumType(ContainerProtos.ChecksumType.NONE);
clientConfig.setStreamBufferFlushDelay(false);
@@ -139,10 +134,10 @@ public static void init() throws Exception {
conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 10);
ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
- .setBlockSize(blockSize)
- .setChunkSize(chunkSize)
- .setStreamBufferFlushSize(flushSize)
- .setStreamBufferMaxSize(maxFlushSize)
+ .setBlockSize(BLOCK_SIZE)
+ .setChunkSize(CHUNK_SIZE)
+ .setStreamBufferFlushSize(FLUSH_SIZE)
+ .setStreamBufferMaxSize(MAX_FLUSH_SIZE)
.applyTo(conf);
cluster = MiniOzoneCluster.newBuilder(conf)
@@ -152,7 +147,7 @@ public static void init() throws Exception {
client = OzoneClientFactory.getRpcClient(conf);
objectStore = client.getObjectStore();
volumeName = UUID.randomUUID().toString();
- bucketName = volumeName;
+ String bucketName = volumeName;
objectStore.createVolume(volumeName);
objectStore.getVolume(volumeName).createBucket(bucketName);
initInputChunks();
@@ -176,7 +171,7 @@ private OzoneBucket getOzoneBucket() throws IOException {
bucketArgs.setDefaultReplicationConfig(
new DefaultReplicationConfig(
new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS,
- chunkSize)));
+ CHUNK_SIZE)));
volume.createBucket(myBucket, bucketArgs.build());
return volume.getBucket(myBucket);
@@ -184,7 +179,7 @@ private OzoneBucket getOzoneBucket() throws IOException {
private static void initInputChunks() {
for (int i = 0; i < dataBlocks; i++) {
- inputChunks[i] = getBytesWith(i + 1, chunkSize);
+ inputChunks[i] = getBytesWith(i + 1, CHUNK_SIZE);
}
}
@@ -205,7 +200,7 @@ public void testContainerRecoveryOverReplicationProcessing()
final Pipeline pipeline;
ECReplicationConfig repConfig =
new ECReplicationConfig(3, 2,
- ECReplicationConfig.EcCodec.RS, chunkSize);
+ ECReplicationConfig.EcCodec.RS, CHUNK_SIZE);
try (OzoneOutputStream out = bucket
.createKey(keyName, 1024, repConfig, new HashMap<>())) {
out.write(inputData);
@@ -273,7 +268,7 @@ public void testECContainerRecoveryWithTimedOutRecovery()
throws Exception {
final Pipeline pipeline;
ECReplicationConfig repConfig =
new ECReplicationConfig(3, 2,
- ECReplicationConfig.EcCodec.RS, chunkSize);
+ ECReplicationConfig.EcCodec.RS, CHUNK_SIZE);
try (OzoneOutputStream out = bucket
.createKey(keyName, 1024, repConfig, new HashMap<>())) {
out.write(inputData);
@@ -407,10 +402,10 @@ private void waitForContainerCount(int count, ContainerID
containerID,
}
private byte[] getInputBytes(int numChunks) {
- byte[] inputData = new byte[numChunks * chunkSize];
+ byte[] inputData = new byte[numChunks * CHUNK_SIZE];
for (int i = 0; i < numChunks; i++) {
- int start = (i * chunkSize);
- Arrays.fill(inputData, start, start + chunkSize - 1,
+ int start = (i * CHUNK_SIZE);
+ Arrays.fill(inputData, start, start + CHUNK_SIZE - 1,
String.valueOf(i % 9).getBytes(UTF_8)[0]);
}
return inputData;
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/checksum/TestContainerCommandReconciliation.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/checksum/TestContainerCommandReconciliation.java
index 3647b8deac..187078ba4f 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/checksum/TestContainerCommandReconciliation.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/checksum/TestContainerCommandReconciliation.java
@@ -150,7 +150,6 @@ public class TestContainerCommandReconciliation {
private static File spnegoKeytab;
private static File testUserKeytab;
private static String testUserPrincipal;
- private static String host;
@BeforeAll
public static void init() throws Exception {
@@ -662,8 +661,8 @@ private static void startMiniKdc() throws Exception {
private static void setSecureConfig() throws IOException {
conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
- host = InetAddress.getLocalHost().getCanonicalHostName()
- .toLowerCase();
+ String host = InetAddress.getLocalHost().getCanonicalHostName()
+ .toLowerCase();
conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.name());
String curUser = UserGroupInformation.getCurrentUser().getUserName();
conf.set(OZONE_ADMINISTRATORS, curUser);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java
index 87897d0020..725a8f6749 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java
@@ -59,12 +59,11 @@
public class TestDatanodeHddsVolumeFailureToleration {
private MiniOzoneCluster cluster;
- private OzoneConfiguration ozoneConfig;
private List<HddsDatanodeService> datanodes;
@BeforeEach
public void init() throws Exception {
- ozoneConfig = new OzoneConfiguration();
+ OzoneConfiguration ozoneConfig = new OzoneConfiguration();
ozoneConfig.set(OZONE_SCM_CONTAINER_SIZE, "1GB");
ozoneConfig.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
0, StorageUnit.MB);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java
index 31f9f81f94..ad67a40cbe 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java
@@ -40,7 +40,6 @@
*/
public class TestFreonWithDatanodeFastRestart {
private static MiniOzoneCluster cluster;
- private static OzoneConfiguration conf;
/**
* Create a MiniDFSCluster for testing.
@@ -50,7 +49,7 @@ public class TestFreonWithDatanodeFastRestart {
*/
@BeforeAll
public static void init() throws Exception {
- conf = new OzoneConfiguration();
+ OzoneConfiguration conf = new OzoneConfiguration();
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000,
TimeUnit.MILLISECONDS);
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(3)
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java
index 89732f4572..c996e06170 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java
@@ -45,7 +45,6 @@
public class TestFreonWithPipelineDestroy {
private static MiniOzoneCluster cluster;
- private static OzoneConfiguration conf;
/**
* Create a MiniDFSCluster for testing.
@@ -55,7 +54,7 @@ public class TestFreonWithPipelineDestroy {
*/
@BeforeAll
public static void init() throws Exception {
- conf = new OzoneConfiguration();
+ OzoneConfiguration conf = new OzoneConfiguration();
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT,
1, TimeUnit.SECONDS);
conf.setTimeDuration(HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL,
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java
index d9593ea4f2..f3a8936a23 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java
@@ -57,7 +57,6 @@ public class TestContainerMapper {
private static Path dbPath;
private static MiniOzoneCluster cluster = null;
private static OzoneClient ozClient = null;
- private static ObjectStore store = null;
private static String volName = UUID.randomUUID().toString();
private static String bucketName = UUID.randomUUID().toString();
private static OzoneConfiguration conf;
@@ -78,7 +77,7 @@ public static void init() throws Exception {
.build();
cluster.waitForClusterToBeReady();
ozClient = OzoneClientFactory.getRpcClient(conf);
- store = ozClient.getObjectStore();
+ ObjectStore store = ozClient.getObjectStore();
store.createVolume(volName);
OzoneVolume volume = store.getVolume(volName);
// TODO: HDDS-5463
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
index 0a8b4fb2c8..3969a643c9 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
@@ -158,7 +158,6 @@ public class TestKeyManagerImpl {
private static File dir;
private static PrefixManager prefixManager;
private static KeyManagerImpl keyManager;
- private static NodeManager nodeManager;
private static StorageContainerManager scm;
private static ScmBlockLocationProtocol mockScmBlockLocationProtocol;
private static StorageContainerLocationProtocol mockScmContainerClient;
@@ -184,7 +183,7 @@ public static void setUp() throws Exception {
conf.get(OZONE_OM_ADDRESS_KEY));
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
mockScmBlockLocationProtocol = mock(ScmBlockLocationProtocol.class);
- nodeManager = new MockNodeManager(true, 10);
+ NodeManager nodeManager = new MockNodeManager(true, 10);
NodeSchema[] schemas = new NodeSchema[]
{ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA};
NodeSchemaManager schemaManager = NodeSchemaManager.getInstance();
@@ -194,7 +193,7 @@ public static void setUp() throws Exception {
node.setNetworkName(node.getUuidString());
clusterMap.add(node);
});
- ((MockNodeManager)nodeManager).setNetworkTopology(clusterMap);
+ ((MockNodeManager) nodeManager).setNetworkTopology(clusterMap);
SCMConfigurator configurator = new SCMConfigurator();
configurator.setScmNodeManager(nodeManager);
configurator.setNetworkTopology(clusterMap);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
index f6a8438db4..404a32d217 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
@@ -106,12 +106,12 @@ public class TestOMRatisSnapshots {
// tried up to 1000 snapshots and this test works, but some of the
// timeouts have to be increased.
private static final int SNAPSHOTS_TO_CREATE = 100;
+ private static final String OM_SERVICE_ID = "om-service-test1";
+ private static final int NUM_OF_OMS = 3;
private MiniOzoneHAClusterImpl cluster = null;
private ObjectStore objectStore;
private OzoneConfiguration conf;
- private String omServiceId;
- private int numOfOMs = 3;
private OzoneBucket ozoneBucket;
private String volumeName;
private String bucketName;
@@ -134,7 +134,6 @@ public class TestOMRatisSnapshots {
@BeforeEach
public void init(TestInfo testInfo) throws Exception {
conf = new OzoneConfiguration();
- omServiceId = "om-service-test1";
conf.setInt(OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_GAP, LOG_PURGE_GAP);
conf.setStorageSize(OMConfigKeys.OZONE_OM_RATIS_SEGMENT_SIZE_KEY, 16,
StorageUnit.KB);
@@ -158,11 +157,11 @@ public void init(TestInfo testInfo) throws Exception {
cluster = MiniOzoneCluster.newHABuilder(conf)
.setOMServiceId("om-service-test1")
- .setNumOfOzoneManagers(numOfOMs)
+ .setNumOfOzoneManagers(NUM_OF_OMS)
.setNumOfActiveOMs(2)
.build();
cluster.waitForClusterToBeReady();
- client = OzoneClientFactory.getRpcClient(omServiceId, conf);
+ client = OzoneClientFactory.getRpcClient(OM_SERVICE_ID, conf);
objectStore = client.getObjectStore();
volumeName = "volume" + RandomStringUtils.secure().nextNumeric(5);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java
index e67cbfac2f..4e69848b30 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java
@@ -142,7 +142,6 @@ public class TestOmContainerLocationCache {
private static final String VERSIONED_BUCKET_NAME = "versionedBucket1";
private static final String VOLUME_NAME = "vol1";
private static OzoneManager om;
- private static RpcClient rpcClient;
private static ObjectStore objectStore;
private static XceiverClientGrpc mockDn1Protocol;
private static XceiverClientGrpc mockDn2Protocol;
@@ -182,7 +181,7 @@ public static void setUp() throws Exception {
ozoneClient = omTestManagers.getRpcClient();
metadataManager = omTestManagers.getMetadataManager();
- rpcClient = new RpcClient(conf, null) {
+ RpcClient rpcClient = new RpcClient(conf, null) {
@Nonnull
@Override
protected XceiverClientFactory createXceiverClientFactory(
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java
index a05e944514..d6b9d51e92 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java
@@ -56,8 +56,6 @@ public class TestOzoneManagerConfiguration {
private OzoneConfiguration conf;
private MiniOzoneCluster cluster;
- private OzoneManager om;
- private OzoneManagerRatisServer omRatisServer;
private static final long RATIS_RPC_TIMEOUT = 500L;
@@ -90,7 +88,7 @@ private void startCluster() throws Exception {
@Test
public void testNoConfiguredOMAddress() throws Exception {
startCluster();
- om = cluster.getOzoneManager();
+ OzoneManager om = cluster.getOzoneManager();
assertTrue(NetUtils.isLocalAddress(
om.getOmRpcServerAddr().getAddress()));
@@ -122,7 +120,7 @@ public void testDefaultPortIfNotSpecified() throws
Exception {
conf.set(OMConfigKeys.OZONE_OM_NODE_ID_KEY, omNode1Id);
startCluster();
- om = cluster.getOzoneManager();
+ OzoneManager om = cluster.getOzoneManager();
assertEquals("0.0.0.0",
om.getOmRpcServerAddr().getHostName());
assertEquals(OMConfigKeys.OZONE_OM_PORT_DEFAULT,
@@ -145,8 +143,8 @@ public void testDefaultPortIfNotSpecified() throws
Exception {
public void testSingleNodeOMservice() throws Exception {
// Default settings of MiniOzoneCluster start a single node OM service.
startCluster();
- om = cluster.getOzoneManager();
- omRatisServer = om.getOmRatisServer();
+ OzoneManager om = cluster.getOzoneManager();
+ OzoneManagerRatisServer omRatisServer = om.getOmRatisServer();
assertEquals(LifeCycle.State.RUNNING, om.getOmRatisServerState());
// OM's Ratis server should have only 1 peer (itself) in its RaftGroup
@@ -196,8 +194,8 @@ public void testThreeNodeOMservice() throws Exception {
conf.setInt(omNode3RatisPortKey, 9898);
startCluster();
- om = cluster.getOzoneManager();
- omRatisServer = om.getOmRatisServer();
+ OzoneManager om = cluster.getOzoneManager();
+ OzoneManagerRatisServer omRatisServer = om.getOmRatisServer();
assertEquals(LifeCycle.State.RUNNING, om.getOmRatisServerState());
@@ -271,8 +269,8 @@ public void testOMHAWithUnresolvedAddresses() throws
Exception {
conf.setInt(omNode3RatisPortKey, 9898);
startCluster();
- om = cluster.getOzoneManager();
- omRatisServer = om.getOmRatisServer();
+ OzoneManager om = cluster.getOzoneManager();
+ OzoneManagerRatisServer omRatisServer = om.getOmRatisServer();
// Verify Peer details
List<OMNodeDetails> peerNodes = om.getPeerNodes();
@@ -432,8 +430,8 @@ public void testMultipleOMServiceIds() throws Exception {
"126.0.0.127:9862");
startCluster();
- om = cluster.getOzoneManager();
- omRatisServer = om.getOmRatisServer();
+ OzoneManager om = cluster.getOzoneManager();
+ OzoneManagerRatisServer omRatisServer = om.getOmRatisServer();
assertEquals(LifeCycle.State.RUNNING, om.getOmRatisServerState());
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
index 75a07309f3..f63ecff0f8 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
@@ -56,7 +56,6 @@
*/
public class TestOzoneManagerRestart {
private static MiniOzoneCluster cluster = null;
- private static OzoneConfiguration conf;
private static OzoneClient client;
/**
@@ -68,7 +67,7 @@ public class TestOzoneManagerRestart {
*/
@BeforeAll
public static void init() throws Exception {
- conf = new OzoneConfiguration();
+ OzoneConfiguration conf = new OzoneConfiguration();
conf.setBoolean(OZONE_ACL_ENABLED, true);
conf.set(OZONE_ADMINISTRATORS, OZONE_ADMINISTRATORS_WILDCARD);
conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
index 701ca2c476..9f5690e11e 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
@@ -120,16 +120,12 @@ public class TestRangerBGSyncService {
private OzoneManager ozoneManager;
private OMMetrics omMetrics;
private OMMetadataManager omMetadataManager;
- private OMMultiTenantManager omMultiTenantManager;
- private AuditLogger auditLogger;
- private Tenant tenant;
private static final String TENANT_ID = "tenant1";
// UGI-related vars
private static final String USER_ALICE = "[email protected]";
private static final String USER_ALICE_SHORT = "alice";
- private UserGroupInformation ugiAlice;
private static final String USER_BOB_SHORT = "bob";
private RangerUserRequest rangerUserRequest;
@@ -170,7 +166,7 @@ public void setUp() throws IOException {
"RULE:[2:$1@$0](.*@EXAMPLE.COM)s/@.*//\n" +
"RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\n" +
"DEFAULT");
- ugiAlice = UserGroupInformation.createRemoteUser(USER_ALICE);
+ UserGroupInformation ugiAlice = UserGroupInformation.createRemoteUser(USER_ALICE);
assertEquals(USER_ALICE_SHORT, ugiAlice.getShortUserName());
ozoneManager = mock(OzoneManager.class);
@@ -189,13 +185,13 @@ public void setUp() throws IOException {
omMetadataManager = new OmMetadataManagerImpl(conf, ozoneManager);
when(ozoneManager.getMetrics()).thenReturn(omMetrics);
when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
- auditLogger = mock(AuditLogger.class);
+ AuditLogger auditLogger = mock(AuditLogger.class);
when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
// Multi-tenant related initializations
- omMultiTenantManager = mock(OMMultiTenantManager.class);
- tenant = mock(Tenant.class);
+ OMMultiTenantManager omMultiTenantManager = mock(OMMultiTenantManager.class);
+ Tenant tenant = mock(Tenant.class);
when(ozoneManager.getMultiTenantManager()).thenReturn(omMultiTenantManager);
when(ozoneManager.getConfiguration()).thenReturn(conf);
when(ozoneManager.isLeaderReady()).thenReturn(true);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRootedDDSWithFSO.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRootedDDSWithFSO.java
index 3fc7d15f23..426515c5c7 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRootedDDSWithFSO.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRootedDDSWithFSO.java
@@ -70,8 +70,6 @@ public class TestRootedDDSWithFSO {
private static MiniOzoneCluster cluster;
private static FileSystem fs;
- private static String volumeName;
- private static String bucketName;
private static Path volumePath;
private static Path bucketPath;
private static OzoneClient client;
@@ -94,9 +92,9 @@ public static void init() throws Exception {
// create a volume and a bucket to be used by OzoneFileSystem
OzoneBucket bucket =
TestDataUtil.createVolumeAndBucket(client, getFSOBucketLayout());
- volumeName = bucket.getVolumeName();
+ String volumeName = bucket.getVolumeName();
volumePath = new Path(OZONE_URI_DELIMITER, volumeName);
- bucketName = bucket.getName();
+ String bucketName = bucket.getName();
bucketPath = new Path(volumePath, bucketName);
String rootPath = String.format("%s://%s/",
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java
index bc4958fdc6..4d5f1dda1c 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java
@@ -44,11 +44,12 @@
*/
public class TestOzoneManagerSnapshotProvider {
+ private static final String OM_SERVICE_ID = "om-service-test1";
+ private static final int NUM_OF_OMS = 3;
+
private MiniOzoneHAClusterImpl cluster = null;
private ObjectStore objectStore;
private OzoneConfiguration conf;
- private String omServiceId;
- private int numOfOMs = 3;
private OzoneClient client;
@@ -58,14 +59,13 @@ public class TestOzoneManagerSnapshotProvider {
@BeforeEach
public void init() throws Exception {
conf = new OzoneConfiguration();
- omServiceId = "om-service-test1";
conf.setBoolean(OMConfigKeys.OZONE_OM_HTTP_ENABLED_KEY, true);
cluster = MiniOzoneCluster.newHABuilder(conf)
- .setOMServiceId(omServiceId)
- .setNumOfOzoneManagers(numOfOMs)
+ .setOMServiceId(OM_SERVICE_ID)
+ .setNumOfOzoneManagers(NUM_OF_OMS)
.build();
cluster.waitForClusterToBeReady();
- client = OzoneClientFactory.getRpcClient(omServiceId, conf);
+ client = OzoneClientFactory.getRpcClient(OM_SERVICE_ID, conf);
objectStore = client.getObjectStore();
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java
index de1a0d360d..72a5ed1613 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java
@@ -68,13 +68,14 @@ public class TestDeletedBlocksTxnShell {
private static final Logger LOG = LoggerFactory
.getLogger(TestDeletedBlocksTxnShell.class);
+ private static final String SCM_SERVICE_ID = "scm-service-test1";
+ private static final int NUM_OF_SCMS = 3;
+
private final PrintStream originalOut = System.out;
private final ByteArrayOutputStream outContent = new ByteArrayOutputStream();
private MiniOzoneHAClusterImpl cluster = null;
private OzoneConfiguration conf;
- private String scmServiceId;
private File txnFile;
- private int numOfSCMs = 3;
private static final String DEFAULT_ENCODING = StandardCharsets.UTF_8.name();
@@ -89,14 +90,13 @@ public class TestDeletedBlocksTxnShell {
@BeforeEach
public void init() throws Exception {
conf = new OzoneConfiguration();
- scmServiceId = "scm-service-test1";
conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20);
cluster = MiniOzoneCluster.newHABuilder(conf)
- .setSCMServiceId(scmServiceId)
- .setNumOfStorageContainerManagers(numOfSCMs)
- .setNumOfActiveSCMs(numOfSCMs)
+ .setSCMServiceId(SCM_SERVICE_ID)
+ .setNumOfStorageContainerManagers(NUM_OF_SCMS)
+ .setNumOfActiveSCMs(NUM_OF_SCMS)
.setNumOfOzoneManagers(1)
.build();
cluster.waitForClusterToBeReady();
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java
index 2ca6533585..0ad9f62f1c 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java
@@ -85,7 +85,6 @@ public class TestOzoneTenantShell {
@TempDir
private static Path path;
- private static File testFile;
private static final File AUDIT_LOG_FILE = new File("audit.log");
private static OzoneConfiguration conf = null;
@@ -126,7 +125,7 @@ public static void init() throws Exception {
conf.setBoolean(OZONE_OM_TENANT_DEV_SKIP_RANGER, true);
}
- testFile = new File(path + OzoneConsts.OZONE_URI_DELIMITER + "testFile");
+ File testFile = new File(path + OzoneConsts.OZONE_URI_DELIMITER + "testFile");
FileUtils.touch(testFile);
ozoneSh = new OzoneShell();
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java
index 469030b9c0..f0c27e6e25 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java
@@ -45,12 +45,12 @@
*/
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
public class TestTransferLeadershipShell {
+ private static final String OM_SERVICE_ID = "om-service-test1";
+ private static final String SCM_SERVICE_ID = "scm-service-test1";
+ private static final int NUM_OF_OMS = 3;
+ private static final int NUM_OF_SCMS = 3;
+
private MiniOzoneHAClusterImpl cluster = null;
- private OzoneConfiguration conf;
- private String omServiceId;
- private String scmServiceId;
- private int numOfOMs = 3;
- private int numOfSCMs = 3;
private OzoneAdmin ozoneAdmin;
private static final long SNAPSHOT_THRESHOLD = 5;
@@ -63,17 +63,16 @@ public class TestTransferLeadershipShell {
@BeforeAll
public void init() throws Exception {
ozoneAdmin = new OzoneAdmin();
- conf = ozoneAdmin.getOzoneConf();
- omServiceId = "om-service-test1";
- scmServiceId = "scm-service-test1";
+ OzoneConfiguration conf = ozoneAdmin.getOzoneConf();
+
conf.setLong(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_THRESHOLD,
SNAPSHOT_THRESHOLD);
cluster = MiniOzoneCluster.newHABuilder(conf)
- .setOMServiceId(omServiceId)
- .setSCMServiceId(scmServiceId).setNumOfOzoneManagers(numOfOMs)
- .setNumOfStorageContainerManagers(numOfSCMs)
- .setNumOfActiveSCMs(numOfSCMs).setNumOfActiveOMs(numOfOMs)
+ .setOMServiceId(OM_SERVICE_ID)
+ .setSCMServiceId(SCM_SERVICE_ID).setNumOfOzoneManagers(NUM_OF_OMS)
+ .setNumOfStorageContainerManagers(NUM_OF_SCMS)
+ .setNumOfActiveSCMs(NUM_OF_SCMS).setNumOfActiveOMs(NUM_OF_OMS)
.build();
cluster.waitForClusterToBeReady();
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
index fb9e3d50f8..e6cfc6e467 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
@@ -123,10 +123,6 @@ public abstract class AbstractContractDistCpTest
private Path inputFile5;
- private Path outputDir;
-
- private Path outputSubDir1;
-
private Path outputSubDir2;
private Path outputSubDir4;
@@ -206,10 +202,10 @@ protected void initPathFields(final Path src, final Path
dest) {
* @param path path to set up
*/
protected void initOutputFields(final Path path) {
- outputDir = new Path(path, "outputDir");
+ Path outputDir = new Path(path, "outputDir");
inputDirUnderOutputDir = new Path(outputDir, "inputDir");
outputFile1 = new Path(inputDirUnderOutputDir, "file1");
- outputSubDir1 = new Path(inputDirUnderOutputDir, "subDir1");
+ Path outputSubDir1 = new Path(inputDirUnderOutputDir, "subDir1");
outputFile2 = new Path(outputSubDir1, "file2");
outputSubDir2 = new Path(inputDirUnderOutputDir, "subDir2/subDir2");
outputFile3 = new Path(outputSubDir2, "file3");
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]