This is an automated email from the ASF dual-hosted git repository.
nanda pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 1d64b37c64 HDDS-12380. Fix spotbugs warnings in hdds-container-service (#7958)
1d64b37c64 is described below
commit 1d64b37c64e3314164da6ee724d3d0f30d546d42
Author: Peter Lee <[email protected]>
AuthorDate: Tue Feb 25 02:19:25 2025 +0800
HDDS-12380. Fix spotbugs warnings in hdds-container-service (#7958)
---
.../dev-support/findbugsExcludeFile.xml | 66 ----------------------
.../AbstractBackgroundContainerScanner.java | 21 +++++--
.../ozone/container/common/SCMTestUtils.java | 13 ++++-
.../ozone/container/common/TestContainerCache.java | 13 +++--
.../container/common/TestDatanodeStateMachine.java | 12 ++--
.../common/impl/TestContainerDataYaml.java | 4 +-
.../impl/TestContainerDeletionChoosingPolicy.java | 6 --
.../common/impl/TestContainerPersistence.java | 2 -
.../common/statemachine/TestStateContext.java | 17 ++++--
.../container/common/volume/TestVolumeSet.java | 4 +-
.../container/keyvalue/TestTarContainerPacker.java | 5 +-
.../TestBackgroundContainerDataScanner.java | 6 +-
.../TestBackgroundContainerMetadataScanner.java | 3 +-
.../ozone/container/common/TestEndPoint.java | 4 +-
14 files changed, 67 insertions(+), 109 deletions(-)
diff --git a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
index f68fa91db8..40d78d0cd6 100644
--- a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
@@ -15,70 +15,4 @@
limitations under the License.
-->
<FindBugsFilter>
-
- <!-- Test -->
- <Match>
- <Class name="org.apache.hadoop.ozone.container.common.TestContainerCache" />
- <Bug pattern="RV_RETURN_VALUE_IGNORED_BAD_PRACTICE" />
- </Match>
- <Match>
- <Class name="org.apache.hadoop.ozone.container.common.TestDatanodeStateMachine" />
- <Bug pattern="RV_RETURN_VALUE_IGNORED_BAD_PRACTICE" />
- </Match>
- <Match>
- <Class name="org.apache.hadoop.ozone.container.common.TestDatanodeStateMachine" />
- <Bug pattern="REC_CATCH_EXCEPTION" />
- </Match>
- <Match>
- <Class name="org.apache.hadoop.ozone.container.common.impl.TestContainerDataYaml" />
- <Bug pattern="DLS_DEAD_LOCAL_STORE" />
- </Match>
- <Match>
- <Class name="org.apache.hadoop.ozone.container.common.impl.TestContainerDataYaml" />
- <Bug pattern="RV_RETURN_VALUE_IGNORED_BAD_PRACTICE" />
- </Match>
- <Match>
- <Class name="org.apache.hadoop.ozone.container.common.impl.TestContainerDeletionChoosingPolicy" />
- <Bug pattern="UC_USELESS_OBJECT" />
- </Match>
- <Match>
- <Class name="org.apache.hadoop.ozone.container.common.impl.TestContainerPersistence" />
- <Bug pattern="DLS_DEAD_LOCAL_STORE" />
- </Match>
- <Match>
- <Class name="org.apache.hadoop.ozone.container.common.interfaces.TestHandler" />
- <Bug pattern="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD" />
- </Match>
- <Match>
- <Class name="org.apache.hadoop.ozone.container.common.statemachine.TestStateContext" />
- <Bug pattern="RV_RETURN_VALUE_IGNORED_BAD_PRACTICE" />
- </Match>
- <Match>
- <Class name="org.apache.hadoop.ozone.container.common.statemachine.commandhandler.TestDeleteBlocksCommandHandler" />
- <Bug pattern="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD" />
- </Match>
- <Match>
- <Class name="org.apache.hadoop.ozone.container.common.volume.TestVolumeSet" />
- <Bug pattern="RV_RETURN_VALUE_IGNORED_BAD_PRACTICE" />
- </Match>
- <Match>
- <Class name="org.apache.hadoop.ozone.container.common.volume.TestVolumeSet" />
- <Bug pattern="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD" />
- </Match>
- <Match>
- <Class name="org.apache.hadoop.ozone.container.keyvalue.TestKeyValueContainerMarkUnhealthy" />
- <Bug pattern="RV_RETURN_VALUE_IGNORED_BAD_PRACTICE" />
- </Match>
- <Match>
- <Class name="org.apache.hadoop.ozone.container.keyvalue.TestTarContainerPacker" />
- <Bug pattern="NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE" />
- </Match>
- <Match>
- <Class name="org.apache.hadoop.ozone.container.ozoneimpl.TestBackgroundContainerDataScanner"/>
- <Bug pattern="RU_INVOKE_RUN, RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT" />
- </Match>
- <Match>
- <Class name="org.apache.hadoop.ozone.container.ozoneimpl.TestBackgroundContainerMetadataScanner"/>
- <Bug pattern="RU_INVOKE_RUN, RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT" />
- </Match>
</FindBugsFilter>
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/AbstractBackgroundContainerScanner.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/AbstractBackgroundContainerScanner.java
index dd7e30a5d9..802a86af44 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/AbstractBackgroundContainerScanner.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/AbstractBackgroundContainerScanner.java
@@ -29,12 +29,12 @@
/**
* Base class for scheduled scanners on a Datanode.
*/
-public abstract class AbstractBackgroundContainerScanner extends Thread {
+public abstract class AbstractBackgroundContainerScanner implements Runnable {
public static final Logger LOG =
LoggerFactory.getLogger(AbstractBackgroundContainerScanner.class);
private final long dataScanInterval;
-
+ private final Thread scannerThread;
private final AtomicBoolean stopping;
private final AtomicBoolean pausing = new AtomicBoolean();
@@ -42,8 +42,13 @@ public AbstractBackgroundContainerScanner(String name,
long dataScanInterval) {
this.dataScanInterval = dataScanInterval;
this.stopping = new AtomicBoolean(false);
- setName(name);
- setDaemon(true);
+
+ this.scannerThread = new Thread(this, name);
+ this.scannerThread.setDaemon(true);
+ }
+
+ public void start() {
+ scannerThread.start();
}
@Override
@@ -141,9 +146,9 @@ public final void handleRemainingSleep(long remainingSleep) {
*/
public synchronized void shutdown() {
if (stopping.compareAndSet(false, true)) {
- this.interrupt();
+ scannerThread.interrupt();
try {
- this.join();
+ scannerThread.join();
} catch (InterruptedException ex) {
LOG.warn("Unexpected exception while stopping data scanner.", ex);
Thread.currentThread().interrupt();
@@ -151,6 +156,10 @@ public synchronized void shutdown() {
}
}
+ public boolean isAlive() {
+ return scannerThread.isAlive();
+ }
+
public void pause() {
pausing.getAndSet(true);
}
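For context on the hunks above: the scanner no longer extends Thread but owns a daemon worker thread, so the start/interrupt/join lifecycle is explicit and the old spotbugs exclusions become unnecessary. A minimal sketch of that composition pattern (class and member names here are illustrative, not the actual Ozone code):

    import java.util.concurrent.atomic.AtomicBoolean;

    // Illustrative sketch only: a background task that owns a daemon thread
    // instead of extending Thread, mirroring the refactoring above.
    public abstract class OwnedThreadTask implements Runnable {
      private final Thread worker;
      private final AtomicBoolean stopping = new AtomicBoolean(false);

      protected OwnedThreadTask(String name) {
        this.worker = new Thread(this, name);
        this.worker.setDaemon(true);
      }

      public void start() {
        worker.start();
      }

      public boolean isAlive() {
        return worker.isAlive();
      }

      public synchronized void shutdown() {
        if (stopping.compareAndSet(false, true)) {
          worker.interrupt();
          try {
            worker.join();
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
          }
        }
      }
    }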
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
index 472fc6c640..8a520da3cd 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
@@ -17,6 +17,7 @@
package org.apache.hadoop.ozone.container.common;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.mock;
import com.google.protobuf.BlockingService;
@@ -122,12 +123,18 @@ public static InetSocketAddress getReuseableAddress() throws IOException {
public static OzoneConfiguration getConf(File testDir) {
OzoneConfiguration conf = new OzoneConfiguration();
+ File datanodeDir = new File(testDir, "datanode");
+ File metadataDir = new File(testDir, "metadata");
+ File datanodeIdDir = new File(testDir, "datanodeID");
+ assertTrue(datanodeDir.mkdirs());
+ assertTrue(metadataDir.mkdirs());
+ assertTrue(datanodeIdDir.mkdirs());
conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY,
- new File(testDir, "datanode").getAbsolutePath());
+ datanodeDir.getAbsolutePath());
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
- new File(testDir, "metadata").getAbsolutePath());
+ metadataDir.getAbsolutePath());
conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR,
- new File(testDir, "datanodeID").getAbsolutePath());
+ datanodeIdDir.getAbsolutePath());
conf.setClass(SpaceUsageCheckFactory.Conf.configKeyForClassName(),
MockSpaceUsageCheckFactory.None.class,
SpaceUsageCheckFactory.class);
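An aside on the getConf change above: spotbugs' RV_RETURN_VALUE_IGNORED_BAD_PRACTICE is addressed by checking the boolean returned by File#mkdirs() rather than discarding it; in test code the patch uses assertTrue, while production code would typically check explicitly. A small illustrative sketch (the helper class and method names are made up, not part of the patch):

    import java.io.File;
    import java.io.IOException;

    final class DirUtil {
      private DirUtil() {
      }

      // Create a directory and fail loudly if it could not be created,
      // instead of silently ignoring the boolean returned by mkdirs().
      static File ensureDir(File parent, String name) throws IOException {
        File dir = new File(parent, name);
        if (!dir.mkdirs() && !dir.isDirectory()) {
          throw new IOException("Could not create directory " + dir);
        }
        return dir;
      }
    }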
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
index 3beb734f17..f8ae2f3bdc 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
@@ -22,6 +22,7 @@
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import java.io.File;
@@ -31,6 +32,7 @@
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
+import org.apache.commons.io.FileUtils;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -63,7 +65,7 @@ private void createContainerDB(OzoneConfiguration conf, File dbFile)
@Test
public void testContainerCacheEviction() throws Exception {
File root = new File(testRoot);
- root.mkdirs();
+ assertTrue(root.mkdirs());
OzoneConfiguration conf = new OzoneConfiguration();
conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2);
@@ -139,13 +141,14 @@ public void testContainerCacheEviction() throws Exception {
db4.close();
db5.close();
});
+
+ FileUtils.deleteDirectory(root);
}
@Test
void testConcurrentDBGet() throws Exception {
File root = new File(testRoot);
- root.mkdirs();
- root.deleteOnExit();
+ assertTrue(root.mkdirs());
OzoneConfiguration conf = new OzoneConfiguration();
conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2);
@@ -180,12 +183,13 @@ void testConcurrentDBGet() throws Exception {
db.close();
assertEquals(1, cache.size());
db.cleanup();
+ FileUtils.deleteDirectory(root);
}
@Test
public void testUnderlyingDBzIsClosed() throws Exception {
File root = new File(testRoot);
- root.mkdirs();
+ assertTrue(root.mkdirs());
OzoneConfiguration conf = new OzoneConfiguration();
conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2);
@@ -217,5 +221,6 @@ public void testUnderlyingDBzIsClosed() throws Exception {
db3.close();
db4.close();
cache.clear();
+ FileUtils.deleteDirectory(root);
}
}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
index a959915a8a..0ee12be723 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
@@ -195,7 +195,8 @@ public void testDatanodeStateContext() throws IOException,
File idPath = new File(
conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR),
OzoneConsts.OZONE_SCM_DATANODE_ID_FILE_DEFAULT);
- idPath.delete();
+ assertTrue(idPath.createNewFile());
+ assertTrue(idPath.delete());
DatanodeDetails datanodeDetails = getNewDatanodeDetails();
DatanodeDetails.Port port = DatanodeDetails.newStandalonePort(
OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT);
@@ -317,11 +318,11 @@ public void testDatanodeStateContext() throws IOException,
@Test
public void testDatanodeStateMachineWithIdWriteFail() throws Exception {
-
File idPath = new File(
conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR),
OzoneConsts.OZONE_SCM_DATANODE_ID_FILE_DEFAULT);
- idPath.delete();
+ assertTrue(idPath.createNewFile());
+ assertTrue(idPath.delete());
DatanodeDetails datanodeDetails = getNewDatanodeDetails();
DatanodeDetails.Port port = DatanodeDetails.newStandalonePort(
OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT);
@@ -340,8 +341,7 @@ public void testDatanodeStateMachineWithIdWriteFail() throws Exception {
//Set the idPath to read only, state machine will fail to write
// datanodeId file and set the state to shutdown.
- idPath.getParentFile().mkdirs();
- idPath.getParentFile().setReadOnly();
+ assertTrue(idPath.getParentFile().setReadOnly());
task.execute(executorService);
DatanodeStateMachine.DatanodeStates newState =
@@ -398,7 +398,7 @@ public void testDatanodeStateMachineWithInvalidConfiguration()
task.await(2, TimeUnit.SECONDS);
assertEquals(DatanodeStateMachine.DatanodeStates.SHUTDOWN,
newState);
- } catch (Exception e) {
+ } catch (IOException | InterruptedException | TimeoutException | ExecutionException e) {
fail("Unexpected exception found");
}
});
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
index 4b2ad6db7c..b2612c9e0f 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
@@ -70,7 +70,7 @@ private void setLayoutVersion(ContainerLayoutVersion layoutVersion) {
*/
private File createContainerFile(long containerID, int replicaIndex)
throws IOException {
- new File(testRoot).mkdirs();
+ assertTrue(new File(testRoot).mkdirs());
String containerPath = containerID + ".container";
@@ -167,6 +167,8 @@ public void testCreateContainerFile(ContainerLayoutVersion layout)
kvData.lastDataScanTime().get().toEpochMilli());
assertEquals(SCAN_TIME.toEpochMilli(),
kvData.getDataScanTimestamp().longValue());
+
+ cleanup();
}
@ContainerLayoutTestInfo.ContainerTest
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
index 5348e72766..e1b216a279 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
@@ -29,7 +29,6 @@
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
-import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Random;
@@ -39,7 +38,6 @@
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
import org.apache.hadoop.ozone.container.common.impl.BlockDeletingService.ContainerBlockInfo;
import org.apache.hadoop.ozone.container.common.interfaces.ContainerDeletionChoosingPolicy;
@@ -83,8 +81,6 @@ public void testRandomChoosingPolicy(ContainerLayoutVersion layout)
conf.set(
ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY,
RandomContainerDeletionChoosingPolicy.class.getName());
- List<StorageLocation> pathLists = new LinkedList<>();
- pathLists.add(StorageLocation.parse(containerDir.getAbsolutePath()));
containerSet = new ContainerSet(1000);
int numContainers = 10;
@@ -146,8 +142,6 @@ public void testTopNOrderedChoosingPolicy(ContainerLayoutVersion layout)
conf.set(
ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY,
TopNOrderedContainerDeletionChoosingPolicy.class.getName());
- List<StorageLocation> pathLists = new LinkedList<>();
- pathLists.add(StorageLocation.parse(containerDir.getAbsolutePath()));
containerSet = new ContainerSet(1000);
int numContainers = 10;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
index 6b7d2254d1..3189a34398 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -38,7 +38,6 @@
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
-import java.nio.file.Paths;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.Arrays;
@@ -660,7 +659,6 @@ public void testWritReadManyChunks(ContainerTestVersionInfo versionInfo)
KeyValueContainerData cNewData =
(KeyValueContainerData) container.getContainerData();
assertNotNull(cNewData);
- Path dataDir = Paths.get(cNewData.getChunksPath());
// Read chunk via file system and verify.
Checksum checksum = new Checksum(ChecksumType.CRC32, 1024 * 1024);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java
index 0174d800c4..15c8386a89 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java
@@ -44,6 +44,7 @@
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -572,9 +573,11 @@ public void testIsThreadPoolAvailable() throws Exception {
// task num greater than pool size
for (int i = 0; i < threadPoolSize; i++) {
- executorService.submit((Callable<String>) futureOne::get);
+ Future<String> future = executorService.submit((Callable<String>) futureOne::get);
+ assertFalse(future.isDone());
}
- executorService.submit((Callable<String>) futureTwo::get);
+ Future<String> future = executorService.submit((Callable<String>) futureTwo::get);
+ assertFalse(future.isDone());
assertFalse(stateContext.isThreadPoolAvailable(executorService));
@@ -592,9 +595,11 @@ public void doesNotAwaitWithoutExecute() throws Exception {
final AtomicInteger awaited = new AtomicInteger();
ExecutorService executorService = Executors.newFixedThreadPool(1);
- CompletableFuture<String> future = new CompletableFuture<>();
- executorService.submit((Callable<String>) future::get);
- executorService.submit((Callable<String>) future::get);
+ CompletableFuture<String> task = new CompletableFuture<>();
+ Future<String> future = executorService.submit((Callable<String>) task::get);
+ assertFalse(future.isDone());
+ future = executorService.submit((Callable<String>) task::get);
+ assertFalse(future.isDone());
StateContext subject = new StateContext(new OzoneConfiguration(),
DatanodeStates.INIT, mock(DatanodeStateMachine.class), "") {
@@ -631,7 +636,7 @@ public DatanodeStates await(long time, TimeUnit timeUnit) {
assertEquals(0, awaited.get());
assertEquals(0, executed.get());
- future.complete("any");
+ task.complete("any");
LambdaTestUtils.await(1000, 100, () ->
subject.isThreadPoolAvailable(executorService));
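A note on the TestStateContext hunks above: ExecutorService.submit returns a Future, and discarding it is what spotbugs flags as a bad practice; capturing the reference also gives the test something concrete to assert on. A tiny standalone illustration of the same idea (not taken from the patch):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    public final class SubmitExample {
      public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(1);
        // Keep the Future instead of discarding it, so the outcome of the
        // submitted task can be observed (or asserted on in a test).
        Future<String> future = pool.submit(() -> "done");
        System.out.println(future.get());
        pool.shutdown();
      }
    }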
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
index ef2cc4489d..4614ee952b 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
@@ -185,10 +185,10 @@ public void testVolumeInInconsistentState() throws Exception {
// Create the root volume dir and create a sub-directory within it.
File newVolume = new File(volume3, HDDS_VOLUME_DIR);
System.out.println("new volume root: " + newVolume);
- newVolume.mkdirs();
+ assertTrue(newVolume.mkdirs());
assertTrue(newVolume.exists(), "Failed to create new volume root");
File dataDir = new File(newVolume, "chunks");
- dataDir.mkdirs();
+ assertTrue(dataDir.mkdirs());
assertTrue(dataDir.exists());
// The new volume is in an inconsistent state as the root dir is
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
index 62d4791755..cae19d6721 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
@@ -24,6 +24,7 @@
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -380,7 +381,9 @@ private File writeDbFile(
private File writeSingleFile(Path parentPath, String fileName,
String content) throws IOException {
Path path = parentPath.resolve(fileName).normalize();
- Files.createDirectories(path.getParent());
+ Path parent = path.getParent();
+ assertNotNull(parent);
+ Files.createDirectories(parent);
File file = path.toFile();
FileOutputStream fileStream = new FileOutputStream(file);
try (OutputStreamWriter writer = new OutputStreamWriter(fileStream,
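One more pattern worth spelling out, from the writeSingleFile change above: Path#getParent() may return null, which is what NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE warns about, so the test now asserts non-null before calling Files.createDirectories. A hypothetical non-test variant of the same check (names are illustrative only):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;

    final class ParentDirExample {
      private ParentDirExample() {
      }

      // getParent() returns null for a single-element relative path, so the
      // null case is handled explicitly before creating directories.
      static Path prepareParent(Path file) throws IOException {
        Path parent = file.normalize().getParent();
        if (parent == null) {
          throw new IOException("No parent directory for " + file);
        }
        return Files.createDirectories(parent);
      }
    }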
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerDataScanner.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerDataScanner.java
index ee83bf1817..3da90a294a 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerDataScanner.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerDataScanner.java
@@ -106,12 +106,11 @@ public void testScannerMetrics() {
@Override
public void testScannerMetricsUnregisters() {
String name = scanner.getMetrics().getName();
-
assertNotNull(DefaultMetricsSystem.instance().getSource(name));
scanner.shutdown();
scanner.run();
-
+
assertNull(DefaultMetricsSystem.instance().getSource(name));
}
@@ -204,7 +203,8 @@ public void testWithVolumeFailure() throws Exception {
GenericTestUtils.waitFor(() -> !scanner.isAlive(), 1000, 5000);
// Volume health should have been checked.
- verify(vol, atLeastOnce()).isFailed();
+ // TODO: remove the mock return value asseration after we upgrade to spotbugs 4.8 up
+ assertFalse(verify(vol, atLeastOnce()).isFailed());
// No iterations should have been run.
assertEquals(0, metrics.getNumScanIterations());
assertEquals(0, metrics.getNumContainersScanned());
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerMetadataScanner.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerMetadataScanner.java
index dc2f08d217..c4e82714ed 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerMetadataScanner.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerMetadataScanner.java
@@ -182,7 +182,8 @@ public void testWithVolumeFailure() throws Exception {
GenericTestUtils.waitFor(() -> metrics.getNumScanIterations() >= 1, 1000,
5000);
// Volume health should have been checked.
- verify(vol, atLeastOnce()).isFailed();
+ // TODO: remove the mock return value asseration after we upgrade to spotbugs 4.8 up
+ assertFalse(verify(vol, atLeastOnce()).isFailed());
// Scanner should not have shutdown when it encountered the failed volume.
assertTrue(scanner.isAlive());
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index b6d23777a9..05a0913991 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -515,10 +515,10 @@ public void testHeartbeat() throws Exception {
}
@Test
- public void testHeartbeatWithCommandStatusReport() throws Exception {
+ public void testHeartbeatWithCommandStatusReport(@TempDir File endPointTempDir) throws Exception {
DatanodeDetails dataNode = randomDatanodeDetails();
try (EndpointStateMachine rpcEndPoint =
- createEndpoint(SCMTestUtils.getConf(tempDir),
+ createEndpoint(SCMTestUtils.getConf(endPointTempDir),
serverAddress, 1000)) {
// Add some scmCommands for heartbeat response
addScmCommands();
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]