This is an automated email from the ASF dual-hosted git repository.
myskov pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new e3e1f65d7d HDDS-13132. Convert redundant fields to local var in
production code (#8921)
e3e1f65d7d is described below
commit e3e1f65d7ddacc1b8293931f17c05a83ac13282f
Author: Ivan Zlenko <[email protected]>
AuthorDate: Tue Aug 26 14:11:08 2025 +0400
HDDS-13132. Convert redundant fields to local var in production code (#8921)
---
.../hadoop/hdds/scm/XceiverClientMetrics.java | 9 ++++++---
.../hdds/scm/storage/BlockDataStreamOutput.java | 7 +++----
.../container/common/helpers/ContainerMetrics.java | 5 ++++-
.../common/transport/server/XceiverServerGrpc.java | 2 +-
.../common/transport/server/ratis/CSMMetrics.java | 5 ++++-
.../transport/server/ratis/XceiverServerRatis.java | 3 +++
.../ozone/erasurecode/rawcoder/RSRawDecoder.java | 2 ++
.../ozone/erasurecode/rawcoder/RSRawEncoder.java | 1 +
.../SecretKeyProtocolClientSideTranslatorPB.java | 3 +--
.../hadoop/hdds/server/http/BaseHttpServer.java | 6 ++----
.../container/balancer/ContainerBalancerTask.java | 13 +++++--------
.../hdds/scm/server/StorageContainerManager.java | 3 +--
.../ozone/shell/volume/DeleteVolumeHandler.java | 3 +--
.../org/apache/hadoop/ozone/om/ha/OMProxyInfo.java | 4 +---
.../hadoop/ozone/om/lock/OzoneManagerLock.java | 9 +++------
.../ozone/freon/AbstractOmBucketReadWriteOps.java | 3 +--
.../hadoop/ozone/freon/OmRPCLoadGenerator.java | 3 +--
.../hadoop/ozone/freon/RandomKeyGenerator.java | 3 +++
.../freon/containergenerator/GeneratorScm.java | 4 +---
.../org/apache/hadoop/ozone/om/OzoneManager.java | 6 ++----
.../om/request/file/OMRecoverLeaseRequest.java | 6 ++----
.../org/apache/hadoop/ozone/recon/ReconServer.java | 5 ++---
.../scm/ReconStorageContainerManagerFacade.java | 22 ++++++++--------------
.../spi/impl/OzoneManagerServiceProviderImpl.java | 5 ++---
.../upgrade/InitialConstraintUpgradeAction.java | 9 +--------
.../UnhealthyContainerReplicaMismatchAction.java | 4 +---
.../TestInitialConstraintUpgradeAction.java | 1 -
.../hadoop/ozone/s3/metrics/S3GatewayMetrics.java | 2 ++
.../container/utils/ContainerLogFileParser.java | 3 +--
29 files changed, 65 insertions(+), 86 deletions(-)
diff --git
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
index 11d55227ab..1402ea4de6 100644
---
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
+++
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
@@ -51,16 +51,19 @@ public class XceiverClientMetrics implements MetricsSource {
private EnumMap<ContainerProtos.Type, MutableCounterLong> pendingOpsArray;
private EnumMap<ContainerProtos.Type, MutableCounterLong> opsArray;
private EnumMap<ContainerProtos.Type, PerformanceMetrics>
containerOpsLatency;
+
+ // TODO: https://issues.apache.org/jira/browse/HDDS-13555
+ @SuppressWarnings("PMD.SingularField")
private MetricsRegistry registry;
- private OzoneConfiguration conf = new OzoneConfiguration();
- private int[] intervals = conf.getInts(OzoneConfigKeys
- .OZONE_XCEIVER_CLIENT_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY);
public XceiverClientMetrics() {
init();
}
public void init() {
+ OzoneConfiguration conf = new OzoneConfiguration();
+ int[] intervals =
conf.getInts(OzoneConfigKeys.OZONE_XCEIVER_CLIENT_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY);
+
this.registry = new MetricsRegistry(SOURCE_NAME);
this.pendingOpsArray = new EnumMap<>(ContainerProtos.Type.class);
diff --git
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
index 7eccdc2faf..77518b7533 100644
---
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
+++
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
@@ -125,8 +125,6 @@ public class BlockDataStreamOutput implements
ByteBufferStreamOutput {
private final List<DatanodeDetails> failedServers;
private final Checksum checksum;
- //number of buffers used before doing a flush/putBlock.
- private int flushPeriod;
private final Token<? extends TokenIdentifier> token;
private final String tokenString;
private final DataStreamOutput out;
@@ -172,8 +170,9 @@ public BlockDataStreamOutput(
// Alternatively, stream setup can be delayed till the first chunk write.
this.out = setupStream(pipeline);
this.bufferList = bufferList;
- flushPeriod = (int) (config.getStreamBufferFlushSize() / config
- .getStreamBufferSize());
+
+ //number of buffers used before doing a flush/putBlock.
+ int flushPeriod = (int) (config.getStreamBufferFlushSize() /
config.getStreamBufferSize());
Preconditions
.checkArgument(
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
index 7ad04eb3ec..8ee2b4e5c0 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
@@ -64,7 +64,10 @@ public class ContainerMetrics implements Closeable {
private final EnumMap<ContainerProtos.Type, MutableCounterLong>
opsForClosedContainer;
private final EnumMap<ContainerProtos.Type, MutableRate> opsLatency;
private final EnumMap<ContainerProtos.Type, MutableQuantiles[]>
opsLatQuantiles;
- private MetricsRegistry registry = null;
+
+ // TODO: https://issues.apache.org/jira/browse/HDDS-13555
+ @SuppressWarnings("PMD.SingularField")
+ private MetricsRegistry registry;
public ContainerMetrics(int[] intervals) {
final int len = intervals.length;
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
index a81e992bb0..d29aaba3c7 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
@@ -77,7 +77,6 @@ public final class XceiverServerGrpc implements
XceiverServerSpi {
private DatanodeDetails datanodeDetails;
private ThreadPoolExecutor readExecutors;
private EventLoopGroup eventLoopGroup;
- private Class<? extends ServerChannel> channelType;
/**
* Constructs a Grpc server class.
@@ -119,6 +118,7 @@ public XceiverServerGrpc(DatanodeDetails datanodeDetails,
"ChunkReader-ELG-%d")
.build();
+ Class<? extends ServerChannel> channelType;
if (Epoll.isAvailable()) {
eventLoopGroup = new EpollEventLoopGroup(poolSize / 10, factory);
channelType = EpollServerSocketChannel.class;
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java
index 6248e09fb9..57939edf51 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java
@@ -51,7 +51,10 @@ public class CSMMetrics {
private @Metric MutableRate transactionLatencyMs;
private final EnumMap<Type, MutableRate> opsLatencyMs;
private final EnumMap<Type, MutableRate> opsQueueingDelay;
- private MetricsRegistry registry = null;
+
+ // TODO: https://issues.apache.org/jira/browse/HDDS-13555
+ @SuppressWarnings("PMD.SingularField")
+ private MetricsRegistry registry;
// Failure Metrics
private @Metric MutableCounterLong numWriteStateMachineFails;
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index 62a24401aa..eca17f46d7 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -132,6 +132,9 @@ public final class XceiverServerRatis implements
XceiverServerSpi {
private int serverPort;
private int adminPort;
private int clientPort;
+
+ // TODO: https://issues.apache.org/jira/browse/HDDS-13558
+ @SuppressWarnings("PMD.SingularField")
private int dataStreamPort;
private final RaftServer server;
private final String name;
diff --git
a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/RSRawDecoder.java
b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/RSRawDecoder.java
index 89fcca9d04..72a5506f6a 100644
---
a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/RSRawDecoder.java
+++
b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/RSRawDecoder.java
@@ -51,6 +51,8 @@ public class RSRawDecoder extends RawErasureDecoder {
private int[] cachedErasedIndexes;
private int[] validIndexes;
private int numErasedDataUnits;
+
+ @SuppressWarnings("PMD.SingularField")
private boolean[] erasureFlags;
public RSRawDecoder(ECReplicationConfig ecReplicationConfig) {
diff --git
a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/RSRawEncoder.java
b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/RSRawEncoder.java
index 4e68abac94..1d78b1a956 100644
---
a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/RSRawEncoder.java
+++
b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/RSRawEncoder.java
@@ -29,6 +29,7 @@
*/
public class RSRawEncoder extends RawErasureEncoder {
// relevant to schema and won't change during encode calls.
+ @SuppressWarnings("PMD.SingularField")
private byte[] encodeMatrix;
/**
* Array of input tables generated from coding coefficients previously.
diff --git
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SecretKeyProtocolClientSideTranslatorPB.java
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SecretKeyProtocolClientSideTranslatorPB.java
index b91b752334..b04fc4596b 100644
---
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SecretKeyProtocolClientSideTranslatorPB.java
+++
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SecretKeyProtocolClientSideTranslatorPB.java
@@ -58,13 +58,12 @@ public class SecretKeyProtocolClientSideTranslatorPB
implements
*/
private static final RpcController NULL_RPC_CONTROLLER = null;
private final BlockingInterface rpcProxy;
- private SecretKeyProtocolFailoverProxyProvider failoverProxyProvider;
public SecretKeyProtocolClientSideTranslatorPB(
SecretKeyProtocolFailoverProxyProvider<? extends BlockingInterface>
proxyProvider, Class<? extends BlockingInterface> proxyClazz) {
Preconditions.checkState(proxyProvider != null);
- this.failoverProxyProvider = proxyProvider;
+ SecretKeyProtocolFailoverProxyProvider failoverProxyProvider =
proxyProvider;
this.rpcProxy = (BlockingInterface) RetryProxy.create(
proxyClazz, failoverProxyProvider,
failoverProxyProvider.getRetryPolicy());
diff --git
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java
index 74e415de03..30f31c3093 100644
---
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java
+++
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java
@@ -85,8 +85,6 @@ public abstract class BaseHttpServer {
private boolean prometheusSupport;
- private boolean profilerSupport;
-
public BaseHttpServer(MutableConfigurationSource conf, String name)
throws IOException {
this.name = name;
@@ -152,8 +150,8 @@ public BaseHttpServer(MutableConfigurationSource conf,
String name)
prometheusSupport = addDefaultApps &&
conf.getBoolean(HddsConfigKeys.HDDS_PROMETHEUS_ENABLED, true);
- profilerSupport = addDefaultApps &&
- conf.getBoolean(HddsConfigKeys.HDDS_PROFILER_ENABLED, false);
+ boolean profilerSupport = addDefaultApps &&
+
conf.getBoolean(HddsConfigKeys.HDDS_PROFILER_ENABLED, false);
if (prometheusSupport) {
prometheusMetricsSink = new PrometheusMetricsSink(name);
diff --git
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java
index 9b0e16fc3f..6d8614ecc8 100644
---
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java
+++
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java
@@ -91,7 +91,6 @@ public class ContainerBalancerTask implements Runnable {
private long sizeScheduledForMoveInLatestIteration;
// count actual size moved in bytes
private long sizeActuallyMovedInLatestIteration;
- private int iterations;
private final List<DatanodeUsageInfo> overUtilizedNodes;
private final List<DatanodeUsageInfo> underUtilizedNodes;
private List<DatanodeUsageInfo> withinThresholdUtilizedNodes;
@@ -99,8 +98,6 @@ public class ContainerBalancerTask implements Runnable {
private Set<String> includeNodes;
private ContainerBalancerConfiguration config;
private ContainerBalancerMetrics metrics;
- private PlacementPolicyValidateProxy placementPolicyValidateProxy;
- private NetworkTopology networkTopology;
private double upperLimit;
private double lowerLimit;
private ContainerBalancerSelectionCriteria selectionCriteria;
@@ -156,8 +153,8 @@ public ContainerBalancerTask(StorageContainerManager scm,
this.overUtilizedNodes = new ArrayList<>();
this.underUtilizedNodes = new ArrayList<>();
this.withinThresholdUtilizedNodes = new ArrayList<>();
- this.placementPolicyValidateProxy = scm.getPlacementPolicyValidateProxy();
- this.networkTopology = scm.getClusterMap();
+ PlacementPolicyValidateProxy placementPolicyValidateProxy =
scm.getPlacementPolicyValidateProxy();
+ NetworkTopology networkTopology = scm.getClusterMap();
this.nextIterationIndex = nextIterationIndex;
this.containerToSourceMap = new HashMap<>();
this.containerToTargetMap = new HashMap<>();
@@ -212,10 +209,10 @@ public void stop() {
}
private void balance() {
- this.iterations = config.getIterations();
- if (this.iterations == -1) {
+ int iterations = config.getIterations();
+ if (iterations == -1) {
//run balancer infinitely
- this.iterations = Integer.MAX_VALUE;
+ iterations = Integer.MAX_VALUE;
}
// nextIterationIndex is the iteration that balancer should start from on
diff --git
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 9149d07841..4253e7b1c1 100644
---
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -286,7 +286,6 @@ public final class StorageContainerManager extends
ServiceRuntimeInfoImpl
private SCMContainerMetrics scmContainerMetrics;
private SCMContainerPlacementMetrics placementMetrics;
private PlacementPolicy containerPlacementPolicy;
- private PlacementPolicy ecContainerPlacementPolicy;
private PlacementPolicyValidateProxy placementPolicyValidateProxy;
private MetricsSystem ms;
private final Map<String, RatisDropwizardExports> ratisMetricsMap =
@@ -740,7 +739,7 @@ private void initializeSystemManagers(OzoneConfiguration
conf,
ContainerPlacementPolicyFactory.getPolicy(conf, scmNodeManager,
clusterMap, true, placementMetrics);
- ecContainerPlacementPolicy = ContainerPlacementPolicyFactory.getECPolicy(
+ PlacementPolicy ecContainerPlacementPolicy =
ContainerPlacementPolicyFactory.getECPolicy(
conf, scmNodeManager, clusterMap, true, placementMetrics);
placementPolicyValidateProxy = new PlacementPolicyValidateProxy(
diff --git
a/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java
b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java
index 3a5968a851..b8044a8f90 100644
---
a/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java
+++
b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java
@@ -60,7 +60,6 @@ public class DeleteVolumeHandler extends VolumeHandler {
@CommandLine.Option(names = {"-y", "--yes"},
description = "Continue without interactive user confirmation")
private boolean yes;
- private ExecutorService executor;
private List<String> bucketIdList = new ArrayList<>();
private AtomicInteger cleanedBucketCounter =
new AtomicInteger();
@@ -216,7 +215,7 @@ public void run() {
}
private void doCleanBuckets() throws InterruptedException {
- executor = Executors.newFixedThreadPool(threadNo);
+ ExecutorService executor = Executors.newFixedThreadPool(threadNo);
for (int i = 0; i < threadNo; i++) {
executor.execute(new BucketCleaner());
}
diff --git
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMProxyInfo.java
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMProxyInfo.java
index f3252e6de7..8ea1749db9 100644
---
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMProxyInfo.java
+++
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMProxyInfo.java
@@ -28,7 +28,6 @@
* Class to store OM proxy information.
*/
public class OMProxyInfo {
- private String serviceId;
private String nodeId;
private String rpcAddrStr;
private InetSocketAddress rpcAddr;
@@ -38,7 +37,6 @@ public class OMProxyInfo {
LoggerFactory.getLogger(OMProxyInfo.class);
OMProxyInfo(String serviceID, String nodeID, String rpcAddress) {
- this.serviceId = serviceID;
this.nodeId = nodeID;
this.rpcAddrStr = rpcAddress;
this.rpcAddr = NetUtils.createSocketAddr(rpcAddrStr);
@@ -46,7 +44,7 @@ public class OMProxyInfo {
LOG.warn("OzoneManager address {} for serviceID {} remains unresolved " +
"for node ID {} Check your ozone-site.xml file to ensure ozone "
+
"manager addresses are configured properly.",
- rpcAddress, serviceId, nodeId);
+ rpcAddress, serviceID, nodeId);
this.dtService = null;
} else {
diff --git
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java
index 6cd96f7323..364322c3ae 100644
---
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java
+++
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java
@@ -715,9 +715,6 @@ public enum LeveledResource implements Resource {
PREFIX_LOCK((byte) 6, "PREFIX_LOCK"), //127
SNAPSHOT_LOCK((byte) 7, "SNAPSHOT_LOCK"); // = 255
- // level of the resource
- private byte lockLevel;
-
// This will tell the value, till which we can allow locking.
private short mask;
@@ -731,9 +728,9 @@ public enum LeveledResource implements Resource {
private ResourceManager resourceManager;
LeveledResource(byte pos, String name) {
- this.lockLevel = pos;
- this.mask = (short) (Math.pow(2, lockLevel + 1) - 1);
- this.setMask = (short) Math.pow(2, lockLevel);
+ // level of the resource
+ this.mask = (short) (Math.pow(2, pos + 1) - 1);
+ this.setMask = (short) Math.pow(2, pos);
this.name = name;
this.resourceManager = new ResourceManager();
}
diff --git
a/hadoop-ozone/freon/src/main/java/org/apache/hadoop/ozone/freon/AbstractOmBucketReadWriteOps.java
b/hadoop-ozone/freon/src/main/java/org/apache/hadoop/ozone/freon/AbstractOmBucketReadWriteOps.java
index 5929476d73..1e418b1486 100644
---
a/hadoop-ozone/freon/src/main/java/org/apache/hadoop/ozone/freon/AbstractOmBucketReadWriteOps.java
+++
b/hadoop-ozone/freon/src/main/java/org/apache/hadoop/ozone/freon/AbstractOmBucketReadWriteOps.java
@@ -86,7 +86,6 @@ public abstract class AbstractOmBucketReadWriteOps extends
BaseFreonGenerator
defaultValue = "10")
private int numOfWriteOperations;
- private OzoneConfiguration ozoneConfiguration;
private Timer timer;
private ContentGenerator contentGenerator;
private int readThreadCount;
@@ -115,7 +114,7 @@ public Void call() throws Exception {
print("numOfReadOperations: " + numOfReadOperations);
print("numOfWriteOperations: " + numOfWriteOperations);
- ozoneConfiguration = createOzoneConfiguration();
+ OzoneConfiguration ozoneConfiguration = createOzoneConfiguration();
contentGenerator = new ContentGenerator(size.toBytes(), bufferSize);
timer = getMetrics().timer("om-bucket-read-write-ops");
diff --git
a/hadoop-ozone/freon/src/main/java/org/apache/hadoop/ozone/freon/OmRPCLoadGenerator.java
b/hadoop-ozone/freon/src/main/java/org/apache/hadoop/ozone/freon/OmRPCLoadGenerator.java
index 94b33dbfa7..51aac5a2d2 100644
---
a/hadoop-ozone/freon/src/main/java/org/apache/hadoop/ozone/freon/OmRPCLoadGenerator.java
+++
b/hadoop-ozone/freon/src/main/java/org/apache/hadoop/ozone/freon/OmRPCLoadGenerator.java
@@ -45,7 +45,6 @@ public class OmRPCLoadGenerator extends BaseFreonGenerator
implements Callable<Void> {
private Timer timer;
- private OzoneConfiguration configuration;
private OzoneManagerProtocolClientSideTranslatorPB[] clients;
private byte[] payloadReqBytes = new byte[0];
private int payloadRespSize;
@@ -81,7 +80,7 @@ public Void call() throws Exception {
Preconditions.checkArgument(payloadRespSizeKB >= 0,
"OM echo response payload size should be positive value or zero.");
- configuration = createOzoneConfiguration();
+ OzoneConfiguration configuration = createOzoneConfiguration();
clients = new OzoneManagerProtocolClientSideTranslatorPB[clientsCount];
for (int i = 0; i < clientsCount; i++) {
clients[i] = createOmClient(configuration, null);
diff --git
a/hadoop-ozone/freon/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
b/hadoop-ozone/freon/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
index ed636fba0c..b59916f1a5 100644
---
a/hadoop-ozone/freon/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
+++
b/hadoop-ozone/freon/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
@@ -201,10 +201,12 @@ public final class RandomKeyGenerator implements
Callable<Void>, FreonSubcommand
private ReplicationConfig replicationConfig;
+ @SuppressWarnings("PMD.SingularField")
private int threadPoolSize;
private OzoneClient ozoneClient;
private ObjectStore objectStore;
+ @SuppressWarnings("PMD.SingularField")
private ExecutorService executor;
private long startTime;
@@ -241,6 +243,7 @@ public final class RandomKeyGenerator implements
Callable<Void>, FreonSubcommand
private ArrayList<Histogram> histograms = new ArrayList<>();
private OzoneConfiguration ozoneConfiguration;
+ @SuppressWarnings("PMD.SingularField")
private ProgressBar progressbar;
public RandomKeyGenerator() {
diff --git
a/hadoop-ozone/freon/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorScm.java
b/hadoop-ozone/freon/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorScm.java
index 0e9aa83309..9de7a6e21d 100644
---
a/hadoop-ozone/freon/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorScm.java
+++
b/hadoop-ozone/freon/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorScm.java
@@ -48,8 +48,6 @@
@MetaInfServices(FreonSubcommand.class)
public class GeneratorScm extends BaseGenerator {
- private DBStore scmDb;
-
private Table<ContainerID, ContainerInfo> containerStore;
private Timer timer;
@@ -60,7 +58,7 @@ public Void call() throws Exception {
ConfigurationSource config = createOzoneConfiguration();
- scmDb = DBStoreBuilder.createDBStore(config, SCMDBDefinition.get());
+ DBStore scmDb = DBStoreBuilder.createDBStore(config,
SCMDBDefinition.get());
containerStore = CONTAINERS.getTable(scmDb);
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 89c920f9a3..923a94403e 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -418,7 +418,6 @@ public final class OzoneManager extends
ServiceRuntimeInfoImpl
private final OMStorage omStorage;
private ObjectName omInfoBeanName;
private Timer metricsTimer;
- private ScheduleOMMetricsWriteTask scheduleOMMetricsWriteTask;
private static final ObjectWriter WRITER =
new ObjectMapper().writerWithDefaultPrettyPrinter();
private static final ObjectReader READER =
@@ -1860,7 +1859,7 @@ public void start() throws IOException {
// Schedule save metrics
long period = configuration.getTimeDuration(OZONE_OM_METRICS_SAVE_INTERVAL,
OZONE_OM_METRICS_SAVE_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
- scheduleOMMetricsWriteTask = new ScheduleOMMetricsWriteTask();
+ ScheduleOMMetricsWriteTask scheduleOMMetricsWriteTask = new
ScheduleOMMetricsWriteTask();
metricsTimer = new Timer();
metricsTimer.schedule(scheduleOMMetricsWriteTask, 0, period);
@@ -1942,7 +1941,7 @@ public void restart() throws IOException {
// Schedule save metrics
long period = configuration.getTimeDuration(OZONE_OM_METRICS_SAVE_INTERVAL,
OZONE_OM_METRICS_SAVE_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
- scheduleOMMetricsWriteTask = new ScheduleOMMetricsWriteTask();
+ ScheduleOMMetricsWriteTask scheduleOMMetricsWriteTask = new
ScheduleOMMetricsWriteTask();
metricsTimer = new Timer();
metricsTimer.schedule(scheduleOMMetricsWriteTask, 0, period);
@@ -2390,7 +2389,6 @@ public boolean stop() {
if (metricsTimer != null) {
metricsTimer.cancel();
metricsTimer = null;
- scheduleOMMetricsWriteTask = null;
}
omRpcServer.stop();
if (isOmGrpcServerEnabled) {
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java
index 3039992571..5c96ae67fb 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java
@@ -75,8 +75,6 @@ public class OMRecoverLeaseRequest extends OMKeyRequest {
private String volumeName;
private String bucketName;
private String keyName;
- private OmKeyInfo keyInfo;
- private String dbFileKey;
private OmKeyInfo openKeyInfo;
private String dbOpenFileKey;
private boolean force;
@@ -198,9 +196,9 @@ private RecoverLeaseResponse doWork(OzoneManager
ozoneManager,
.setErrMsg(errMsg)
.build();
- dbFileKey = fsoFile.getOzonePathKey();
+ String dbFileKey = fsoFile.getOzonePathKey();
- keyInfo = getKey(dbFileKey);
+ OmKeyInfo keyInfo = getKey(dbFileKey);
if (keyInfo == null) {
throw new OMException("Key:" + keyName + " not found in keyTable.",
KEY_NOT_FOUND);
}
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
index d290a3e66c..39c6d3cda5 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
@@ -81,7 +81,6 @@ public class ReconServer extends GenericCli implements
Callable<Void> {
private ReconDBProvider reconDBProvider;
private ReconNamespaceSummaryManager reconNamespaceSummaryManager;
private OzoneStorageContainerManager reconStorageContainerManager;
- private ReconSafeModeManager reconSafeModeMgr;
private OzoneConfiguration configuration;
private ReconStorageConfig reconStorage;
private CertificateClient certClient;
@@ -146,8 +145,8 @@ public Void call() throws Exception {
reconSchemaManager.createReconSchema();
LOG.debug("Recon schema creation done.");
- this.reconSafeModeMgr = injector.getInstance(ReconSafeModeManager.class);
- this.reconSafeModeMgr.setInSafeMode(true);
+ ReconSafeModeManager reconSafeModeMgr =
injector.getInstance(ReconSafeModeManager.class);
+ reconSafeModeMgr.setInSafeMode(true);
httpServer = injector.getInstance(ReconHttpServer.class);
this.ozoneManagerServiceProvider =
injector.getInstance(OzoneManagerServiceProvider.class);
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
index 09c54fd937..b6b7e3cf5b 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
@@ -160,17 +160,12 @@ public class ReconStorageContainerManagerFacade
private ReconNodeManager nodeManager;
private ReconPipelineManager pipelineManager;
private ReconContainerManager containerManager;
- private NetworkTopology clusterMap;
private StorageContainerServiceProvider scmServiceProvider;
private Set<ReconScmTask> reconScmTasks = new HashSet<>();
- private SCMContainerPlacementMetrics placementMetrics;
- private PlacementPolicy containerPlacementPolicy;
- private HDDSLayoutVersionManager scmLayoutVersionManager;
private ReconSafeModeManager safeModeManager;
private ReconSafeModeMgrTask reconSafeModeMgrTask;
private ContainerSizeCountTask containerSizeCountTask;
private ContainerCountBySizeDao containerCountBySizeDao;
- private ScheduledExecutorService scheduler;
private AtomicBoolean isSyncDataFromSCMRunning;
private final String threadNamePrefix;
@@ -218,10 +213,10 @@ public
ReconStorageContainerManagerFacade(OzoneConfiguration conf,
scmClientFailOverMaxRetryCount);
this.scmStorageConfig = new ReconStorageConfig(conf, reconUtils);
- this.clusterMap = new NetworkTopologyImpl(conf);
+ NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
this.dbStore = DBStoreBuilder.createDBStore(ozoneConfiguration,
ReconSCMDBDefinition.get());
- this.scmLayoutVersionManager =
+ HDDSLayoutVersionManager scmLayoutVersionManager =
new HDDSLayoutVersionManager(scmStorageConfig.getLayoutVersion());
this.scmhaManager = SCMHAManagerStub.getInstance(
true, new SCMDBTransactionBufferImpl());
@@ -231,11 +226,10 @@ public
ReconStorageContainerManagerFacade(OzoneConfiguration conf,
this.nodeManager =
new ReconNodeManager(conf, scmStorageConfig, eventQueue, clusterMap,
ReconSCMDBDefinition.NODES.getTable(dbStore),
- this.scmLayoutVersionManager, reconContext);
- placementMetrics = SCMContainerPlacementMetrics.create();
- this.containerPlacementPolicy =
- ContainerPlacementPolicyFactory.getPolicy(conf, nodeManager,
- clusterMap, true, placementMetrics);
+ scmLayoutVersionManager, reconContext);
+ SCMContainerPlacementMetrics placementMetrics =
SCMContainerPlacementMetrics.create();
+ PlacementPolicy containerPlacementPolicy =
ContainerPlacementPolicyFactory.getPolicy(conf, nodeManager,
+ clusterMap, true, placementMetrics);
this.datanodeProtocolServer = new ReconDatanodeProtocolServer(
conf, this, eventQueue);
this.pipelineManager = ReconPipelineManager.newReconPipelineManager(
@@ -390,9 +384,9 @@ public void start() {
"Recon ScmDatanodeProtocol RPC server",
getDatanodeProtocolServer().getDatanodeRpcAddress()));
}
- scheduler = Executors.newScheduledThreadPool(1,
+ ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1,
new ThreadFactoryBuilder().setNameFormat(threadNamePrefix +
- "SyncSCMContainerInfo-%d")
+ "SyncSCMContainerInfo-%d")
.build());
boolean isSCMSnapshotEnabled = ozoneConfiguration.getBoolean(
ReconServerConfigKeys.OZONE_RECON_SCM_SNAPSHOT_ENABLED,
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
index 8c68143717..9b20efb4f1 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
@@ -116,8 +116,6 @@ public class OzoneManagerServiceProviderImpl
LoggerFactory.getLogger(OzoneManagerServiceProviderImpl.class);
private URLConnectionFactory connectionFactory;
- private int omDBTarProcessorThreadCount; // Number of parallel workers
-
private File omSnapshotDBParentDir = null;
private File reconDbDir = null;
private String omDBSnapshotUrl;
@@ -226,7 +224,8 @@ public OzoneManagerServiceProviderImpl(
this.threadFactory =
new ThreadFactoryBuilder().setNameFormat(threadNamePrefix +
"SyncOM-%d")
.build();
-    this.omDBTarProcessorThreadCount = Math.max(64, Runtime.getRuntime().availableProcessors());
+ // Number of parallel workers
+    int omDBTarProcessorThreadCount = Math.max(64, Runtime.getRuntime().availableProcessors());
this.reconContext = reconContext;
this.taskStatusUpdaterManager = taskStatusUpdaterManager;
this.omDBLagThreshold =
configuration.getLong(RECON_OM_DELTA_UPDATE_LAG_THRESHOLD,
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/InitialConstraintUpgradeAction.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/InitialConstraintUpgradeAction.java
index 4857929cf8..21a8be5dc1 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/InitialConstraintUpgradeAction.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/InitialConstraintUpgradeAction.java
@@ -43,13 +43,11 @@
public class InitialConstraintUpgradeAction implements ReconUpgradeAction {
private static final Logger LOG =
LoggerFactory.getLogger(InitialConstraintUpgradeAction.class);
- private DataSource dataSource;
private DSLContext dslContext;
@Override
public void execute(DataSource source) throws SQLException {
- dataSource = source;
- try (Connection conn = dataSource.getConnection()) {
+ try (Connection conn = source.getConnection()) {
if (!TABLE_EXISTS_CHECK.test(conn, UNHEALTHY_CONTAINERS_TABLE_NAME)) {
return;
}
@@ -98,11 +96,6 @@ public UpgradeActionType getType() {
return FINALIZE;
}
- @VisibleForTesting
- public void setDataSource(DataSource dataSource) {
- this.dataSource = dataSource;
- }
-
@VisibleForTesting
public void setDslContext(DSLContext dslContext) {
this.dslContext = dslContext;
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/UnhealthyContainerReplicaMismatchAction.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/UnhealthyContainerReplicaMismatchAction.java
index d50b16557e..3002a5362a 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/UnhealthyContainerReplicaMismatchAction.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/UnhealthyContainerReplicaMismatchAction.java
@@ -41,13 +41,11 @@
@UpgradeActionRecon(feature = UNHEALTHY_CONTAINER_REPLICA_MISMATCH, type =
FINALIZE)
public class UnhealthyContainerReplicaMismatchAction implements
ReconUpgradeAction {
private static final Logger LOG =
LoggerFactory.getLogger(UnhealthyContainerReplicaMismatchAction.class);
- private DataSource dataSource;
private DSLContext dslContext;
@Override
public void execute(DataSource source) throws Exception {
- this.dataSource = source;
- try (Connection conn = dataSource.getConnection()) {
+ try (Connection conn = source.getConnection()) {
if (!TABLE_EXISTS_CHECK.test(conn, UNHEALTHY_CONTAINERS_TABLE_NAME)) {
return;
}
diff --git
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestInitialConstraintUpgradeAction.java
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestInitialConstraintUpgradeAction.java
index f6618a6da1..7c4c8e5512 100644
---
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestInitialConstraintUpgradeAction.java
+++
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestInitialConstraintUpgradeAction.java
@@ -61,7 +61,6 @@ public void setUp() throws SQLException {
when(mockScmFacade.getDataSource()).thenReturn(dataSource);
// Set the DataSource and DSLContext directly
- upgradeAction.setDataSource(dataSource);
upgradeAction.setDslContext(dslContext);
// Check if the table already exists
diff --git
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/metrics/S3GatewayMetrics.java
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/metrics/S3GatewayMetrics.java
index 54a7263d64..15c1c58b49 100644
---
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/metrics/S3GatewayMetrics.java
+++
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/metrics/S3GatewayMetrics.java
@@ -46,6 +46,8 @@ public final class S3GatewayMetrics implements Closeable,
MetricsSource {
public static final String SOURCE_NAME =
S3GatewayMetrics.class.getSimpleName();
+ // TODO: https://issues.apache.org/jira/browse/HDDS-13555
+ @SuppressWarnings("PMD.SingularField")
private MetricsRegistry registry;
private static S3GatewayMetrics instance;
diff --git
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/container/utils/ContainerLogFileParser.java
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/container/utils/ContainerLogFileParser.java
index f4d5c53ad2..f0f6649b30 100644
---
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/container/utils/ContainerLogFileParser.java
+++
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/container/utils/ContainerLogFileParser.java
@@ -40,7 +40,6 @@
public class ContainerLogFileParser {
- private ExecutorService executorService;
private static final int MAX_OBJ_IN_LIST = 5000;
private static final String LOG_FILE_MARKER = ".log.";
@@ -67,7 +66,7 @@ public void processLogEntries(String logDirectoryPath, ContainerDatanodeDatabase
List<Path> files =
paths.filter(Files::isRegularFile).collect(Collectors.toList());
- executorService = Executors.newFixedThreadPool(threadCount);
+    ExecutorService executorService = Executors.newFixedThreadPool(threadCount);
CountDownLatch latch = new CountDownLatch(files.size());
for (Path file : files) {
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]