This is an automated email from the ASF dual-hosted git repository.
sumitagrawal pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 7ed9249abc6 HDDS-13370. Create a separate audit log for background
deletion service (#8858)
7ed9249abc6 is described below
commit 7ed9249abc6775a8aeee8169ed66faad31a3a81b
Author: Sarveksha Yeshavantha Raju
<[email protected]>
AuthorDate: Thu Sep 11 11:48:16 2025 +0530
HDDS-13370. Create a separate audit log for background deletion service
(#8858)
---
.../apache/hadoop/ozone/audit/AuditMessage.java | 16 ++++++-
.../hadoop/ozone/audit/TestOzoneAuditLogger.java | 4 +-
.../ozone/client/rpc/OzoneRpcClientTests.java | 18 ++++----
.../TestDirectoryDeletingServiceWithFSO.java | 6 +--
.../apache/hadoop/ozone/audit/OMSystemAction.java | 6 ++-
.../key/OMDirectoriesPurgeRequestWithFSO.java | 51 +++++++++++++++++++-
.../ozone/om/request/key/OMKeyPurgeRequest.java | 33 +++++++++++--
.../snapshot/OMSnapshotMoveDeletedKeysRequest.java | 54 ++++++++++++++++++++++
.../request/snapshot/OMSnapshotPurgeRequest.java | 21 ++++++++-
.../ozone/om/request/key/TestOMKeyRequest.java | 14 +++++-
.../snapshot/TestOMSnapshotDeleteRequest.java | 2 +-
.../snapshot/TestSnapshotRequestAndResponse.java | 6 +++
.../hadoop/ozone/s3/TestS3GatewayAuditLog.java | 18 ++++----
13 files changed, 215 insertions(+), 34 deletions(-)
diff --git
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java
index a1f9903e61c..b5fd9656b7d 100644
---
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java
+++
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java
@@ -17,6 +17,7 @@
package org.apache.hadoop.ozone.audit;
+import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.Map;
import java.util.function.Supplier;
import org.apache.hadoop.ozone.audit.AuditLogger.PerformanceStringBuilder;
@@ -29,6 +30,7 @@
public final class AuditMessage implements Message {
private static final long serialVersionUID = 1L;
+ private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
private final transient Supplier<String> messageSupplier;
private final String op;
@@ -125,7 +127,19 @@ private String formMessage(String userStr, String ipStr,
String opStr,
PerformanceStringBuilder performanceMap) {
String perf = performanceMap != null
? " | perf=" + performanceMap.build() : "";
+ String params = formatParamsAsJson(paramsMap);
return "user=" + userStr + " | ip=" + ipStr + " | " + "op=" + opStr
- + " " + paramsMap + " | ret=" + retStr + perf;
+ + " " + params + " | ret=" + retStr + perf;
+ }
+
+ private String formatParamsAsJson(Map<String, String> paramsMap) {
+ if (paramsMap == null || paramsMap.isEmpty()) {
+ return "{}";
+ }
+ try {
+ return OBJECT_MAPPER.writeValueAsString(paramsMap);
+ } catch (Exception e) {
+ return paramsMap.toString();
+ }
}
}
diff --git
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
index a84f46a2c93..3e4e325a54f 100644
---
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
+++
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
@@ -177,7 +177,7 @@ public void messageIncludesAllParts() {
assertThat(message).contains(USER);
assertThat(message).contains(IP_ADDRESS);
assertThat(message).contains(DummyAction.CREATE_VOLUME.name());
- assertThat(message).contains(PARAMS.toString());
+ assertThat(message).contains(PARAMS.values());
assertThat(message).contains(FAILURE.getStatus());
}
@@ -214,7 +214,7 @@ public void messageIncludesMultilineException() throws
IOException {
verifyLog(
"ERROR | OMAudit | ? | user=john | "
+ "ip=192.168.0.1 | op=CREATE_VOLUME "
- + "{key1=value1, key2=value2} | ret=FAILURE",
+ + "{\"key1\":\"value1\",\"key2\":\"value2\"} | ret=FAILURE",
"org.apache.hadoop.ozone.audit."
+ "TestOzoneAuditLogger$TestException: Dummy exception message",
"at org.apache.hadoop.ozone.audit.TestOzoneAuditLogger"
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java
index 5cd57f9db19..accbf6c8310 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java
@@ -1138,16 +1138,16 @@ public void testDeleteAuditLog() throws Exception {
bucket.deleteKeys(keysToDelete);
String consoleOutput = output.get();
- assertThat(consoleOutput).contains("op=DELETE_KEY {volume=" + volumeName +
", bucket=" + bucketName +
- ", key=key1, dataSize=" + valueLength + ",
replicationConfig=RATIS/THREE");
- assertThat(consoleOutput).contains("op=DELETE_KEY {volume=" + volumeName +
", bucket=" + bucketName +
- ", key=key2, dataSize=" + valueLength + ",
replicationConfig=EC{rs-3-2-1024k}");
- assertThat(consoleOutput).contains("op=DELETE_KEY {volume=" + volumeName +
", bucket=" + bucketName +
- ", key=dir1, Transaction");
- assertThat(consoleOutput).contains("op=DELETE_KEYS {volume=" + volumeName
+ ", bucket=" + bucketName +
- ", deletedKeysList={key=dir1/key4, dataSize=" + valueLength +
+ assertThat(consoleOutput).contains("op=DELETE_KEY {\"volume\":\"" +
volumeName + "\",\"bucket\":\"" + bucketName +
+ "\",\"key\":\"key1\",\"dataSize\":\"" + valueLength +
"\",\"replicationConfig\":\"RATIS/THREE");
+ assertThat(consoleOutput).contains("op=DELETE_KEY {\"volume\":\"" +
volumeName + "\",\"bucket\":\"" + bucketName +
+ "\",\"key\":\"key2\",\"dataSize\":\"" + valueLength +
"\",\"replicationConfig\":\"EC{rs-3-2-1024k}");
+ assertThat(consoleOutput).contains("op=DELETE_KEY {\"volume\":\"" +
volumeName + "\",\"bucket\":\"" + bucketName +
+ "\",\"key\":\"dir1\",\"Transaction\"");
+ assertThat(consoleOutput).contains("op=DELETE_KEYS {\"volume\":\"" +
volumeName + "\",\"bucket\":\"" + bucketName +
+ "\",\"deletedKeysList\":\"{key=dir1/key4, dataSize=" + valueLength +
", replicationConfig=RATIS/THREE}, {key=dir1/key5, dataSize=" +
valueLength +
- ", replicationConfig=EC{rs-3-2-1024k}}, unDeletedKeysList=");
+ ", replicationConfig=EC{rs-3-2-1024k}}\",\"unDeletedKeysList\"");
}
protected void verifyReplication(String volumeName, String bucketName,
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingServiceWithFSO.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingServiceWithFSO.java
index 44678059f97..c8ebcb083f1 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingServiceWithFSO.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingServiceWithFSO.java
@@ -271,7 +271,7 @@ public void testDeleteWithLargeSubPathsThanBatchSize()
throws Exception {
assertEquals(15, metrics.getNumSubFilesMovedToDeletedTable());
assertEquals(19, metrics.getNumDirsPurged());
assertEquals(19, metrics.getNumDirsSentForPurge());
- assertEquals(18, metrics.getNumSubDirsMovedToDeletedDirTable());
+ assertEquals(0, metrics.getNumSubDirsMovedToDeletedDirTable());
assertEquals(18, metrics.getNumSubDirsSentForPurge());
assertThat(dirDeletingService.getRunCount().get()).isGreaterThan(1);
@@ -327,7 +327,7 @@ public void testDeleteWithMultiLevels() throws Exception {
assertSubPathsCount(dirDeletingService::getDeletedDirsCount, 5);
assertEquals(5, metrics.getNumDirsSentForPurge());
assertEquals(5, metrics.getNumDirsPurged());
- assertEquals(4, metrics.getNumSubDirsMovedToDeletedDirTable());
+ assertEquals(0, metrics.getNumSubDirsMovedToDeletedDirTable());
assertEquals(4, metrics.getNumSubDirsSentForPurge());
assertEquals(3, metrics.getNumSubFilesSentForPurge());
assertEquals(3, metrics.getNumSubFilesMovedToDeletedTable());
@@ -381,7 +381,7 @@ public void testDeleteWithLessDirsButMultipleLevels()
throws Exception {
assertEquals(2, metrics.getNumDirsSentForPurge());
assertEquals(2, metrics.getNumDirsPurged());
- assertEquals(1, metrics.getNumSubDirsMovedToDeletedDirTable());
+ assertEquals(0, metrics.getNumSubDirsMovedToDeletedDirTable());
assertEquals(1, metrics.getNumSubDirsSentForPurge());
assertEquals(1, metrics.getNumSubFilesSentForPurge());
assertEquals(1, metrics.getNumSubFilesMovedToDeletedTable());
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMSystemAction.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMSystemAction.java
index 76f66d74350..622f68070b3 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMSystemAction.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMSystemAction.java
@@ -22,7 +22,11 @@
* as present for request.
*/
public enum OMSystemAction implements AuditAction {
- STARTUP;
+ STARTUP,
+ DIRECTORY_DELETION,
+ KEY_DELETION,
+ SNAPSHOT_MOVE_DEL_KEYS,
+ SNAPSHOT_PURGE;
@Override
public String getAction() {
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java
index 5593097361e..3e817b9c86c 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java
@@ -25,6 +25,7 @@
import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
+import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -34,6 +35,9 @@
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.AuditLoggerType;
+import org.apache.hadoop.ozone.audit.OMSystemAction;
import org.apache.hadoop.ozone.om.DeletingServiceMetrics;
import org.apache.hadoop.ozone.om.OMMetrics;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
@@ -55,12 +59,21 @@
* Handles purging of keys from OM DB.
*/
public class OMDirectoriesPurgeRequestWithFSO extends OMKeyRequest {
+ private static final AuditLogger AUDIT = new
AuditLogger(AuditLoggerType.OMSYSTEMLOGGER);
+ private static final String AUDIT_PARAM_DIRS_DELETED = "directoriesDeleted";
+ private static final String AUDIT_PARAM_SUBDIRS_MOVED =
"subdirectoriesMoved";
+ private static final String AUDIT_PARAM_SUBFILES_MOVED = "subFilesMoved";
+ private static final String AUDIT_PARAM_DIRS_DELETED_LIST =
"directoriesDeletedList";
+ private static final String AUDIT_PARAM_SUBDIRS_MOVED_LIST =
"subdirectoriesMovedList";
+ private static final String AUDIT_PARAM_SUBFILES_MOVED_LIST =
"subFilesMovedList";
+ private static final String AUDIT_PARAM_SNAPSHOT_ID = "snapshotId";
public OMDirectoriesPurgeRequestWithFSO(OMRequest omRequest) {
super(omRequest, BucketLayout.FILE_SYSTEM_OPTIMIZED);
}
@Override
+ @SuppressWarnings("methodlength")
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
ExecutionContext context) {
OzoneManagerProtocolProtos.PurgeDirectoriesRequest purgeDirsRequest =
getOmRequest().getPurgeDirectoriesRequest();
@@ -78,6 +91,11 @@ public OMClientResponse validateAndUpdateCache(OzoneManager
ozoneManager, Execut
OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
getOmRequest());
final SnapshotInfo fromSnapshotInfo;
+
+ Set<String> subDirNames = new HashSet<>();
+ Set<String> subFileNames = new HashSet<>();
+ Set<String> deletedDirNames = new HashSet<>();
+
try {
fromSnapshotInfo = fromSnapshot != null ?
SnapshotUtils.getSnapshotInfo(ozoneManager,
fromSnapshot) : null;
@@ -95,6 +113,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager
ozoneManager, Execut
}
} catch (IOException e) {
LOG.error("Error occurred while performing OMDirectoriesPurge. ", e);
+
AUDIT.logWriteFailure(ozoneManager.buildAuditMessageForFailure(OMSystemAction.DIRECTORY_DELETION,
null, e));
return new
OMDirectoriesPurgeResponseWithFSO(createErrorOMResponse(omResponse, e));
}
try {
@@ -103,6 +122,14 @@ public OMClientResponse
validateAndUpdateCache(OzoneManager ozoneManager, Execut
for (OzoneManagerProtocolProtos.KeyInfo key :
path.getMarkDeletedSubDirsList()) {
OmKeyInfo keyInfo = OmKeyInfo.getFromProtobuf(key);
+
+ String pathKey =
omMetadataManager.getOzonePathKey(path.getVolumeId(),
+ path.getBucketId(), keyInfo.getParentObjectID(),
keyInfo.getFileName());
+ String deleteKey = omMetadataManager.getOzoneDeletePathKey(
+ keyInfo.getObjectID(), pathKey);
+
+ subDirNames.add(deleteKey);
+
String volumeName = keyInfo.getVolumeName();
String bucketName = keyInfo.getBucketName();
Pair<String, String> volBucketPair = Pair.of(volumeName, bucketName);
@@ -112,7 +139,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager
ozoneManager, Execut
lockSet.add(volBucketPair);
}
omMetrics.decNumKeys();
- numSubDirMoved++;
OmBucketInfo omBucketInfo = getBucketInfo(omMetadataManager,
volumeName, bucketName);
// bucketInfo can be null in case of delete volume or bucket
@@ -131,6 +157,13 @@ public OMClientResponse
validateAndUpdateCache(OzoneManager ozoneManager, Execut
for (OzoneManagerProtocolProtos.KeyInfo key :
path.getDeletedSubFilesList()) {
OmKeyInfo keyInfo = OmKeyInfo.getFromProtobuf(key);
+
+ String pathKey =
omMetadataManager.getOzonePathKey(path.getVolumeId(),
+ path.getBucketId(), keyInfo.getParentObjectID(),
keyInfo.getFileName());
+ String deleteKey = omMetadataManager.getOzoneDeletePathKey(
+ keyInfo.getObjectID(), pathKey);
+ subFileNames.add(deleteKey);
+
String volumeName = keyInfo.getVolumeName();
String bucketName = keyInfo.getBucketName();
Pair<String, String> volBucketPair = Pair.of(volumeName, bucketName);
@@ -172,22 +205,38 @@ public OMClientResponse
validateAndUpdateCache(OzoneManager ozoneManager, Execut
}
}
if (path.hasDeletedDir()) {
+ deletedDirNames.add(path.getDeletedDir());
numDirsDeleted++;
}
}
+
+ // Remove deletedDirNames from subDirNames to avoid duplication
+ subDirNames.removeAll(deletedDirNames);
+ numSubDirMoved = subDirNames.size();
deletingServiceMetrics.incrNumSubDirectoriesMoved(numSubDirMoved);
deletingServiceMetrics.incrNumSubFilesMoved(numSubFilesMoved);
deletingServiceMetrics.incrNumDirPurged(numDirsDeleted);
+ Map<String, String> auditParams = new LinkedHashMap<>();
if (fromSnapshotInfo != null) {
fromSnapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(context.getTermIndex()).toByteString());
omMetadataManager.getSnapshotInfoTable().addCacheEntry(new
CacheKey<>(fromSnapshotInfo.getTableKey()),
CacheValue.get(context.getIndex(), fromSnapshotInfo));
+ auditParams.put(AUDIT_PARAM_SNAPSHOT_ID,
fromSnapshotInfo.getSnapshotId().toString());
}
+
+ auditParams.put(AUDIT_PARAM_DIRS_DELETED,
String.valueOf(numDirsDeleted));
+ auditParams.put(AUDIT_PARAM_SUBDIRS_MOVED,
String.valueOf(numSubDirMoved));
+ auditParams.put(AUDIT_PARAM_SUBFILES_MOVED,
String.valueOf(numSubFilesMoved));
+ auditParams.put(AUDIT_PARAM_DIRS_DELETED_LIST, String.join(",",
deletedDirNames));
+ auditParams.put(AUDIT_PARAM_SUBDIRS_MOVED_LIST, String.join(",",
subDirNames));
+ auditParams.put(AUDIT_PARAM_SUBFILES_MOVED_LIST, String.join(",",
subFileNames));
+
AUDIT.logWriteSuccess(ozoneManager.buildAuditMessageForSuccess(OMSystemAction.DIRECTORY_DELETION,
auditParams));
} catch (IOException ex) {
// Case of IOException for fromProtobuf will not happen
// as this is created and send within OM
// only case of upgrade where compatibility is broken can have
+
AUDIT.logWriteFailure(ozoneManager.buildAuditMessageForFailure(OMSystemAction.DIRECTORY_DELETION,
null, ex));
throw new IllegalStateException(ex);
} finally {
lockSet.stream().forEach(e -> omMetadataManager.getLock()
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java
index aa47d640e71..64e3fa31244 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java
@@ -22,11 +22,16 @@
import java.io.IOException;
import java.util.ArrayList;
+import java.util.LinkedHashMap;
import java.util.List;
+import java.util.Map;
import java.util.UUID;
import org.apache.hadoop.hdds.utils.TransactionInfo;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.AuditLoggerType;
+import org.apache.hadoop.ozone.audit.OMSystemAction;
import org.apache.hadoop.ozone.om.DeletingServiceMetrics;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.OzoneManager;
@@ -53,6 +58,13 @@ public class OMKeyPurgeRequest extends OMKeyRequest {
private static final Logger LOG =
LoggerFactory.getLogger(OMKeyPurgeRequest.class);
+ private static final AuditLogger AUDIT = new
AuditLogger(AuditLoggerType.OMSYSTEMLOGGER);
+ private static final String AUDIT_PARAM_KEYS_DELETED = "keysDeleted";
+ private static final String AUDIT_PARAM_RENAMED_KEYS_PURGED =
"renamedKeysPurged";
+ private static final String AUDIT_PARAMS_DELETED_KEYS_LIST =
"deletedKeysList";
+ private static final String AUDIT_PARAMS_RENAMED_KEYS_LIST =
"renamedKeysList";
+ private static final String AUDIT_PARAM_SNAPSHOT_ID = "snapshotId";
+
public OMKeyPurgeRequest(OMRequest omRequest) {
super(omRequest);
}
@@ -68,7 +80,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager
ozoneManager, Execut
OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
getOmRequest());
-
final SnapshotInfo fromSnapshotInfo;
try {
fromSnapshotInfo = fromSnapshot != null ?
SnapshotUtils.getSnapshotInfo(ozoneManager,
@@ -88,6 +99,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager
ozoneManager, Execut
}
} catch (IOException e) {
LOG.error("Error occurred while performing OmKeyPurge. ", e);
+
AUDIT.logWriteFailure(ozoneManager.buildAuditMessageForFailure(OMSystemAction.KEY_DELETION,
null, e));
return new OMKeyPurgeResponse(createErrorOMResponse(omResponse, e));
}
@@ -105,20 +117,33 @@ public OMClientResponse
validateAndUpdateCache(OzoneManager ozoneManager, Execut
deletingServiceMetrics.incrNumRenameEntriesPurged(renamedKeysToBePurged.size());
if (keysToBePurgedList.isEmpty() && renamedKeysToBePurged.isEmpty()) {
- return new OMKeyPurgeResponse(createErrorOMResponse(omResponse,
- new OMException("None of the keys can be purged be purged since a
new snapshot was created for all the " +
- "buckets, making this request invalid",
OMException.ResultCodes.KEY_DELETION_ERROR)));
+ OMException oe = new OMException("None of the keys can be purged be
purged since a new snapshot was created " +
+ "for all the buckets, making this request invalid",
OMException.ResultCodes.KEY_DELETION_ERROR);
+
AUDIT.logWriteFailure(ozoneManager.buildAuditMessageForFailure(OMSystemAction.KEY_DELETION,
null, oe));
+ return new OMKeyPurgeResponse(createErrorOMResponse(omResponse, oe));
}
// Setting transaction info for snapshot, this is to prevent duplicate
purge requests to OM from background
// services.
try {
+ Map<String, String> auditParams = new LinkedHashMap<>();
if (fromSnapshotInfo != null) {
fromSnapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(context.getTermIndex()).toByteString());
omMetadataManager.getSnapshotInfoTable().addCacheEntry(new
CacheKey<>(fromSnapshotInfo.getTableKey()),
CacheValue.get(context.getIndex(), fromSnapshotInfo));
+ auditParams.put(AUDIT_PARAM_SNAPSHOT_ID,
fromSnapshotInfo.getSnapshotId().toString());
+ }
+ auditParams.put(AUDIT_PARAM_KEYS_DELETED,
String.valueOf(numKeysDeleted));
+ auditParams.put(AUDIT_PARAM_RENAMED_KEYS_PURGED,
String.valueOf(renamedKeysToBePurged.size()));
+ if (!keysToBePurgedList.isEmpty()) {
+ auditParams.put(AUDIT_PARAMS_DELETED_KEYS_LIST, String.join(",",
keysToBePurgedList));
+ }
+ if (!renamedKeysToBePurged.isEmpty()) {
+ auditParams.put(AUDIT_PARAMS_RENAMED_KEYS_LIST, String.join(",",
renamedKeysToBePurged));
}
+
AUDIT.logWriteSuccess(ozoneManager.buildAuditMessageForSuccess(OMSystemAction.KEY_DELETION,
auditParams));
} catch (IOException e) {
+
AUDIT.logWriteFailure(ozoneManager.buildAuditMessageForFailure(OMSystemAction.KEY_DELETION,
null, e));
return new OMKeyPurgeResponse(createErrorOMResponse(omResponse, e));
}
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java
index 87289039177..4714c77a349 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java
@@ -20,8 +20,13 @@
import static
org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.FILESYSTEM_SNAPSHOT;
import java.io.IOException;
+import java.util.LinkedHashMap;
import java.util.List;
+import java.util.Map;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.AuditLoggerType;
+import org.apache.hadoop.ozone.audit.OMSystemAction;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.SnapshotChainManager;
@@ -44,6 +49,20 @@
*/
public class OMSnapshotMoveDeletedKeysRequest extends OMClientRequest {
+ private static final AuditLogger AUDIT = new
AuditLogger(AuditLoggerType.OMSYSTEMLOGGER);
+ private static final String AUDIT_PARAM_FROM_SNAPSHOT_ID = "fromSnapshotId";
+ private static final String AUDIT_PARAM_FROM_SNAPSHOT_TABLE_KEY =
"fromSnapshotTableKey";
+ private static final String AUDIT_PARAM_NEXT_SNAPSHOT_ID = "nextSnapshotId";
+ private static final String AUDIT_PARAM_NEXT_SNAPSHOT_TABLE_KEY =
"nextSnapshotTableKey";
+ private static final String AUDIT_PARAM_KEYS_MOVED = "keysMoved";
+ private static final String AUDIT_PARAM_RENAMED_KEYS_MOVED =
"renamedKeysMoved";
+ private static final String AUDIT_PARAM_DIRS_MOVED = "dirsMoved";
+ private static final String AUDIT_PARAM_RECLAIM_KEYS = "reclaimKeys";
+ private static final String AUDIT_PARAM_KEYS_MOVED_LIST = "keysMovedList";
+ private static final String AUDIT_PARAM_RENAMED_KEYS_LIST =
"renamedKeysList";
+ private static final String AUDIT_PARAM_DIRS_MOVED_LIST = "dirsMovedList";
+ private static final String AUDIT_PARAM_RECLAIM_KEYS_LIST =
"reclaimKeysList";
+
public OMSnapshotMoveDeletedKeysRequest(OMRequest omRequest) {
super(omRequest);
}
@@ -67,6 +86,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager
ozoneManager, Execut
OMClientResponse omClientResponse = null;
OzoneManagerProtocolProtos.OMResponse.Builder omResponse =
OmResponseUtil.getOMResponseBuilder(getOmRequest());
+ Map<String, String> auditParams = new LinkedHashMap<>();
try {
// Check the snapshot exists.
SnapshotInfo snapshotInfo = SnapshotUtils.getSnapshotInfo(ozoneManager,
fromSnapshot.getTableKey());
@@ -84,9 +104,43 @@ public OMClientResponse validateAndUpdateCache(OzoneManager
ozoneManager, Execut
omResponse.build(), fromSnapshot, nextSnapshot,
nextDBKeysList, reclaimKeysList, renamedKeysList, movedDirs);
+ auditParams.put(AUDIT_PARAM_FROM_SNAPSHOT_ID,
fromSnapshot.getSnapshotId().toString());
+ auditParams.put(AUDIT_PARAM_FROM_SNAPSHOT_TABLE_KEY,
fromSnapshot.getTableKey());
+ if (nextSnapshot != null) {
+ auditParams.put(AUDIT_PARAM_NEXT_SNAPSHOT_ID,
nextSnapshot.getSnapshotId().toString());
+ auditParams.put(AUDIT_PARAM_NEXT_SNAPSHOT_TABLE_KEY,
nextSnapshot.getTableKey());
+ }
+ auditParams.put(AUDIT_PARAM_KEYS_MOVED,
String.valueOf(nextDBKeysList.size()));
+ auditParams.put(AUDIT_PARAM_RENAMED_KEYS_MOVED,
String.valueOf(renamedKeysList.size()));
+ auditParams.put(AUDIT_PARAM_DIRS_MOVED,
String.valueOf(movedDirs.size()));
+ auditParams.put(AUDIT_PARAM_RECLAIM_KEYS,
String.valueOf(reclaimKeysList.size()));
+
+ if (!nextDBKeysList.isEmpty()) {
+ auditParams.put(AUDIT_PARAM_KEYS_MOVED_LIST,
nextDBKeysList.toString());
+ }
+ if (!renamedKeysList.isEmpty()) {
+ auditParams.put(AUDIT_PARAM_RENAMED_KEYS_LIST,
renamedKeysList.toString());
+ }
+ if (!movedDirs.isEmpty()) {
+ auditParams.put(AUDIT_PARAM_DIRS_MOVED_LIST, movedDirs.toString());
+ }
+ if (!reclaimKeysList.isEmpty()) {
+ auditParams.put(AUDIT_PARAM_RECLAIM_KEYS_LIST,
reclaimKeysList.toString());
+ }
+
AUDIT.logWriteSuccess(ozoneManager.buildAuditMessageForSuccess(OMSystemAction.SNAPSHOT_MOVE_DEL_KEYS,
+ auditParams));
+
} catch (IOException ex) {
omClientResponse = new OMSnapshotMoveDeletedKeysResponse(
createErrorOMResponse(omResponse, ex));
+ auditParams.put(AUDIT_PARAM_FROM_SNAPSHOT_ID,
fromSnapshot.getSnapshotId().toString());
+ auditParams.put(AUDIT_PARAM_FROM_SNAPSHOT_TABLE_KEY,
fromSnapshot.getTableKey());
+ if (nextSnapshot != null) {
+ auditParams.put(AUDIT_PARAM_NEXT_SNAPSHOT_ID,
nextSnapshot.getSnapshotId().toString());
+ auditParams.put(AUDIT_PARAM_NEXT_SNAPSHOT_TABLE_KEY,
nextSnapshot.getTableKey());
+ }
+
AUDIT.logWriteFailure(ozoneManager.buildAuditMessageForFailure(OMSystemAction.SNAPSHOT_MOVE_DEL_KEYS,
+ auditParams, ex));
}
return omClientResponse;
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java
index 3dd2842ed3b..55d851da198 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java
@@ -19,6 +19,7 @@
import java.io.IOException;
import java.util.HashMap;
+import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
@@ -26,6 +27,9 @@
import org.apache.hadoop.hdds.utils.TransactionInfo;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.AuditLoggerType;
+import org.apache.hadoop.ozone.audit.OMSystemAction;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.OmSnapshotInternalMetrics;
@@ -52,6 +56,11 @@ public class OMSnapshotPurgeRequest extends OMClientRequest {
private static final Logger LOG =
LoggerFactory.getLogger(OMSnapshotPurgeRequest.class);
+ private static final AuditLogger AUDIT = new
AuditLogger(AuditLoggerType.OMSYSTEMLOGGER);
+ private static final String AUDIT_PARAM_SNAPSHOTS_PURGED = "snapshotsPurged";
+ private static final String AUDIT_PARAM_SNAPSHOT_DB_KEYS = "snapshotsDBKeys";
+ private static final String AUDIT_PARAM_SNAPSHOTS_UPDATED =
"snapshotsUpdated";
+
/**
* This map contains up to date snapshotInfo and works as a local cache for
OMSnapshotPurgeRequest.
* Since purge and other updates happen in sequence inside
validateAndUpdateCache, we can get updated snapshotInfo
@@ -81,9 +90,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager
ozoneManager, Execut
SnapshotPurgeRequest snapshotPurgeRequest = getOmRequest()
.getSnapshotPurgeRequest();
+ Map<String, String> auditParams = new LinkedHashMap<>();
+ List<String> snapshotDbKeys = snapshotPurgeRequest
+ .getSnapshotDBKeysList();
try {
- List<String> snapshotDbKeys = snapshotPurgeRequest
- .getSnapshotDBKeysList();
// Each snapshot purge operation does three things:
// 1. Update the deep clean flag for the next active snapshot (So that
it can be
@@ -125,11 +135,18 @@ public OMClientResponse
validateAndUpdateCache(OzoneManager ozoneManager, Execut
omSnapshotIntMetrics.incNumSnapshotPurges();
LOG.info("Successfully executed snapshotPurgeRequest: {{}} along with
updating snapshots:{}.",
snapshotPurgeRequest, updatedSnapshotInfos);
+
+ auditParams.put(AUDIT_PARAM_SNAPSHOTS_PURGED,
String.valueOf(snapshotDbKeys.size()));
+ auditParams.put(AUDIT_PARAM_SNAPSHOT_DB_KEYS, snapshotDbKeys.toString());
+ auditParams.put(AUDIT_PARAM_SNAPSHOTS_UPDATED,
updatedSnapshotInfos.toString());
+
AUDIT.logWriteSuccess(ozoneManager.buildAuditMessageForSuccess(OMSystemAction.SNAPSHOT_PURGE,
auditParams));
} catch (IOException ex) {
omClientResponse = new OMSnapshotPurgeResponse(
createErrorOMResponse(omResponse, ex));
omSnapshotIntMetrics.incNumSnapshotPurgeFails();
+ auditParams.put(AUDIT_PARAM_SNAPSHOT_DB_KEYS, snapshotDbKeys.toString());
LOG.error("Failed to execute snapshotPurgeRequest:{{}}.",
snapshotPurgeRequest, ex);
+
AUDIT.logWriteFailure(ozoneManager.buildAuditMessageForFailure(OMSystemAction.SNAPSHOT_PURGE,
auditParams, ex));
}
return omClientResponse;
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
index bccaabe82a6..1d29e37d80e 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
@@ -41,6 +41,7 @@
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
@@ -164,6 +165,11 @@ public void setup() throws Exception {
new
OmBucketInfo.Builder().setVolumeName("").setBucketName("").build());
doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
+ AuditMessage mockAuditMessage = mock(AuditMessage.class);
+ when(mockAuditMessage.getOp()).thenReturn("MOCK_OP");
+ when(ozoneManager.buildAuditMessageForSuccess(any(),
any())).thenReturn(mockAuditMessage);
+ when(ozoneManager.buildAuditMessageForFailure(any(), any(),
any())).thenReturn(mockAuditMessage);
+
setupReplicationConfigValidation(ozoneManager, ozoneConfiguration);
scmClient = mock(ScmClient.class);
@@ -225,8 +231,14 @@ public void setup() throws Exception {
return allocatedBlocks;
});
+ ContainerInfo containerInfo = new ContainerInfo.Builder()
+ .setContainerID(1L)
+ .setState(HddsProtos.LifeCycleState.OPEN)
+
.setReplicationConfig(RatisReplicationConfig.getInstance(ReplicationFactor.ONE))
+ .setPipelineID(pipeline.getId())
+ .build();
ContainerWithPipeline containerWithPipeline =
- new ContainerWithPipeline(Mockito.mock(ContainerInfo.class), pipeline);
+ new ContainerWithPipeline(containerInfo, pipeline);
when(scmContainerLocationProtocol.getContainerWithPipeline(anyLong())).thenReturn(containerWithPipeline);
volumeName = UUID.randomUUID().toString();
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java
index 65ec8af82c9..267e99829ac 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java
@@ -152,7 +152,7 @@ public void testValidateAndUpdateCache() throws Exception {
// add key to cache
SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(getVolumeName(),
getBucketName(),
- snapshotName, null, Time.now());
+ snapshotName, UUID.randomUUID(), Time.now());
assertEquals(SNAPSHOT_ACTIVE, snapshotInfo.getSnapshotStatus());
getOmMetadataManager().getSnapshotInfoTable().addCacheEntry(
new CacheKey<>(key),
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java
index adb6a638415..d9e81693dd8 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java
@@ -171,6 +171,12 @@ public void baseSetup() throws Exception {
AuditLogger auditLogger = mock(AuditLogger.class);
when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
+
+ AuditMessage mockAuditMessage = mock(AuditMessage.class);
+ when(mockAuditMessage.getOp()).thenReturn("MOCK_OP");
+ when(ozoneManager.buildAuditMessageForSuccess(any(), any())).thenReturn(mockAuditMessage);
+ when(ozoneManager.buildAuditMessageForFailure(any(), any(), any())).thenReturn(mockAuditMessage);
+
batchOperation = omMetadataManager.getStore().initBatchOperation();
volumeName = UUID.randomUUID().toString();
diff --git
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestS3GatewayAuditLog.java
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestS3GatewayAuditLog.java
index ee1d78fe884..4075cfa29db 100644
---
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestS3GatewayAuditLog.java
+++
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestS3GatewayAuditLog.java
@@ -123,9 +123,9 @@ public void testHeadBucket() throws Exception {
bucketEndpoint.head(bucketName);
String expected = "INFO | S3GAudit | ? | user=null | ip=null | " +
- "op=HEAD_BUCKET {bucket=[bucket], x-amz-request-id=" +
- requestIdentifier.getRequestId() + ", x-amz-id-2=" +
- requestIdentifier.getAmzId() + "} | ret=SUCCESS";
+ "op=HEAD_BUCKET {\"bucket\":\"[bucket]\",\"x-amz-request-id\":\"" +
+ requestIdentifier.getRequestId() + "\",\"x-amz-id-2\":\"" +
+ requestIdentifier.getAmzId() + "\"} | ret=SUCCESS";
verifyLog(expected);
}
@@ -134,9 +134,9 @@ public void testListBucket() throws Exception {
rootEndpoint.get().getEntity();
String expected = "INFO | S3GAudit | ? | user=null | ip=null | " +
- "op=LIST_S3_BUCKETS {x-amz-request-id=" +
- requestIdentifier.getRequestId() + ", x-amz-id-2=" +
- requestIdentifier.getAmzId() + "} | ret=SUCCESS";
+ "op=LIST_S3_BUCKETS {\"x-amz-request-id\":\"" +
+ requestIdentifier.getRequestId() + "\",\"x-amz-id-2\":\"" +
+ requestIdentifier.getAmzId() + "\"} | ret=SUCCESS";
verifyLog(expected);
}
@@ -155,9 +155,9 @@ public void testHeadObject() throws Exception {
keyEndpoint.head(bucketName, "key1");
String expected = "INFO | S3GAudit | ? | user=null | ip=null | " +
- "op=HEAD_KEY {bucket=[bucket], path=[key1], x-amz-request-id=" +
- requestIdentifier.getRequestId() + ", x-amz-id-2=" +
- requestIdentifier.getAmzId() + "} | ret=SUCCESS";
+ "op=HEAD_KEY {\"bucket\":\"[bucket]\",\"path\":\"[key1]\",\"x-amz-request-id\":\"" +
+ requestIdentifier.getRequestId() + "\",\"x-amz-id-2\":\"" +
+ requestIdentifier.getAmzId() + "\"} | ret=SUCCESS";
verifyLog(expected);
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]