This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new a88ff710dd HDDS-10979. Support STANDARD_IA S3 storage class to accept EC replication config (#8399)
a88ff710dd is described below

commit a88ff710dde6823f79e747d271677e3b6f2dc81c
Author: SaketaChalamchala <[email protected]>
AuthorDate: Wed May 21 12:56:06 2025 -0700

    HDDS-10979. Support STANDARD_IA S3 storage class to accept EC replication config (#8399)
---
 .../hadoop/ozone/shell/ReplicationOptions.java     |   2 +-
 .../ozone/shell/keys/ChecksumKeyHandler.java       |   2 +-
 .../hadoop/ozone/client}/OzoneClientUtils.java     |   5 +-
 .../hadoop/ozone/client}/TestOzoneClientUtils.java |   4 +-
 .../dist/src/main/compose/common/ec-test.sh        |   2 +
 .../src/main/smoketest/ec/awss3ecstorage.robot     |  90 ++++++++++
 .../dist/src/main/smoketest/s3/awss3.robot         |   8 +
 .../fs/ozone/BasicOzoneClientAdapterImpl.java      |   1 +
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java      |   1 +
 .../ozone/BasicRootedOzoneClientAdapterImpl.java   |   1 +
 .../fs/ozone/BasicRootedOzoneFileSystem.java       |   1 +
 .../hadoop/ozone/s3/endpoint/BucketEndpoint.java   |   9 +-
 .../hadoop/ozone/s3/endpoint/EndpointBase.java     |   3 +-
 .../hadoop/ozone/s3/endpoint/ObjectEndpoint.java   |  37 ++---
 .../hadoop/ozone/s3/exception/S3ErrorTable.java    |   6 +
 .../org/apache/hadoop/ozone/s3/util/S3Consts.java  |   1 +
 .../apache/hadoop/ozone/s3/util/S3StorageType.java |  57 ++-----
 .../org/apache/hadoop/ozone/s3/util/S3Utils.java   |  70 +++-----
 .../hadoop/ozone/s3/endpoint/TestObjectPut.java    |   2 +-
 .../apache/hadoop/ozone/s3/util/TestS3Utils.java   | 182 ++++++++++-----------
 20 files changed, 260 insertions(+), 224 deletions(-)
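
For reviewers, the shape of the feature: putting a key with the STANDARD_IA storage class now yields EC replication (rs-3-2 by default), and an optional storage-config custom-metadata value such as rs-6-3-1024k selects a specific EC scheme. A hedged sketch of client usage with the AWS SDK v2 for Java (endpoint, bucket, key, and file path are illustrative; the storage class and metadata key come from the patch below):

    import java.net.URI;
    import java.nio.file.Paths;
    import java.util.Collections;
    import software.amazon.awssdk.core.sync.RequestBody;
    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.S3Configuration;
    import software.amazon.awssdk.services.s3.model.PutObjectRequest;
    import software.amazon.awssdk.services.s3.model.StorageClass;

    public final class EcPutExample {
      public static void main(String[] args) {
        // Point the client at the Ozone S3 gateway (path-style addressing).
        S3Client s3 = S3Client.builder()
            .endpointOverride(URI.create("http://s3g:9878"))
            .region(Region.US_EAST_1)
            .serviceConfiguration(S3Configuration.builder()
                .pathStyleAccessEnabled(true).build())
            .build();

        // STANDARD_IA alone -> default EC rs-3-2; the storage-config
        // metadata entry overrides it with a specific EC scheme.
        s3.putObject(PutObjectRequest.builder()
                .bucket("bucket1")
                .key("ecKey")
                .storageClass(StorageClass.STANDARD_IA)
                .metadata(Collections.singletonMap("storage-config", "rs-6-3-1024k"))
                .build(),
            RequestBody.fromFile(Paths.get("/tmp/1mb")));
      }
    }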

diff --git a/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/ReplicationOptions.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/ReplicationOptions.java
index c9dad3df88..ad13b3b1ae 100644
--- a/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/ReplicationOptions.java
+++ b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/ReplicationOptions.java
@@ -22,10 +22,10 @@
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE;
 
 import java.util.Optional;
-import org.apache.hadoop.fs.ozone.OzoneClientUtils;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.ozone.client.OzoneClientUtils;
 
 /**
  * Common options for specifying replication config: specialized for
diff --git a/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/ChecksumKeyHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/ChecksumKeyHandler.java
index 489abed039..c36c1af991 100644
--- a/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/ChecksumKeyHandler.java
+++ b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/ChecksumKeyHandler.java
@@ -17,7 +17,7 @@
 
 package org.apache.hadoop.ozone.shell.keys;
 
-import static org.apache.hadoop.fs.ozone.OzoneClientUtils.getFileChecksumWithCombineMode;
+import static org.apache.hadoop.ozone.client.OzoneClientUtils.getFileChecksumWithCombineMode;
 
 import com.fasterxml.jackson.annotation.JsonAutoDetect;
 import java.io.IOException;
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
similarity index 98%
rename from hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java
rename to hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
index a885116902..0d5504bb16 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.fs.ozone;
+package org.apache.hadoop.ozone.client;
 
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE;
@@ -36,9 +36,6 @@
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.OzoneClientConfig;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.checksum.BaseFileChecksumHelper;
 import org.apache.hadoop.ozone.client.checksum.ChecksumHelperFactory;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
diff --git a/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneClientUtils.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientUtils.java
similarity index 98%
rename from hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneClientUtils.java
rename to hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientUtils.java
index baff7bb67d..c2154fe880 100644
--- a/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneClientUtils.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientUtils.java
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.fs.ozone;
+package org.apache.hadoop.ozone.client;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNull;
@@ -32,8 +32,6 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.OzoneClientConfig;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.junit.jupiter.api.Test;
 
diff --git a/hadoop-ozone/dist/src/main/compose/common/ec-test.sh b/hadoop-ozone/dist/src/main/compose/common/ec-test.sh
index 04df2b2787..556590a14a 100755
--- a/hadoop-ozone/dist/src/main/compose/common/ec-test.sh
+++ b/hadoop-ozone/dist/src/main/compose/common/ec-test.sh
@@ -30,3 +30,5 @@ docker-compose up -d --no-recreate --scale datanode=3
 execute_robot_test scm -v PREFIX:${prefix} -N read-3-datanodes ec/read.robot
 docker-compose up -d --no-recreate --scale datanode=5
 execute_robot_test scm -v container:1 -v count:5 -N EC-recovery replication/wait.robot
+docker-compose up -d --no-recreate --scale datanode=9
+execute_robot_test scm -N S3-EC-Storage ec/awss3ecstorage.robot
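
The scale-up to nine datanodes above is what rs-6-3 requires: 6 data plus 3 parity nodes. The rs-3-2 cases already fit on the five-node cluster used earlier in this script.
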
diff --git a/hadoop-ozone/dist/src/main/smoketest/ec/awss3ecstorage.robot b/hadoop-ozone/dist/src/main/smoketest/ec/awss3ecstorage.robot
new file mode 100644
index 0000000000..27ddffb0eb
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/smoketest/ec/awss3ecstorage.robot
@@ -0,0 +1,90 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+Documentation       S3 gateway test with aws cli with STANDARD_IA storage class
+Library             OperatingSystem
+Library             String
+Resource            ../commonlib.robot
+Resource            ../s3/commonawslib.robot
+Resource            ../s3/mpu_lib.robot
+Resource            ../ozone-lib/shell.robot
+Test Timeout        5 minutes
+Suite Setup         Setup EC Multipart Tests
+Suite Teardown      Teardown EC Multipart Tests
+Test Setup          Generate random prefix
+
+*** Keywords ***
+Setup EC Multipart Tests
+    Setup s3 tests
+    Create Random File KB    1023    /tmp/1mb
+
+Teardown EC Multipart Tests
+    Remove Files    /tmp/1mb
+
+*** Variables ***
+${ENDPOINT_URL}       http://s3g:9878
+${BUCKET}             generated
+
+*** Test Cases ***
+
+Put Object with STANDARD_IA storage class
+    ${file_checksum} =  Execute                    md5sum /tmp/1mb | awk '{print $1}'
+
+    ${result} =         Execute AWSS3ApiCli        put-object --bucket ${BUCKET} --key ${PREFIX}/ecKey32 --body /tmp/1mb --storage-class STANDARD_IA
+    ${eTag} =           Execute                    echo '${result}' | jq -r '.ETag'
+                        Should Be Equal            ${eTag}           \"${file_checksum}\"
+                        Verify Key EC Replication Config    /s3v/${BUCKET}/${PREFIX}/ecKey32    RS    3    2    1048576
+
+    ${result} =         Execute AWSS3ApiCli        put-object --bucket ${BUCKET} --key ${PREFIX}/ecKey63 --body /tmp/1mb --storage-class STANDARD_IA --metadata="storage-config=rs-6-3-1024k"
+    ${eTag} =           Execute                    echo '${result}' | jq -r '.ETag'
+                        Should Be Equal            ${eTag}           \"${file_checksum}\"
+                        Verify Key EC Replication Config    /s3v/${BUCKET}/${PREFIX}/ecKey63    RS    6    3    1048576
+
+Test multipart upload with STANDARD_IA storage
+    ${uploadID} =       Initiate MPU    ${BUCKET}    ${PREFIX}/ecmultipartKey32     0     --storage-class STANDARD_IA
+    ${eTag1} =          Upload MPU part    ${BUCKET}    ${PREFIX}/ecmultipartKey32    ${uploadID}    1    /tmp/1mb
+    ${result} =         Execute AWSS3APICli   list-parts --bucket ${BUCKET} --key ${PREFIX}/ecmultipartKey32 --upload-id ${uploadID}
+    ${part1} =          Execute               echo '${result}' | jq -r '.Parts[0].ETag'
+                        Should Be equal       ${part1}    ${eTag1}
+                        Should contain        ${result}    STANDARD_IA
+                        Complete MPU    ${BUCKET}    ${PREFIX}/ecmultipartKey32    ${uploadID}    {ETag=${eTag1},PartNumber=1}
+                        Verify Key EC Replication Config    /s3v/${BUCKET}/${PREFIX}/ecmultipartKey32    RS    3    2    1048576
+
+    ${uploadID} =       Initiate MPU    ${BUCKET}    ${PREFIX}/ecmultipartKey63     0     --storage-class STANDARD_IA --metadata="storage-config=rs-6-3-1024k"
+    ${eTag1} =          Upload MPU part    ${BUCKET}    ${PREFIX}/ecmultipartKey63    ${uploadID}    1    /tmp/part1
+    ${result} =         Execute AWSS3APICli   list-parts --bucket ${BUCKET} --key ${PREFIX}/ecmultipartKey63 --upload-id ${uploadID}
+    ${part1} =          Execute               echo '${result}' | jq -r '.Parts[0].ETag'
+                        Should Be equal       ${part1}    ${eTag1}
+                        Should contain        ${result}    STANDARD_IA
+                        Complete MPU    ${BUCKET}    ${PREFIX}/ecmultipartKey63    ${uploadID}    {ETag=${eTag1},PartNumber=1}
+                        Verify Key EC Replication Config    /s3v/${BUCKET}/${PREFIX}/ecmultipartKey63    RS    6    3    1048576
+
+Copy Object change storage class to STANDARD_IA
+    ${file_checksum} =  Execute                    md5sum /tmp/1mb | awk '{print $1}'
+    ${result} =         Execute AWSS3ApiCli        put-object --bucket ${BUCKET} --key ${PREFIX}/copyobject/Key1 --body /tmp/1mb
+    ${eTag} =           Execute                    echo '${result}' | jq -r '.ETag'
+                        Should Be Equal            ${eTag}           \"${file_checksum}\"
+
+     ${result} =         Execute AWSS3APICli        copy-object --storage-class STANDARD_IA --bucket ${BUCKET} --key ${PREFIX}/copyobject/Key1 --copy-source ${BUCKET}/${PREFIX}/copyobject/Key1
+                         Should contain             ${result}        ETag
+     ${eTag} =           Execute                    echo '${result}' | jq -r '.CopyObjectResult.ETag'
+                         Should Be Equal            ${eTag}           \"${file_checksum}\"
+
+     ${result} =         Execute AWSS3APICli        copy-object --storage-class STANDARD_IA --metadata="storage-config=rs-6-3-1024k" --bucket ${BUCKET} --key ${PREFIX}/copyobject/Key1 --copy-source ${BUCKET}/${PREFIX}/copyobject/Key1
+                         Should contain             ${result}        ETag
+     ${eTag} =           Execute                    echo '${result}' | jq -r '.CopyObjectResult.ETag'
+                         Should Be Equal            ${eTag}           \"${file_checksum}\"
+                         ## TODO: Verify Key EC Replication Config when we support changing storage class
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/awss3.robot b/hadoop-ozone/dist/src/main/smoketest/s3/awss3.robot
index be2e24b6e4..f805f6ed4d 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/awss3.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/awss3.robot
@@ -46,6 +46,14 @@ File upload and directory list
                         Should not contain        ${result}         testfile
                         Should not contain        ${result}         dir1
                         Should contain            ${result}         file
+                        # Verify S3 storage class if file is replicated or erasure coded.
+    ${result} =         Execute AWSS3CliDebug   ls s3://${BUCKET}/dir1/dir2/file
+    IF    '${BUCKET}' == 'erasure'
+                        Should contain    ${result}    STANDARD_IA
+    ELSE
+                        Should contain    ${result}    STANDARD
+                        Should not contain    ${result}    STANDARD_IA
+    END
 
 File upload with special chars
                         Execute                   date > /tmp/testfile
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
index 1abddfc8af..8abe5147b5 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
@@ -67,6 +67,7 @@
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.OzoneClientUtils;
 import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index 8e15888890..67a252e695 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -74,6 +74,7 @@
 import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneFsServerDefaults;
+import org.apache.hadoop.ozone.client.OzoneClientUtils;
 import org.apache.hadoop.ozone.client.io.SelectorOutputStream;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
index edbd7b9056..cd05b9de5e 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
@@ -81,6 +81,7 @@
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.OzoneClientUtils;
 import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneSnapshot;
 import org.apache.hadoop.ozone.client.OzoneVolume;
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
index 2e05b8e401..d355f59899 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
@@ -83,6 +83,7 @@
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneFsServerDefaults;
 import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClientUtils;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.SelectorOutputStream;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
index dc6536d3dc..cb641d1ea0 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
@@ -57,7 +57,6 @@
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.audit.S3GAction;
@@ -753,12 +752,8 @@ private void addKey(ListObjectResponse response, OzoneKey next) {
     if (eTag != null) {
       keyMetadata.setETag(ObjectEndpoint.wrapInQuotes(eTag));
     }
-    if (next.getReplicationType().toString().equals(ReplicationType
-        .STAND_ALONE.toString())) {
-      keyMetadata.setStorageClass(S3StorageType.REDUCED_REDUNDANCY.toString());
-    } else {
-      keyMetadata.setStorageClass(S3StorageType.STANDARD.toString());
-    }
+    keyMetadata.setStorageClass(S3StorageType.fromReplicationConfig(
+        next.getReplicationConfig()).toString());
     keyMetadata.setLastModified(next.getModificationTime());
     String displayName = next.getOwner();
     keyMetadata.setOwner(S3Owner.of(displayName));
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
index 59e6a6bd04..78e32d5bec 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
@@ -24,6 +24,7 @@
 import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.AWS_TAG_PREFIX;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_HEADER_PREFIX;
+import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CONFIG_HEADER;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_HEADER;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_KEY_LENGTH_LIMIT;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_NUM_LIMIT;
@@ -98,7 +99,7 @@ public abstract class EndpointBase implements Auditor {
   private ContainerRequestContext context;
 
   private Set<String> excludeMetadataFields =
-      new HashSet<>(Arrays.asList(OzoneConsts.GDPR_FLAG));
+      new HashSet<>(Arrays.asList(OzoneConsts.GDPR_FLAG, STORAGE_CONFIG_HEADER));
   private static final Logger LOG =
       LoggerFactory.getLogger(EndpointBase.class);
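
(Adding STORAGE_CONFIG_HEADER to excludeMetadataFields above keeps the storage-config hint from being persisted as ordinary custom metadata on the key, mirroring the existing GDPR_FLAG handling.)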
 
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index 45e1f9d34e..7ebed80c28 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -27,9 +27,6 @@
 import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT;
 import static org.apache.hadoop.ozone.audit.AuditLogger.PerformanceStringBuilder;
 import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_CLIENT_BUFFER_SIZE_DEFAULT;
 import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_CLIENT_BUFFER_SIZE_KEY;
@@ -49,12 +46,14 @@
 import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_IF_MODIFIED_SINCE;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_IF_UNMODIFIED_SINCE;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_COPY_DIRECTIVE_HEADER;
+import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_HEADER_PREFIX;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.CopyDirective;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.DECODED_CONTENT_LENGTH_HEADER;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.MP_PARTS_COUNT;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER_SUPPORTED_UNIT;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
+import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CONFIG_HEADER;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_COUNT_HEADER;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_DIRECTIVE_HEADER;
 import static org.apache.hadoop.ozone.s3.util.S3Utils.hasMultiChunksPayload;
@@ -110,12 +109,12 @@
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
-import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.S3GAction;
 import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClientUtils;
 import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneKeyDetails;
 import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts;
@@ -236,7 +235,7 @@ public Response put(
     boolean auditSuccess = true;
     PerformanceStringBuilder perf = new PerformanceStringBuilder();
 
-    String copyHeader = null, storageType = null;
+    String copyHeader = null, storageType = null, storageConfig = null;
     DigestInputStream digestInputStream = null;
     try {
       if (aclMarker != null) {
@@ -262,12 +261,13 @@ public Response put(
 
       copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER);
       storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
+      storageConfig = headers.getHeaderString(CUSTOM_METADATA_HEADER_PREFIX + STORAGE_CONFIG_HEADER);
       boolean storageTypeDefault = StringUtils.isEmpty(storageType);
 
       // Normal put object
       OzoneBucket bucket = volume.getBucket(bucketName);
       ReplicationConfig replicationConfig =
-          getReplicationConfig(bucket, storageType);
+          getReplicationConfig(bucket, storageType, storageConfig);
 
       boolean enableEC = false;
       if ((replicationConfig != null &&
@@ -819,6 +819,7 @@ public Response initializeMultipartUpload(
     try {
       OzoneBucket ozoneBucket = getBucket(bucket);
       String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
+      String storageConfig = headers.getHeaderString(CUSTOM_METADATA_HEADER_PREFIX + STORAGE_CONFIG_HEADER);
 
       Map<String, String> customMetadata =
           getCustomMetadataFromHeaders(headers.getRequestHeaders());
@@ -826,7 +827,7 @@ public Response initializeMultipartUpload(
       Map<String, String> tags = getTaggingFromHeaders(headers);
 
       ReplicationConfig replicationConfig =
-          getReplicationConfig(ozoneBucket, storageType);
+          getReplicationConfig(ozoneBucket, storageType, storageConfig);
 
       OmMultipartInfo multipartInfo =
+          ozoneBucket.initiateMultipartUpload(key, replicationConfig, customMetadata, tags);
@@ -859,21 +860,12 @@ public Response initializeMultipartUpload(
   }
 
   private ReplicationConfig getReplicationConfig(OzoneBucket ozoneBucket,
-      String storageType) throws OS3Exception {
-    if (StringUtils.isEmpty(storageType)) {
-      S3StorageType defaultStorageType = S3StorageType.getDefault(ozoneConfiguration);
-      storageType = (defaultStorageType != null ? defaultStorageType.toString() : null);
-    }
+      String storageType, String storageConfig) throws OS3Exception {
 
-    ReplicationConfig clientConfiguredReplicationConfig = null;
-    String replication = ozoneConfiguration.get(OZONE_REPLICATION);
-    if (replication != null) {
-      clientConfiguredReplicationConfig = ReplicationConfig.parse(
-          ReplicationType.valueOf(ozoneConfiguration
-              .get(OZONE_REPLICATION_TYPE, OZONE_REPLICATION_TYPE_DEFAULT)),
-          replication, ozoneConfiguration);
-    }
-    return S3Utils.resolveS3ClientSideReplicationConfig(storageType,
+    ReplicationConfig clientConfiguredReplicationConfig =
+        OzoneClientUtils.getClientConfiguredReplicationConfig(ozoneConfiguration);
+
+    return S3Utils.resolveS3ClientSideReplicationConfig(storageType, storageConfig,
         clientConfiguredReplicationConfig, ozoneBucket.getReplicationConfig());
   }
 
@@ -972,9 +964,10 @@ private Response createMultipartKey(OzoneVolume volume, String bucket,
 
       copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER);
       String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
+      String storageConfig = headers.getHeaderString(CUSTOM_METADATA_HEADER_PREFIX + STORAGE_CONFIG_HEADER);
       final OzoneBucket ozoneBucket = volume.getBucket(bucket);
       ReplicationConfig replicationConfig =
-          getReplicationConfig(ozoneBucket, storageType);
+          getReplicationConfig(ozoneBucket, storageType, storageConfig);
 
       boolean enableEC = false;
       if ((replicationConfig != null &&
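
The refactored getReplicationConfig makes the precedence explicit: an explicit storage-class header wins, then the client-configured replication (now resolved via OzoneClientUtils), then the bucket default; a null result defers the choice to the server. A hedged illustration using the signatures from this patch (values are illustrative; statements assumed inside a method declaring throws OS3Exception):

    import org.apache.hadoop.hdds.client.RatisReplicationConfig;
    import org.apache.hadoop.hdds.client.ReplicationConfig;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.ozone.s3.util.S3Utils;

    // Header present: STANDARD_IA plus a storage-config override wins over
    // both the client-configured and the bucket replication config.
    ReplicationConfig viaHeader = S3Utils.resolveS3ClientSideReplicationConfig(
        "STANDARD_IA", "rs-6-3-1024k",
        /* clientConfiguredReplConfig = */ null,
        RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE));
    // -> EC rs-6-3-1024k

    // No header: client-configured config if set, else the bucket default;
    // null everywhere leaves the decision to the server.
    ReplicationConfig fallback = S3Utils.resolveS3ClientSideReplicationConfig(
        null, null, null,
        RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE));
    // -> RATIS/THREE (the bucket default)
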
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java
index b09ba5c954..200d9e8acb 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java
@@ -150,6 +150,12 @@ public final class S3ErrorTable {
       HTTP_FORBIDDEN
   );
 
+  public static final OS3Exception INVALID_STORAGE_CLASS = new OS3Exception(
+      "InvalidStorageClass", "The storage class that you specified is not valid. " +
+      "Provide a supported storage class [STANDARD|REDUCED_REDUNDANCY|STANDARD_IA] or " +
+      "a valid custom EC storage config if using STANDARD_IA.",
+      HTTP_BAD_REQUEST);
+
   private static Function<Exception, OS3Exception> generateInternalError =
       e -> new OS3Exception("InternalError", e.getMessage(), HTTP_INTERNAL_ERROR);
 
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java
index e5f49383fc..0e8b58dc80 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java
@@ -72,6 +72,7 @@ public final class S3Consts {
   // Constants related to custom metadata
   public static final String CUSTOM_METADATA_HEADER_PREFIX = "x-amz-meta-";
   public static final String CUSTOM_METADATA_COPY_DIRECTIVE_HEADER = "x-amz-metadata-directive";
+  public static final String STORAGE_CONFIG_HEADER = "storage-config";
 
   public static final String DECODED_CONTENT_LENGTH_HEADER =
       "x-amz-decoded-content-length";
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java
index 9f2b5f7772..c400bade41 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java
@@ -18,12 +18,9 @@
 package org.apache.hadoop.ozone.s3.util;
 
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
 
 /**
  * Maps S3 storage class values to Ozone replication values.
@@ -31,54 +28,24 @@
 
 public enum S3StorageType {
 
-  REDUCED_REDUNDANCY(ReplicationType.RATIS, ReplicationFactor.ONE),
-  STANDARD(ReplicationType.RATIS, ReplicationFactor.THREE);
+  REDUCED_REDUNDANCY(RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)),
+  STANDARD(
+      RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)),
+  STANDARD_IA(new ECReplicationConfig(3, 2));
 
-  private final ReplicationType type;
-  private final ReplicationFactor factor;
+  private final ReplicationConfig replicationConfig;
 
-  S3StorageType(
-      ReplicationType type,
-      ReplicationFactor factor) {
-    this.type = type;
-    this.factor = factor;
+  S3StorageType(ReplicationConfig replicationConfig) {
+    this.replicationConfig = replicationConfig;
   }
 
-  public ReplicationFactor getFactor() {
-    return factor;
-  }
-
-  public ReplicationType getType() {
-    return type;
-  }
-
-  /**
-   * Get default S3StorageType for a new key to be uploaded.
-   * This should align to the ozone cluster configuration.
-   * @param config OzoneConfiguration
-   * @return S3StorageType which wraps ozone replication type and factor
-   */
-  public static S3StorageType getDefault(ConfigurationSource config) {
-    String replicationString = config.get(OzoneConfigKeys.OZONE_REPLICATION);
-    ReplicationFactor configFactor;
-    if (replicationString == null) {
-      // if no config is set then let server take decision
-      return null;
-    }
-    try {
-      configFactor = ReplicationFactor.valueOf(
-          Integer.parseInt(replicationString));
-    } catch (NumberFormatException ex) {
-      // conservatively defaults to STANDARD on wrong config value
-      return STANDARD;
-    }
-    return configFactor == ReplicationFactor.ONE
-        ? REDUCED_REDUNDANCY : STANDARD;
+  public ReplicationConfig getReplicationConfig() {
+    return replicationConfig;
   }
 
   public static S3StorageType fromReplicationConfig(ReplicationConfig config) {
-    if (config instanceof ECReplicationConfig) {
-      return S3StorageType.STANDARD;
+    if (config.getReplicationType() == HddsProtos.ReplicationType.EC) {
+      return STANDARD_IA;
     }
     if (config.getReplicationType() == HddsProtos.ReplicationType.STAND_ALONE ||
         config.getRequiredNodes() == 1) {
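
With this change S3StorageType carries a ReplicationConfig directly, and the mapping is bidirectional; a hedged sketch of the round trip (fragment, names as in the patch):

    import org.apache.hadoop.hdds.client.ECReplicationConfig;
    import org.apache.hadoop.hdds.client.ReplicationConfig;
    import org.apache.hadoop.ozone.s3.util.S3StorageType;

    ReplicationConfig ia = S3StorageType.STANDARD_IA.getReplicationConfig();  // EC rs-3-2
    ReplicationConfig std = S3StorageType.STANDARD.getReplicationConfig();    // RATIS/THREE
    S3StorageType cls = S3StorageType.fromReplicationConfig(
        new ECReplicationConfig(6, 3));                                       // -> STANDARD_IA

This is why EC keys now list as STANDARD_IA in BucketEndpoint, and why the awss3.robot check below expects STANDARD_IA only for the erasure bucket.
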
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java
index 54ffc23c32..d9461bc2fb 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.s3.util;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_ARGUMENT;
+import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_STORAGE_CLASS;
 import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.AWS_CHUNKED;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.DECODED_CONTENT_LENGTH_HEADER;
@@ -37,10 +37,9 @@
 import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.Response;
 import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.s3.exception.OS3Exception;
 import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
 
@@ -69,64 +68,41 @@ private S3Utils() {
    *
    * @param s3StorageTypeHeader        - s3 user passed storage type
    *                                   header.
-   * @param clientConfiguredReplConfig - Client side configured replication
-   *                                   config.
    * @param bucketReplConfig           - server side bucket default replication
    *                                   config.
+   * @param clientConfiguredReplConfig - Client side configured replication
+   *                                   config.
    * @return client resolved replication config.
    */
   public static ReplicationConfig resolveS3ClientSideReplicationConfig(
-      String s3StorageTypeHeader, ReplicationConfig clientConfiguredReplConfig,
+      String s3StorageTypeHeader, String s3StorageConfigHeader,
+      ReplicationConfig clientConfiguredReplConfig,
       ReplicationConfig bucketReplConfig)
       throws OS3Exception {
-    ReplicationConfig clientDeterminedReplConfig = null;
 
-    // Let's map the user provided s3 storage type header to ozone s3 storage
-    // type.
-    S3StorageType s3StorageType = null;
-    if (s3StorageTypeHeader != null && !s3StorageTypeHeader.equals("")) {
-      s3StorageType = toS3StorageType(s3StorageTypeHeader);
+    // If user provided s3 storage type header is not null then map it
+    // to ozone replication config
+    if (!StringUtils.isEmpty(s3StorageTypeHeader)) {
+      return toReplicationConfig(s3StorageTypeHeader, s3StorageConfigHeader);
     }
 
-    boolean isECBucket = bucketReplConfig != null && bucketReplConfig
-        .getReplicationType() == HddsProtos.ReplicationType.EC;
-
-    // if bucket replication config configured with EC, we will give high
-    // preference to server side bucket defaults.
-    // Why we give high preference to EC is, there is no way for file system
-    // interfaces to pass EC replication. So, if one configures EC at bucket,
-    // we consider EC to take preference. in short, keys created from file
-    // system under EC bucket will always be EC'd.
-    if (isECBucket) {
-      // if bucket is EC, don't bother client provided configs, let's pass
-      // bucket config.
-      clientDeterminedReplConfig = bucketReplConfig;
-    } else {
-      // Let's validate the client side available replication configs.
-      boolean isUserPassedReplicationInSupportedList =
-          s3StorageType != null && (s3StorageType.getFactor()
-              .getValue() == ReplicationFactor.ONE.getValue() || s3StorageType
-              .getFactor().getValue() == ReplicationFactor.THREE.getValue());
-      if (isUserPassedReplicationInSupportedList) {
-        clientDeterminedReplConfig = ReplicationConfig.fromProtoTypeAndFactor(
-            ReplicationType.toProto(s3StorageType.getType()),
-            ReplicationFactor.toProto(s3StorageType.getFactor()));
-      } else {
-        // API passed replication number is not in supported replication list.
-        // So, let's use whatever available in client side configured.
-        // By default it will be null, so server will use server defaults.
-        clientDeterminedReplConfig = clientConfiguredReplConfig;
-      }
-    }
-    return clientDeterminedReplConfig;
+    // If the client configured replication config is not null, use it;
+    // otherwise default to the bucket replication config.
+    return (clientConfiguredReplConfig != null) ?
+        clientConfiguredReplConfig : bucketReplConfig;
   }
 
-  public static S3StorageType toS3StorageType(String storageType)
+  public static ReplicationConfig toReplicationConfig(String s3StorageType, String s3StorageConfig)
       throws OS3Exception {
     try {
-      return S3StorageType.valueOf(storageType);
+      S3StorageType storageType = S3StorageType.valueOf(s3StorageType);
+      if (S3StorageType.STANDARD_IA.equals(storageType) &&
+          !StringUtils.isEmpty(s3StorageConfig)) {
+        return new ECReplicationConfig(s3StorageConfig);
+      }
+      return storageType.getReplicationConfig();
     } catch (IllegalArgumentException ex) {
-      throw newError(INVALID_ARGUMENT, storageType, ex);
+      throw newError(INVALID_STORAGE_CLASS, s3StorageType, ex);
     }
   }
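
The new toReplicationConfig keeps the old storage-class mapping and adds the EC override path; invalid input now surfaces as InvalidStorageClass rather than InvalidArgument. Expected behaviour per the patch (a sketch, not exhaustive; statements assumed inside a method declaring throws OS3Exception):

    S3Utils.toReplicationConfig("STANDARD", null);              // RATIS/THREE
    S3Utils.toReplicationConfig("REDUCED_REDUNDANCY", null);    // RATIS/ONE
    S3Utils.toReplicationConfig("STANDARD_IA", null);           // EC rs-3-2 (enum default, 1 MB chunks)
    S3Utils.toReplicationConfig("STANDARD_IA", "rs-6-3-1024k"); // EC rs-6-3 with 1 MB chunks
    S3Utils.toReplicationConfig("GLACIER", null);               // throws OS3Exception: InvalidStorageClass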
 
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java
index 673fb6a756..e5c34fb4e4 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java
@@ -686,7 +686,7 @@ void testInvalidStorageType() {
 
     OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(
         BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, null, null, body));
-    assertEquals(S3ErrorTable.INVALID_ARGUMENT.getErrorMessage(),
+    assertEquals(S3ErrorTable.INVALID_STORAGE_CLASS.getErrorMessage(),
         e.getErrorMessage());
     assertEquals("random", e.getResource());
   }
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestS3Utils.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestS3Utils.java
index b616ea47a8..051cd5e9fd 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestS3Utils.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestS3Utils.java
@@ -21,128 +21,126 @@
 import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.s3.endpoint.S3Owner;
 import org.apache.hadoop.ozone.s3.exception.OS3Exception;
+import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
 
 /**
  * Tests the S3Utils APIs.
  */
 public class TestS3Utils {
-  private ReplicationConfig ecReplicationConfig =
-      new ECReplicationConfig("rs-3-2-1024K");
-  private ReplicationConfig ratis3ReplicationConfig =
+  private static final ReplicationConfig EC32REPLICATIONCONFIG =
+      new ECReplicationConfig(3, 2);
+  private static final ReplicationConfig RATIS3REPLICATIONCONFIG =
       RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE);
-  private ReplicationConfig ratis1ReplicationConfig =
+  private static final ReplicationConfig RATIS1REPLICATIONCONFIG =
       RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE);
 
-  @Test
-  public void testResolveClientSideRepConfigWhenBucketHasEC()
-      throws OS3Exception {
-    ReplicationConfig replicationConfig = S3Utils
-        .resolveS3ClientSideReplicationConfig(S3StorageType.STANDARD.name(),
-            null, ecReplicationConfig);
-    // Bucket default is EC.
-    assertEquals(ecReplicationConfig, replicationConfig);
-  }
+  private static final List<ReplicationConfig> REPLICATIONS = Arrays.asList(
+      null,
+      RATIS1REPLICATIONCONFIG,
+      RATIS3REPLICATIONCONFIG,
+      EC32REPLICATIONCONFIG
+  );
 
-  /**
-   * When bucket replication is null and it should respect user passed value.
-   */
-  @Test
-  public void testResolveClientSideRepConfigWhenBucketHasNull()
-      throws OS3Exception {
-    ReplicationConfig replicationConfig = S3Utils
-        .resolveS3ClientSideReplicationConfig(S3StorageType.STANDARD.name(),
-            null, null);
-    // Passed replication is 3 - Ozone mapped replication is ratis THREE
-    assertEquals(ratis3ReplicationConfig, replicationConfig);
-  }
+  private static final List<String> S3STORAGETYPES = Arrays.asList(
+      null,
+      "",
+      S3StorageType.STANDARD.name(),
+      S3StorageType.REDUCED_REDUNDANCY.name(),
+      S3StorageType.STANDARD_IA.name()
+  );
 
-  /**
-   * When bucket replication is null and it should return null if user passed
-   * value is invalid.
-   */
-  @Test
-  public void testResolveClientSideRepConfigWhenUserPassedReplicationIsEmpty()
-      throws OS3Exception {
-    ReplicationConfig replicationConfig =
-        S3Utils.resolveS3ClientSideReplicationConfig("", null, null);
-    // client configured value also null.
-    // This API caller should leave the decision to server.
-    assertNull(replicationConfig);
-  }
+  private static final List<String> S3STORAGECONFIG = Arrays.asList(
+      null,
+      "",
+      "rs-6-3-1024k"
+  );
 
-  /**
-   * When bucket default is non-EC and client side values are not valid, we
-   * would just return null, so servers can make decision in this case.
-   */
-  @Test
-  public void testResolveRepConfWhenUserPassedIsInvalidButBucketDefaultNonEC()
-      throws OS3Exception {
-    ReplicationConfig replicationConfig = S3Utils
-        .resolveS3ClientSideReplicationConfig(null, null,
-            ratis3ReplicationConfig);
-    // Configured client config also null.
-    assertNull(replicationConfig);
+  public static List<Arguments> validS3ReplicationConfigs() {
+    List<Arguments> args = new ArrayList<>();
+    for (String s3StorageType : S3STORAGETYPES) {
+      for (String s3StorageConfig : S3STORAGECONFIG) {
+        for (ReplicationConfig clientReplConfig : REPLICATIONS) {
+          for (ReplicationConfig bucketReplConfig: REPLICATIONS) {
+            args.add(Arguments.of(s3StorageType, s3StorageConfig, clientReplConfig, bucketReplConfig));
+          }
+        }
+      }
+    }
+    return args;
   }
 
-  /**
-   * When bucket default is non-EC and client side value is valid, we
-   * would should return client side valid value.
-   */
-  @Test
-  public void testResolveRepConfWhenUserPassedIsValidButBucketDefaultNonEC()
+  @ParameterizedTest
+  @MethodSource("validS3ReplicationConfigs")
+  public void testValidResolveS3ClientSideReplicationConfig(String s3StorageType, String s3StorageConfig,
+      ReplicationConfig clientConfiguredReplConfig, ReplicationConfig bucketReplConfig)
       throws OS3Exception {
     ReplicationConfig replicationConfig = S3Utils
-        .resolveS3ClientSideReplicationConfig(
-            S3StorageType.REDUCED_REDUNDANCY.name(), null,
-            ratis3ReplicationConfig);
-    // Passed value is replication one - Ozone mapped value is ratis ONE
-    assertEquals(ratis1ReplicationConfig, replicationConfig);
-  }
+        .resolveS3ClientSideReplicationConfig(s3StorageType, s3StorageConfig,
+            clientConfiguredReplConfig, bucketReplConfig);
 
-  /**
-   * When bucket default is EC and client side value also valid, we would just
-   * return bucket default EC.
-   */
-  @Test
-  public void testResolveRepConfWhenUserPassedIsValidButBucketDefaultEC()
-      throws OS3Exception {
-    ReplicationConfig replicationConfig = S3Utils
-        .resolveS3ClientSideReplicationConfig(S3StorageType.STANDARD.name(),
-            ratis3ReplicationConfig, ecReplicationConfig);
-    // Bucket default is EC
-    assertEquals(ecReplicationConfig, replicationConfig);
+    final ReplicationConfig expectedReplConfig;
+    if (!StringUtils.isEmpty(s3StorageType)) {
+      if (S3StorageType.STANDARD_IA.name().equals(s3StorageType)) {
+        if (!StringUtils.isEmpty(s3StorageConfig)) {
+          expectedReplConfig = new ECReplicationConfig(s3StorageConfig);
+        } else {
+          expectedReplConfig = EC32REPLICATIONCONFIG;
+        }
+      } else if (S3StorageType.STANDARD.name().equals(s3StorageType)) {
+        expectedReplConfig = RATIS3REPLICATIONCONFIG;
+      } else {
+        expectedReplConfig = RATIS1REPLICATIONCONFIG;
+      }
+    } else if (clientConfiguredReplConfig != null) {
+      expectedReplConfig = clientConfiguredReplConfig;
+    } else if (bucketReplConfig != null) {
+      expectedReplConfig = bucketReplConfig;
+    } else {
+      expectedReplConfig = null;
+    }
+
+    if (expectedReplConfig == null) {
+      assertNull(replicationConfig);
+    } else {
+      assertEquals(expectedReplConfig, replicationConfig);
+    }
   }
 
-  /**
-   * When bucket default is non-EC and client side passed value also not valid
-   * but configured value is valid, we would just return configured value.
-   */
-  @Test
-  public void testResolveRepConfWhenUserPassedIsInvalidAndBucketDefaultNonEC()
-      throws OS3Exception {
-    ReplicationConfig replicationConfig = S3Utils
-        .resolveS3ClientSideReplicationConfig(null, ratis3ReplicationConfig,
-            ratis1ReplicationConfig);
-    // Configured value is ratis THREE
-    assertEquals(ratis3ReplicationConfig, replicationConfig);
+  public static List<Arguments> invalidS3ReplicationConfigs() {
+    List<Arguments> args = new ArrayList<>();
+    args.add(Arguments.of("GLACIER", null, RATIS3REPLICATIONCONFIG, 
RATIS1REPLICATIONCONFIG));
+    args.add(Arguments.of(S3StorageType.STANDARD_IA.name(), "INVALID",
+        RATIS3REPLICATIONCONFIG, RATIS1REPLICATIONCONFIG));
+    return args;
   }
 
   /**
-   * When bucket default is non-EC and client side passed value also not valid
-   * but configured value is valid, we would just return configured value.
+   * When the client side passed value is not valid,
+   * an OS3Exception is thrown.
    */
-  @Test
-  public void testResolveRepConfWhenUserPassedIsInvalid() throws OS3Exception {
-    assertThrows(OS3Exception.class, () -> S3Utils.
+  @ParameterizedTest
+  @MethodSource("invalidS3ReplicationConfigs")
+  public void testResolveRepConfWhenUserPassedIsInvalid(String s3StorageType, String s3StorageConfig,
+      ReplicationConfig clientConfiguredReplConfig, ReplicationConfig bucketReplConfig)
+      throws OS3Exception {
+    OS3Exception exception = assertThrows(OS3Exception.class, () -> S3Utils.
         resolveS3ClientSideReplicationConfig(
-            "INVALID", ratis3ReplicationConfig, ratis1ReplicationConfig));
+            s3StorageType, s3StorageConfig, clientConfiguredReplConfig, bucketReplConfig));
+    assertEquals(S3ErrorTable.INVALID_STORAGE_CLASS.getCode(), exception.getCode());
   }
 
   @Test


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
