This is an automated email from the ASF dual-hosted git repository.

kirs pushed a commit to branch branch-refactor_property
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-refactor_property by this push:
     new 6f4568e36ca [Fix](storage)Unify globList Implementation Using AWS SDK and Optimize S3TVF Handling (#49596)
6f4568e36ca is described below

commit 6f4568e36ca1de120fc566e8e400a5731faddc28
Author: Calvin Kirs <guoqi...@selectdb.com>
AuthorDate: Fri Mar 28 10:53:32 2025 +0800

    [Fix](storage)Unify globList Implementation Using AWS SDK and Optimize S3TVF Handling (#49596)
    
    ### Background
    
    Previously, the globList implementation used two different protocols for
    object storage access, leading to inconsistencies between the Frontend
    (FE) and Backend (BE). To resolve this issue, we are migrating globList
    to use the native AWS SDK, ensuring a unified access approach across
    both FE and BE. This change reduces protocol discrepancies, improves
    maintainability, and is expected to offer performance benefits (to be
    validated via benchmarking).
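
    For illustration only (not part of this change), the listing strategy reduces to the
    sketch below: take the literal part of the glob before the first '*' as a server-side
    prefix filter for listObjectsV2, then refine the result client-side with a regex. The
    class and method names are made up for the example; the real logic is in
    S3ObjStorage.globList in the diff, and a production version would also need to page
    through continuation tokens for listings larger than one response.

        import software.amazon.awssdk.services.s3.S3Client;
        import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
        import software.amazon.awssdk.services.s3.model.ListObjectsV2Response;
        import software.amazon.awssdk.services.s3.model.S3Object;

        import java.util.regex.Pattern;

        public class GlobListSketch {
            // List keys in `bucket` matching a glob such as "data/2025*/part-?.csv".
            // The text before the first '*' is used as the server-side prefix filter;
            // the whole glob is turned into a regex and applied to each returned key.
            static void globList(S3Client s3, String bucket, String glob) {
                int wildcardIndex = glob.indexOf('*');
                String searchPrefix = wildcardIndex > 0 ? glob.substring(0, wildcardIndex) : glob;
                Pattern pattern = Pattern.compile(
                        glob.replace(".", "\\.").replace("*", ".*").replace("?", "."));

                ListObjectsV2Request request = ListObjectsV2Request.builder()
                        .bucket(bucket)
                        .prefix(searchPrefix)
                        .build();
                ListObjectsV2Response response = s3.listObjectsV2(request);
                for (S3Object obj : response.contents()) {
                    if (pattern.matcher(obj.key()).matches()) {
                        System.out.println("s3://" + bucket + "/" + obj.key()
                                + " size=" + obj.size()
                                + " mtime=" + obj.lastModified().toEpochMilli());
                    }
                }
            }
        }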
    
    Additionally, we have adjusted the S3 Table-Valued Function (S3TVF)
    handling of region and endpoint. Instead of explicitly specifying these
    parameters, they are now extracted directly from the S3 URL. As a
    result, we have rolled back the previous commit that introduced explicit
    region and endpoint settings. However, we still need to discuss whether
    similar changes should be applied consistently across other parts of the
    system.
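
    For illustration, a rough sketch of the URL-derivation idea follows, under simplified
    assumptions: for a virtual-hosted-style AWS URL, the host minus the leading bucket
    label is taken as the endpoint, and the label between "s3." and ".amazonaws.com" as
    the region. Doris's actual S3URI parsing handles more cases (path-style URLs, non-AWS
    endpoints), and the class name below is hypothetical.

        import java.net.URI;
        import java.util.Optional;
        import java.util.regex.Matcher;
        import java.util.regex.Pattern;

        public class S3UrlParts {
            // Matches AWS endpoints such as "s3.ap-northeast-1.amazonaws.com" and
            // captures the region label.
            private static final Pattern AWS_ENDPOINT =
                    Pattern.compile("s3[.-]([a-z0-9-]+)\\.amazonaws\\.com", Pattern.CASE_INSENSITIVE);

            public static void main(String[] args) {
                URI uri = URI.create("https://my-bucket.s3.ap-northeast-1.amazonaws.com/path/file.csv");
                String host = uri.getHost();
                // Drop the leading bucket label; the remainder is the endpoint.
                String endpoint = host.substring(host.indexOf('.') + 1);
                Matcher m = AWS_ENDPOINT.matcher(endpoint);
                Optional<String> region = m.matches() ? Optional.of(m.group(1)) : Optional.empty();
                System.out.println(endpoint);                       // s3.ap-northeast-1.amazonaws.com
                System.out.println(region.orElse("<not present>")); // ap-northeast-1
            }
        }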
    
    ### Changes
    
    - Migrated globList to AWS SDK Native Implementation
    
    - Replaced the existing implementation with AWS SDK’s listObjectsV2 API
    to ensure consistency across object storage operations.
    
    - Eliminated the need to maintain two different protocols for listing
    objects.
    
    - Improved alignment between FE and BE storage access.
    
    Fix S3 storage
---
 .../datasource/property/ConnectionProperties.java  |   2 +-
 .../datasource/property/storage/COSProperties.java |   8 +-
 .../property/storage/HDFSProperties.java           |   7 +-
 .../datasource/property/storage/OBSProperties.java |   5 +-
 .../datasource/property/storage/OSSProperties.java |   3 +-
 .../datasource/property/storage/S3Properties.java  |  20 +-
 .../java/org/apache/doris/fs/obj/S3ObjStorage.java |  46 +++
 .../org/apache/doris/fs/remote/S3FileSystem.java   |  77 +----
 .../doris/tablefunction/S3TableValuedFunction.java |  43 ++-
 .../property/storage/COSPropertiesTest.java        |  55 +---
 .../property/storage/OBSPropertyTest.java          |  53 ----
 .../property/storage/OSSPropertiesTest.java        |  53 ----
 .../property/storage/S3PropertiesTest.java         |  58 ----
 .../data/s3_storage/test_s3_tvf_s3_storage.out     | Bin 4899 -> 8496 bytes
 .../backup_restore_cos.groovy                      | 317 +++++++++++++--------
 .../refactor_storage_param_p0/s3_load.groovy       | 297 +++++++++++++++++++
 .../s3_storage/test_s3_tvf_s3_storage.groovy       |  70 ++---
 17 files changed, 638 insertions(+), 476 deletions(-)

diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/ConnectionProperties.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/ConnectionProperties.java
index bb66d4b6f22..20e0a225b65 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/ConnectionProperties.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/ConnectionProperties.java
@@ -85,7 +85,7 @@ public abstract class ConnectionProperties {
 
     // Subclass can override this method to return the property name of resource config.
     protected String getResourceConfigPropName() {
-        return "";
+        return null;
     }
 
     // This method will check if all required properties are set.
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/COSProperties.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/COSProperties.java
index 5416819dd36..997ed0e3b87 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/COSProperties.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/COSProperties.java
@@ -28,15 +28,15 @@ import java.util.regex.Pattern;
 
 public class COSProperties extends AbstractObjectStorageProperties {
 
-    @ConnectorProperty(names = {"cos.endpoint"},
+    @ConnectorProperty(names = {"cos.endpoint", "s3.endpoint"},
             required = false,
             description = "The endpoint of COS.")
-    protected String cosEndpoint = "cos.ap-guangzhou.myqcloud.com";
+    protected String cosEndpoint = "";
 
-    @ConnectorProperty(names = {"cos.region"},
+    @ConnectorProperty(names = {"cos.region", "s3.region"},
             required = false,
             description = "The region of COS.")
-    protected String cosRegion = "";
+    protected String cosRegion = "ap-guangzhou";
 
     @ConnectorProperty(names = {"cos.access_key"},
             description = "The access key of S3.")
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/HDFSProperties.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/HDFSProperties.java
index 0b30b3f59b7..c7a6a69b111 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/HDFSProperties.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/HDFSProperties.java
@@ -79,10 +79,9 @@ public class HDFSProperties extends StorageProperties {
             return;
         }
         finalHdfsConfig = new HashMap<>();
-        Configuration configuration = new Configuration();
-        origProps.forEach((k, v) -> {
-            if (null != configuration.getTrimmed(k)) {
-                finalHdfsConfig.put(k, v);
+        origProps.forEach((key, value) -> {
+            if (key.startsWith("hadoop.") || key.startsWith("dfs.")) {
+                finalHdfsConfig.put(key, value);
             }
         });
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/OBSProperties.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/OBSProperties.java
index d561eb5fdca..45086365ae8 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/OBSProperties.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/OBSProperties.java
@@ -28,7 +28,7 @@ import java.util.regex.Pattern;
 
 public class OBSProperties extends AbstractObjectStorageProperties {
 
-    @ConnectorProperty(names = {"obs.endpoint"}, required = false, description 
= "The endpoint of OBS.")
+    @ConnectorProperty(names = {"obs.endpoint", "s3.endpoint"}, required = 
false, description = "The endpoint of OBS.")
     protected String obsEndpoint = "obs.cn-east-3.myhuaweicloud.com";
 
     @ConnectorProperty(names = {"obs.access_key"}, description = "The access 
key of OBS.")
@@ -38,7 +38,8 @@ public class OBSProperties extends 
AbstractObjectStorageProperties {
     protected String obsSecretKey = "";
 
 
-    private String region;
+    @ConnectorProperty(names = {"obs.region", "s3.region"}, required = false, 
description = "The region of OBS.")
+    protected String region;
 
     public OBSProperties(Map<String, String> origProps) {
         super(Type.OBS, origProps);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/OSSProperties.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/OSSProperties.java
index 3a5ede7a539..625eb395328 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/OSSProperties.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/OSSProperties.java
@@ -27,7 +27,7 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 public class OSSProperties extends AbstractObjectStorageProperties {
-    @ConnectorProperty(names = {"oss.endpoint"}, required = false, description 
= "The endpoint of OSS.")
+    @ConnectorProperty(names = {"oss.endpoint", "s3.endpoint"}, required = 
false, description = "The endpoint of OSS.")
     protected String endpoint = "oss-cn-hangzhou.aliyuncs.com";
 
     @ConnectorProperty(names = {"oss.access_key"}, description = "The access 
key of OSS.")
@@ -36,6 +36,7 @@ public class OSSProperties extends 
AbstractObjectStorageProperties {
     @ConnectorProperty(names = {"oss.secret_key"}, description = "The secret 
key of OSS.")
     protected String secretKey = "";
 
+    @ConnectorProperty(names = {"oss.region", "s3.region"}, required = false, 
description = "The region of OSS.")
     protected String region;
 
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/S3Properties.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/S3Properties.java
index 6963a0b024a..0e200108634 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/S3Properties.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/S3Properties.java
@@ -21,7 +21,6 @@ import org.apache.doris.datasource.property.ConnectorProperty;
 import org.apache.doris.datasource.property.metastore.AWSGlueProperties;
 import org.apache.doris.datasource.property.metastore.AliyunDLFProperties;
 
-import com.google.common.base.Strings;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.paimon.options.Options;
@@ -34,14 +33,12 @@ import java.util.Map;
 public class S3Properties extends AbstractObjectStorageProperties {
 
     @ConnectorProperty(names = {"s3.endpoint", "AWS_ENDPOINT"},
-            required = false,
             description = "The endpoint of S3.")
     protected String s3Endpoint = "";
 
     @ConnectorProperty(names = {"s3.region", "AWS_REGION"},
-            required = false,
             description = "The region of S3.")
-    protected String s3Region = "";
+    protected String s3Region = "us-east-1";
 
     @ConnectorProperty(names = {"s3.access_key", "AWS_ACCESS_KEY"},
             description = "The access key of S3.")
@@ -96,11 +93,6 @@ public class S3Properties extends AbstractObjectStorageProperties {
 
     public S3Properties(Map<String, String> origProps) {
         super(Type.S3, origProps);
-        if (Strings.isNullOrEmpty(s3Region)) {
-            // Some object storage services do not have region concept, eg: minio.
-            // Use a default one.
-            s3Endpoint = "us-east-1";
-        }
     }
 
     /**
@@ -110,6 +102,9 @@ public class S3Properties extends AbstractObjectStorageProperties {
      * @return
      */
     public static boolean guessIsMe(Map<String, String> origProps) {
+        if (origProps.containsKey("s3.access_key") || origProps.containsKey("AWS_ACCESS_KEY")) {
+            return true;
+        }
         List<Field> fields = getIdentifyFields();
         return StorageProperties.checkIdentifierKey(origProps, fields);
     }
@@ -118,10 +113,9 @@ public class S3Properties extends AbstractObjectStorageProperties {
         List<Field> fields = Lists.newArrayList();
         try {
             //todo AliyunDlfProperties should in OSS storage type.
-            fields.add(S3Properties.class.getDeclaredField("s3Endpoint"));
-            fields.add(AliyunDLFProperties.class.getDeclaredField("dlfEndpoint"));
-            fields.add(AliyunDLFProperties.class.getDeclaredField("dlfRegion"));
-            fields.add(AWSGlueProperties.class.getDeclaredField("glueEndpoint"));
+            fields.add(S3Properties.class.getDeclaredField("s3AccessKey"));
+            fields.add(AliyunDLFProperties.class.getDeclaredField("dlfAccessKey"));
+            fields.add(AWSGlueProperties.class.getDeclaredField("glueAccessKey"));
             return fields;
         } catch (NoSuchFieldException e) {
             // should not happen
diff --git a/fe/fe-core/src/main/java/org/apache/doris/fs/obj/S3ObjStorage.java b/fe/fe-core/src/main/java/org/apache/doris/fs/obj/S3ObjStorage.java
index a6411aea219..8d391d9bb2d 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/fs/obj/S3ObjStorage.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/fs/obj/S3ObjStorage.java
@@ -24,6 +24,7 @@ import org.apache.doris.common.credentials.CloudCredential;
 import org.apache.doris.common.util.S3URI;
 import org.apache.doris.common.util.S3Util;
 import org.apache.doris.datasource.property.storage.AbstractObjectStorageProperties;
+import org.apache.doris.fs.remote.RemoteFile;
 
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.lang3.tuple.Triple;
@@ -59,6 +60,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
+import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 
 public class S3ObjStorage implements ObjStorage<S3Client> {
@@ -108,6 +110,50 @@ public class S3ObjStorage implements ObjStorage<S3Client> {
         return client;
     }
 
+
+
+    public Status globList(String remotePath, List<RemoteFile> result, boolean fileNameOnly) {
+        try {
+            URI uri = new URI(remotePath);
+            String bucketName = uri.getHost();
+            String prefix = uri.getPath().substring(1);
+            int wildcardIndex = prefix.indexOf('*');
+            String searchPrefix = wildcardIndex > 0 ? prefix.substring(0, wildcardIndex) : prefix;
+            try (S3Client s3 = getClient()) {
+                ListObjectsV2Request listRequest = ListObjectsV2Request.builder()
+                        .bucket(bucketName)
+                        .prefix(searchPrefix)
+                        .build();
+
+                ListObjectsV2Response listResponse = s3.listObjectsV2(listRequest);
+                String regex = prefix.replace(".", "\\.")
+                        .replace("*", ".*")
+                        .replace("?", ".");
+                Pattern pattern = Pattern.compile(regex);
+                List<RemoteFile> matchedFiles = listResponse.contents().stream()
+                        .filter(obj -> pattern.matcher(obj.key()).matches())
+                        .map(obj -> {
+                            String fullKey = obj.key();
+                            String fullPath = "s3://" + bucketName + "/" + fullKey;
+                            return new RemoteFile(
+                                    fileNameOnly ? fullPath.substring(fullPath.lastIndexOf('/') + 1) : fullPath,
+                                    true,
+                                    obj.size(),
+                                    -1,
+                                    obj.lastModified().toEpochMilli()
+                            );
+                        })
+                        .collect(Collectors.toList());
+
+                result.addAll(matchedFiles);
+            }
+            return Status.OK;
+        } catch (Exception e) {
+            LOG.warn("Errors while getting file status", e);
+            return new Status(Status.ErrCode.COMMON_ERROR, "Errors while getting file status " + e.getMessage());
+        }
+    }
+
     @Override
     public Triple<String, String, String> getStsToken() throws DdlException {
         return null;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/fs/remote/S3FileSystem.java b/fe/fe-core/src/main/java/org/apache/doris/fs/remote/S3FileSystem.java
index 555d1e42508..8ebf0d4d01e 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/fs/remote/S3FileSystem.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/fs/remote/S3FileSystem.java
@@ -20,24 +20,16 @@ package org.apache.doris.fs.remote;
 import org.apache.doris.analysis.StorageBackend;
 import org.apache.doris.backup.Status;
 import org.apache.doris.common.UserException;
-import org.apache.doris.common.security.authentication.AuthenticationConfig;
 import org.apache.doris.common.security.authentication.HadoopAuthenticator;
 import org.apache.doris.datasource.property.storage.AbstractObjectStorageProperties;
 import org.apache.doris.fs.obj.S3ObjStorage;
 
-import com.amazonaws.services.s3.model.AmazonS3Exception;
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 
-import java.io.FileNotFoundException;
-import java.io.IOException;
 import java.util.List;
-import java.util.Map;
 
 public class S3FileSystem extends ObjFileSystem {
 
@@ -66,77 +58,18 @@ public class S3FileSystem extends ObjFileSystem {
         this.properties.putAll(storageProperties.getOrigProps());
     }
 
+
     @Override
     protected FileSystem nativeFileSystem(String remotePath) throws UserException {
-        //todo Extracting a common method to achieve logic reuse
-        if (closed.get()) {
-            throw new UserException("FileSystem is closed.");
-        }
-        if (dfsFileSystem == null) {
-            synchronized (this) {
-                if (closed.get()) {
-                    throw new UserException("FileSystem is closed.");
-                }
-                if (dfsFileSystem == null) {
-                    Configuration conf = s3Properties.getHadoopConfiguration();
-                    System.setProperty("com.amazonaws.services.s3.enableV4", "true");
-                    AuthenticationConfig authConfig = AuthenticationConfig.getSimpleAuthenticationConfig(conf);
-                    HadoopAuthenticator authenticator = HadoopAuthenticator.getHadoopAuthenticator(authConfig);
-                    try {
-                        dfsFileSystem = authenticator.doAs(() -> {
-                            try {
-                                return FileSystem.get(new Path(remotePath).toUri(), conf);
-                            } catch (IOException e) {
-                                throw new RuntimeException(e);
-                            }
-                        });
-                        this.authenticator = authenticator;
-                        RemoteFSPhantomManager.registerPhantomReference(this);
-                    } catch (Exception e) {
-                        throw new UserException("Failed to get S3 FileSystem 
for " + e.getMessage(), e);
-                    }
-                }
-            }
-        }
-        return dfsFileSystem;
+        return null;
+
     }
 
     // broker file pattern glob is too complex, so we use hadoop directly
     @Override
     public Status globList(String remotePath, List<RemoteFile> result, boolean fileNameOnly) {
-        try {
-            FileSystem s3AFileSystem = nativeFileSystem(remotePath);
-            Path pathPattern = new Path(remotePath);
-            FileStatus[] files = s3AFileSystem.globStatus(pathPattern);
-            if (files == null) {
-                return Status.OK;
-            }
-            for (FileStatus fileStatus : files) {
-                RemoteFile remoteFile = new RemoteFile(
-                        fileNameOnly ? fileStatus.getPath().getName() : fileStatus.getPath().toString(),
-                        !fileStatus.isDirectory(), fileStatus.isDirectory() ? -1 : fileStatus.getLen(),
-                        fileStatus.getBlockSize(), fileStatus.getModificationTime());
-                result.add(remoteFile);
-            }
-        } catch (FileNotFoundException e) {
-            LOG.info("file not found: " + e.getMessage());
-            return new Status(Status.ErrCode.NOT_FOUND, "file not found: " + e.getMessage());
-        } catch (Exception e) {
-            if (e.getCause() instanceof AmazonS3Exception) {
-                // process minio error msg
-                AmazonS3Exception ea = (AmazonS3Exception) e.getCause();
-                Map<String, String> callbackHeaders = ea.getHttpHeaders();
-                if (callbackHeaders != null && !callbackHeaders.isEmpty()) {
-                    String minioErrMsg = callbackHeaders.get("X-Minio-Error-Desc");
-                    if (minioErrMsg != null) {
-                        return new Status(Status.ErrCode.COMMON_ERROR, "Minio request error: " + minioErrMsg);
-                    }
-                }
-            }
-            LOG.error("errors while get file status ", e);
-            return new Status(Status.ErrCode.COMMON_ERROR, "errors while get 
file status " + e.getMessage());
-        }
-        return Status.OK;
+        S3ObjStorage objStorage = (S3ObjStorage) this.objStorage;
+        return objStorage.globList(remotePath, result, fileNameOnly);
     }
 
     @VisibleForTesting
diff --git a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/S3TableValuedFunction.java b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/S3TableValuedFunction.java
index f6714995d67..fdc9ef05d13 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/S3TableValuedFunction.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/S3TableValuedFunction.java
@@ -32,7 +32,9 @@ import org.apache.doris.thrift.TFileType;
 
 import com.google.common.base.Strings;
 import com.google.common.collect.ImmutableSet;
+import org.apache.commons.lang3.StringUtils;
 
+import java.util.HashMap;
 import java.util.Map;
 
 /**
@@ -73,7 +75,22 @@ public class S3TableValuedFunction extends ExternalFileTableValuedFunction {
 
         S3URI s3uri = getS3Uri(uriStr, Boolean.parseBoolean(usePathStyle.toLowerCase()),
                 Boolean.parseBoolean(forceParsingByStandardUri.toLowerCase()));
-
+        String endpoint = constructEndpoint(otherProps, s3uri);
+        if (StringUtils.isNotBlank(endpoint)) {
+            otherProps.putIfAbsent(S3Properties.ENDPOINT, endpoint);
+        }
+        if (!otherProps.containsKey(S3Properties.REGION)) {
+            String region;
+            if (AzureProperties.checkAzureProviderPropertyExist(properties)) {
+                // Azure could run without region
+                region = s3uri.getRegion().orElse("DUMMY-REGION");
+            } else {
+                region = s3uri.getRegion().orElse(null);
+            }
+            if (StringUtils.isNotBlank(region)) {
+                otherProps.put(S3Properties.REGION, region);
+            }
+        }
         // get endpoint first from properties, if not present, get it from s3 uri.
         // If endpoint is missing, exception will be thrown.
         locationProperties.put(PropertyConverter.USE_PATH_STYLE, usePathStyle);
@@ -82,7 +99,9 @@ public class S3TableValuedFunction extends ExternalFileTableValuedFunction {
             // For Azure's compatibility, we need bucket to connect to the blob storage's container
             locationProperties.put(S3Properties.BUCKET, s3uri.getBucket());
         }
-        this.storageProperties = StorageProperties.createStorageProperties(properties);
+        Map<String, String> p = new HashMap<>(properties);
+        p.putAll(otherProps);
+        this.storageProperties = StorageProperties.createStorageProperties(p);
         locationProperties.putAll(storageProperties.getBackendConfigProperties());
         locationProperties.putAll(otherProps);
 
@@ -96,6 +115,26 @@ public class S3TableValuedFunction extends ExternalFileTableValuedFunction {
         }
     }
 
+    private String constructEndpoint(Map<String, String> properties, S3URI s3uri) throws AnalysisException {
+        String endpoint;
+        if (!AzureProperties.checkAzureProviderPropertyExist(properties)) {
+            // get endpoint first from properties, if not present, get it from s3 uri.
+            // If endpoint is missing, exception will be thrown.
+            endpoint = getOrDefaultAndRemove(properties, S3Properties.ENDPOINT, s3uri.getEndpoint().orElse(""));
+            /*if (Strings.isNullOrEmpty(endpoint)) {
+                throw new AnalysisException(String.format("Properties '%s' is required.", S3Properties.ENDPOINT));
+            }*/
+        } else {
+            String bucket = s3uri.getBucket();
+            String accountName = properties.getOrDefault(S3Properties.ACCESS_KEY, "");
+            if (accountName.isEmpty()) {
+                throw new AnalysisException(String.format("Properties '%s' is required.", S3Properties.ACCESS_KEY));
+            }
+            endpoint = String.format(AzureProperties.AZURE_ENDPOINT_TEMPLATE, accountName, bucket);
+        }
+        return endpoint;
+    }
+
     private void forwardCompatibleDeprecatedKeys(Map<String, String> props) {
         for (String deprecatedKey : DEPRECATED_KEYS) {
             String value = props.remove(deprecatedKey);
diff --git a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/COSPropertiesTest.java b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/COSPropertiesTest.java
index b2148f8e53a..9bce29f3c06 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/COSPropertiesTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/COSPropertiesTest.java
@@ -17,18 +17,10 @@
 
 package org.apache.doris.datasource.property.storage;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Disabled;
 import org.junit.jupiter.api.Test;
 
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -58,7 +50,6 @@ public class COSPropertiesTest {
         origProps.put("test_non_storage_param", "6000");
 
         COSProperties cosProperties = (COSProperties) StorageProperties.create(origProps).get(1);
-        Configuration config = cosProperties.getHadoopConfiguration();
         Map<String, String> cosConfig = cosProperties.getOrigProps();
         Assertions.assertTrue(!cosConfig.containsKey("test_non_storage_param"));
 
@@ -67,13 +58,6 @@ public class COSPropertiesTest {
                 Assertions.assertEquals(v, cosConfig.get(k));
             }
         });
-
-        // Validate the configuration
-        Assertions.assertEquals("https://cos.example.com";, 
config.get("fs.cos.endpoint"));
-        Assertions.assertEquals("myCOSAccessKey", 
config.get("fs.cosn.userinfo.secretId"));
-        Assertions.assertEquals("myCOSSecretKey", 
config.get("fs.cosn.userinfo.secretKey"));
-        Assertions.assertEquals("myCOSAccessKey", 
config.get("fs.cosn.userinfo.secretId"));
-        Assertions.assertEquals("myCOSSecretKey", 
config.get("fs.cosn.userinfo.secretKey"));
         origProps = new HashMap<>();
         origProps.put("cos.endpoint", "https://cos.example.com";);
         origProps.put(StorageProperties.FS_COS_SUPPORT, "true");
@@ -110,7 +94,7 @@ public class COSPropertiesTest {
         });
         // Validate the S3 properties
         Assertions.assertEquals("cos.ap-beijing.myqcloud.com", 
s3Props.get("AWS_ENDPOINT"));
-        Assertions.assertEquals("ap-beijing", s3Props.get("AWS_REGION"));
+        Assertions.assertEquals("ap-guangzhou", s3Props.get("AWS_REGION"));
         Assertions.assertEquals("myCOSAccessKey", 
s3Props.get("AWS_ACCESS_KEY"));
         Assertions.assertEquals("myCOSSecretKey", 
s3Props.get("AWS_SECRET_KEY"));
         Assertions.assertEquals("88", s3Props.get("AWS_MAX_CONNECTIONS"));
@@ -123,41 +107,4 @@ public class COSPropertiesTest {
         Assertions.assertEquals("true", s3Props.get("use_path_style"));
         // Add any additional assertions for other properties if needed
     }
-
-    /**
-     * This test method is used for verifying the connectivity and integration 
between
-     * the COS (Cloud Object Storage) and HDFS (Hadoop Distributed File 
System) by
-     * setting COS-specific properties and testing the ability to list files 
from an
-     * HDFS path.
-     * <p>
-     * The method:
-     * 1. Sets COS properties such as endpoint, access key, and secret key.
-     * 2. Converts COS properties to HDFS configuration.
-     * 3. Uses the HDFS configuration to connect to the file system.
-     * 4. Lists the files in the specified HDFS path and prints the file paths 
to the console.
-     * <p>
-     * Note:
-     * This test is currently disabled (@Disabled) and will not be executed 
unless enabled.
-     * The test requires valid COS credentials (access key and secret key) and 
a valid
-     * HDFS path to function correctly.
-     *
-     * @throws URISyntaxException if the URI for the HDFS path is malformed.
-     * @throws IOException        if there are issues with file system access 
or COS properties.
-     */
-    @Disabled
-    @Test
-    public void testCOSHdfsPropertiesTest() throws URISyntaxException, 
IOException {
-        origProps.put("cos.endpoint", "cos.ap-beijing.myqcloud.com");
-        origProps.put("cos.access_key", accessKey);
-        origProps.put("cos.secret_key", secretKey);
-        origProps.put(StorageProperties.FS_COS_SUPPORT, "true");
-        COSProperties cosProperties = (COSProperties) 
StorageProperties.create(origProps).get(1);
-
-        Configuration configuration = cosProperties.getHadoopConfiguration();
-        FileSystem fs = FileSystem.get(new URI(hdfsPath), configuration);
-        FileStatus[] fileStatuses = fs.listStatus(new Path(hdfsPath));
-        for (FileStatus status : fileStatuses) {
-            System.out.println("File Path: " + status.getPath());
-        }
-    }
 }
diff --git a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/OBSPropertyTest.java b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/OBSPropertyTest.java
index b7f3b521044..3228ac737a0 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/OBSPropertyTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/OBSPropertyTest.java
@@ -17,17 +17,9 @@
 
 package org.apache.doris.datasource.property.storage;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.junit.jupiter.api.Assertions;
-import org.junit.jupiter.api.Disabled;
 import org.junit.jupiter.api.Test;
 
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -41,15 +33,6 @@ public class OBSPropertyTest {
         origProps.put("obs.access_key", "myOBSAccessKey");
         origProps.put("obs.secret_key", "myOBSSecretKey");
         origProps.put(StorageProperties.FS_OBS_SUPPORT, "true");
-
-        ObjectStorageProperties properties = (ObjectStorageProperties) StorageProperties.create(origProps).get(1);
-        Configuration conf = properties.getHadoopConfiguration();
-
-        Assertions.assertEquals("https://obs.example.com", conf.get("fs.obs.endpoint"));
-        Assertions.assertEquals("myOBSAccessKey", conf.get("fs.obs.access.key"));
-        Assertions.assertEquals("myOBSSecretKey", conf.get("fs.obs.secret.key"));
-        Assertions.assertEquals("org.apache.hadoop.fs.obs.OBSFileSystem", conf.get("fs.obs.impl"));
-
         // Test creation without additional properties
         origProps = new HashMap<>();
         origProps.put("obs.endpoint", "https://obs.example.com";);
@@ -101,41 +84,5 @@ public class OBSPropertyTest {
 
     private static String obsAccessKey = "";
     private static String obsSecretKey = "";
-    private static String hdfsPath = "";
 
-    /**
-     * This test method verifies the integration of OBS (Object Storage 
Service) with HDFS
-     * by setting OBS-specific properties and testing the ability to list 
files from an
-     * HDFS path. It demonstrates how OBS properties can be converted into 
HDFS configuration
-     * settings and used to interact with HDFS.
-     * <p>
-     * The method:
-     * 1. Sets OBS properties such as access key, secret key, and endpoint.
-     * 2. Converts OBS properties to HDFS configuration using the 
`toHadoopConfiguration()` method.
-     * 3. Uses the HDFS configuration to connect to the file system.
-     * 4. Lists the files in the specified HDFS path and prints the file paths 
to the console.
-     * <p>
-     * Note:
-     * This test is currently disabled (@Disabled) and will not be executed 
unless enabled.
-     * The test requires valid OBS credentials (access key and secret key) and 
a valid
-     * HDFS path to function correctly.
-     *
-     * @throws URISyntaxException if the URI for the HDFS path is malformed.
-     * @throws IOException        if there are issues with file system access 
or OBS properties.
-     */
-    @Disabled
-    @Test
-    public void testToHadoopConfiguration() throws URISyntaxException, 
IOException {
-        origProps.put("obs.access_key", obsAccessKey);
-        origProps.put("obs.secret_key", obsSecretKey);
-        origProps.put("obs.endpoint", "obs.cn-north-4.myhuaweicloud.com");
-        origProps.put(StorageProperties.FS_OBS_SUPPORT, "true");
-        OBSProperties obsProperties = (OBSProperties) 
StorageProperties.create(origProps).get(1);
-        Configuration configuration = obsProperties.getHadoopConfiguration();
-        FileSystem fs = FileSystem.get(new URI(hdfsPath), configuration);
-        FileStatus[] fileStatuses = fs.listStatus(new Path(hdfsPath));
-        for (FileStatus status : fileStatuses) {
-            System.out.println("File Path: " + status.getPath());
-        }
-    }
 }
diff --git a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/OSSPropertiesTest.java b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/OSSPropertiesTest.java
index e582da4307e..b687916be7d 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/OSSPropertiesTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/OSSPropertiesTest.java
@@ -17,17 +17,9 @@
 
 package org.apache.doris.datasource.property.storage;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.junit.jupiter.api.Assertions;
-import org.junit.jupiter.api.Disabled;
 import org.junit.jupiter.api.Test;
 
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -44,12 +36,6 @@ public class OSSPropertiesTest {
         origProps.put("oss.access_key", "myOSSAccessKey");
         origProps.put("oss.secret_key", "myOSSSecretKey");
         origProps.put(StorageProperties.FS_OSS_SUPPORT, "true");
-        ObjectStorageProperties properties = (ObjectStorageProperties) StorageProperties.create(origProps).get(1);
-        Configuration conf = properties.getHadoopConfiguration();
-        Assertions.assertEquals("https://oss.aliyuncs.com", conf.get("fs.oss.endpoint"));
-        Assertions.assertEquals("myOSSAccessKey", conf.get("fs.oss.accessKeyId"));
-        Assertions.assertEquals("myOSSSecretKey", conf.get("fs.oss.accessKeySecret"));
-        Assertions.assertEquals("org.apache.hadoop.fs.aliyun.oss.AliyunOSSFileSystem", conf.get("fs.oss.impl"));
         origProps = new HashMap<>();
         origProps.put("oss.endpoint", "https://oss.aliyuncs.com");
         StorageProperties.create(origProps);
@@ -96,43 +82,4 @@ public class OSSPropertiesTest {
         ossProperties.toNativeS3Configuration(s3Props);
         Assertions.assertEquals("false", s3Props.get("use_path_style"));
     }
-
-
-    /**
-     * This test method verifies the integration between OSS (Object Storage 
Service)
-     * and HDFS by setting OSS-specific properties and testing the ability to 
list
-     * files from an HDFS path. It demonstrates how OSS properties can be 
converted
-     * into Hadoop configuration settings and used to interact with HDFS.
-     * <p>
-     * The method:
-     * 1. Sets OSS properties such as access key, secret key, and endpoint.
-     * 2. Converts OSS properties to HDFS configuration using the 
`toHadoopConfiguration()` method.
-     * 3. Uses the HDFS configuration to connect to the file system.
-     * 4. Lists the files in the specified HDFS path and prints the file paths 
to the console.
-     * <p>
-     * Note:
-     * This test is currently disabled (@Disabled) and will not be executed 
unless enabled.
-     * The test requires valid OSS credentials (access key and secret key) and 
a valid
-     * HDFS path to function correctly.
-     *
-     * @throws URISyntaxException if the URI for the HDFS path is malformed.
-     * @throws IOException        if there are issues with file system access 
or OSS properties.
-     */
-    @Disabled
-    @Test
-    public void testOSSHdfsProperties() throws IOException, URISyntaxException 
{
-        Map<String, String> origProps = new HashMap<>();
-        origProps.put("oss.access_key", ossAccessKey);
-        origProps.put("oss.secret_key", ossSecretKey);
-        origProps.put("oss.endpoint", "oss-cn-beijing-internal.aliyuncs.com");
-        origProps.put(StorageProperties.FS_OSS_SUPPORT, "true");
-        OSSProperties ossProperties = (OSSProperties) 
StorageProperties.create(origProps).get(1);
-        // ossParams.put("fs.AbstractFileSystem.oss.impl", 
"com.aliyun.jindodata.oss.JindoOSS");
-        Configuration configuration = ossProperties.getHadoopConfiguration();
-        FileSystem fs = FileSystem.get(new URI(hdfsPath), configuration);
-        FileStatus[] fileStatuses = fs.listStatus(new Path(hdfsPath));
-        for (FileStatus status : fileStatuses) {
-            System.out.println("File Path: " + status.getPath());
-        }
-    }
 }
diff --git a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/S3PropertiesTest.java b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/S3PropertiesTest.java
index dd0536f9c82..e2f014f981a 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/S3PropertiesTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/S3PropertiesTest.java
@@ -17,18 +17,10 @@
 
 package org.apache.doris.datasource.property.storage;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Disabled;
 import org.junit.jupiter.api.Test;
 
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -51,12 +43,6 @@ public class S3PropertiesTest {
         origProps.put("s3.secret_key", "myS3SecretKey");
         origProps.put("s3.region", "us-west-1");
         origProps.put(StorageProperties.FS_S3_SUPPORT, "true");
-        S3Properties s3Properties = (S3Properties) StorageProperties.create(origProps).get(1);
-        Configuration config = s3Properties.getHadoopConfiguration();
-        // Validate the configuration
-        Assertions.assertEquals("myS3AccessKey", config.get("fs.s3a.access.key"));
-        Assertions.assertEquals("myS3SecretKey", config.get("fs.s3a.secret.key"));
-        Assertions.assertEquals("us-west-1", config.get("fs.s3a.region"));
         origProps = new HashMap<>();
         origProps.put("s3.endpoint", "https://s3.example.com");
         origProps.put(StorageProperties.FS_S3_SUPPORT, "true");
@@ -111,48 +97,4 @@ public class S3PropertiesTest {
         Assertions.assertEquals("50", s3Props.get("AWS_MAX_CONNECTIONS"));
         Assertions.assertEquals("1000", 
s3Props.get("AWS_CONNECTION_TIMEOUT_MS"));
     }
-
-    /**
-     * This test method verifies the integration between S3 (Amazon Simple 
Storage Service)
-     * and HDFS by setting S3-specific properties and testing the ability to 
list files
-     * from an HDFS path. It demonstrates how S3 properties can be converted 
into
-     * Hadoop configuration settings and used to interact with HDFS.
-     * <p>
-     * The method:
-     * 1. Sets S3 properties such as access key, secret key, endpoint, and 
region.
-     * 2. Converts S3 properties to HDFS configuration using the 
`toHadoopConfiguration()` method.
-     * 3. Uses the HDFS configuration to connect to the file system.
-     * 4. Lists the files in the specified HDFS path and prints the file paths 
to the console.
-     * <p>
-     * Note:
-     * This test is currently disabled (@Disabled) and will not be executed 
unless enabled.
-     * The test requires valid S3 credentials (access key and secret key) and 
a valid
-     * HDFS path to function correctly.
-     *
-     * @throws URISyntaxException if the URI for the HDFS path is malformed.
-     * @throws IOException        if there are issues with file system access 
or S3 properties.
-     */
-    @Disabled
-    @Test
-    public void testS3HdfsPropertiesTest() throws URISyntaxException, 
IOException {
-        origProps.put("s3.endpoint", "s3.ap-northeast-1.amazonaws.com");
-        origProps.put("s3.access_key", accessKey);
-        origProps.put("s3.secret_key", secretKey);
-        origProps.put("s3.region", "ap-northeast-1");
-        origProps.put(StorageProperties.FS_S3_SUPPORT, "true");
-        origProps.put("use_path_style", "true");
-        origProps.put("s3.connection.maximum", "88");
-        origProps.put("s3.connection.timeout", "6000");
-        origProps.put("test_non_storage_param", "6000");
-        S3Properties s3Properties = (S3Properties) 
StorageProperties.create(origProps).get(1);
-        Configuration configuration = s3Properties.getHadoopConfiguration();
-        Assertions.assertEquals("88", 
configuration.get("fs.s3a.connection.maximum"));
-        Assertions.assertEquals("6000", 
configuration.get("fs.s3a.connection.timeout"));
-        Assertions.assertEquals("6000", 
configuration.get("fs.s3a.request.timeout"));
-        FileSystem fs = FileSystem.get(new URI(hdfsPath), configuration);
-        FileStatus[] fileStatuses = fs.listStatus(new Path(hdfsPath));
-        for (FileStatus status : fileStatuses) {
-            System.out.println("File Path: " + status.getPath());
-        }
-    }
 }
diff --git a/regression-test/data/s3_storage/test_s3_tvf_s3_storage.out b/regression-test/data/s3_storage/test_s3_tvf_s3_storage.out
index 6f44ed9b1e7..d85e3eae43d 100644
Binary files a/regression-test/data/s3_storage/test_s3_tvf_s3_storage.out and b/regression-test/data/s3_storage/test_s3_tvf_s3_storage.out differ
diff --git a/regression-test/suites/refactor_storage_param_p0/backup_restore_cos.groovy b/regression-test/suites/refactor_storage_param_p0/backup_restore_cos.groovy
index 884eafe2966..f9d3368c913 100644
--- a/regression-test/suites/refactor_storage_param_p0/backup_restore_cos.groovy
+++ b/regression-test/suites/refactor_storage_param_p0/backup_restore_cos.groovy
@@ -17,16 +17,32 @@
 import org.awaitility.Awaitility;
 import static java.util.concurrent.TimeUnit.SECONDS;
 import static groovy.test.GroovyAssert.shouldFail
+
 suite("refactor_storage_backup_restore_cos") {
-    String enabled= context.config.otherConfigs.get("enableBackUpRestoreCOSTest");
-    if (enabled == null && enabled.equalsIgnoreCase("false")){
-        return 
-    }
-    def s3table = "test_backup_restore_cos";
-    sql """
-        drop table  if exists ${s3table}; 
-         """
-    sql """
+    def s3table = "test_backup_restore";
+
+    def databaseQueryResult = sql """
+       select database();
+    """
+    println databaseQueryResult
+    def currentDBName = databaseQueryResult.get(0).get(0)
+    println currentDBName
+    // cos
+
+    def createDBAndTbl = { String dbName ->
+
+        sql """
+                drop database if exists ${dbName}
+            """
+
+        sql """
+            create database ${dbName}
+        """
+
+        sql  """
+             use ${dbName}
+             """
+        sql """
         CREATE TABLE ${s3table}(
             user_id            BIGINT       NOT NULL COMMENT "user id",
             name               VARCHAR(20)           COMMENT "name",
@@ -38,141 +54,194 @@ suite("refactor_storage_backup_restore_cos") {
             "replication_num" = "1"
         );
     """
-    sql """
+        sql """
         insert into ${s3table} values (1, 'a', 10);
     """
 
-    def insertResult = sql """
+        def insertResult = sql """
         SELECT count(1) FROM ${s3table}
     """
 
-    println "insertResult: ${insertResult}"
+        println "insertResult: ${insertResult}"
 
-    assert insertResult.get(0).get(0) == 1
-    def databaseQueryResult = sql """
-       select database();
-    """
-    println databaseQueryResult
-    def currentDBName = databaseQueryResult.get(0).get(0)
-    println currentDBName
-    // cos
-   
-    String objectAccessKey = context.config.otherConfigs.get("cosAK");
-    String objectSecretKey = context.config.otherConfigs.get("cosSK");
-    String objectStorageEndpoint =context.config.otherConfigs.get("cosEndpoint");
-    String objectStorageRegion = context.config.otherConfigs.get("cosRegion");
-    String objectStorageFilePathPrefix =context.config.otherConfigs.get("cosFilePathPrefix");
-
-    def objectStorageRepoName = "cos_repo_test_1";
-    try {
-        sql """
-            drop repository  ${objectStorageRepoName};
-        """
-    } catch (Exception e) {
-        //ignore exception, repo may not exist
+        assert insertResult.get(0).get(0) == 1
     }
 
-   
-    shouldFail {
+    def createRepository = { String repoName, String endpointName, String endpoint, String regionName, String region, String accessKeyName, String accessKey, String secretKeyName, String secretKey, String usePathStyle, String location ->
+        try {
+            sql """
+                drop repository  ${repoName};
+            """
+        } catch (Exception e) {
+            // ignore exception, repo may not exist
+        }
+
         sql """
-            CREATE REPOSITORY  ${objectStorageRepoName}
-        WITH S3
-        ON LOCATION "${objectStorageFilePathPrefix}"
-        PROPERTIES (
-            "s3.endpoint" = "${objectStorageEndpoint}",
-            "s3.region" = "${objectStorageRegion}",
-            "s3.secret_key" = "${objectSecretKey}"
-        );
+            CREATE REPOSITORY  ${repoName}
+            WITH S3
+            ON LOCATION "${location}"
+            PROPERTIES (
+                "${endpointName}" = "${endpoint}",
+                "${regionName}" = "${region}",
+                "${accessKeyName}" = "${accessKey}",
+                "${secretKeyName}" = "${secretKey}",
+                "use_path_style" = "${usePathStyle}"
+            );
         """
     }
-    // Invalid export path https:// please use valid 's3://' path.
-    shouldFail {
        objectStorageHttpsFilePathPrefix = objectStorageFilePathPrefix.replaceAll("^s3://", "https://");
+
+    def backupAndRestore = { String repoName, String dbName, String tableName, String backupLabel ->
         sql """
-        CREATE REPOSITORY  ${objectStorageRepoName}_https_prefix
-        WITH S3
-        ON LOCATION "https://${objectStorageHttpsFilePathPrefix}";
-                PROPERTIES (
-            "s3.endpoint" = "${objectStorageEndpoint}",
-            "s3.region" = "${objectStorageRegion}",
-            "s3.access_key" = "${objectAccessKey}",
-            "s3.secret_key" = "${objectSecretKey}"
-        );
+        BACKUP SNAPSHOT ${dbName}.${backupLabel}
+        TO ${repoName}
+        ON (${tableName})
+    """
+        Awaitility.await().atMost(60, SECONDS).pollInterval(5, SECONDS).until(
+                {
+                    def backupResult = sql """
+                show backup from ${dbName} where SnapshotName = '${backupLabel}';
+            """
+                    println "backupResult: ${backupResult}"
+                    return backupResult.get(0).get(3) == "FINISHED"
+                })
+
+        def querySnapshotResult = sql """
+        SHOW SNAPSHOT ON ${repoName} WHERE SNAPSHOT =  '${backupLabel}';
         """
-    }
-    shouldFail {
+        println querySnapshotResult
+        def snapshotTimes = querySnapshotResult.get(0).get(1).split('\n')
+        def snapshotTime = snapshotTimes[0]
+
         sql """
-            CREATE REPOSITORY  ${objectStorageRepoName}
-        WITH S3
-        ON LOCATION "${objectStorageFilePathPrefix}"
-        PROPERTIES (
-            "s3.endpoint" = "${objectStorageEndpoint}",
-            "s3.region" = "${objectStorageRegion}",
-            "s3.secret_key" = "${objectSecretKey}"
-        );
+        drop table  if exists ${tableName}; 
         """
-    }
-    sql """
-        CREATE REPOSITORY  ${objectStorageRepoName}
-        WITH S3
-        ON LOCATION "${objectStorageFilePathPrefix}"
-        PROPERTIES (
-            "s3.endpoint" = "${objectStorageEndpoint}",
-            "s3.region" = "${objectStorageRegion}",
-            "s3.access_key" = "${objectAccessKey}",
-            "s3.secret_key" = "${objectSecretKey}"
-        );
-    """
-    
-    def objectStorageBackupLabel = "oss_label_1" + System.currentTimeMillis()
-    sql """
-       BACKUP SNAPSHOT ${currentDBName}.${objectStorageBackupLabel}
-        TO ${objectStorageRepoName}
-        ON (${s3table})
-        PROPERTIES ("type" = "full");
-        
-    """
-    Awaitility.await().atMost(60, SECONDS).pollInterval(5, SECONDS).until(
-            {
-                def backupResult = sql """
-                    show backup from ${currentDBName} where SnapshotName = '${objectStorageBackupLabel}';
-                """
-                println "backupResult: ${backupResult}"
-                return backupResult.get(0).get(3) == "FINISHED"
-            })
-
-    def queryCosSnapshotResult = sql """
-        SHOW SNAPSHOT ON ${objectStorageRepoName} WHERE SNAPSHOT =  '${objectStorageBackupLabel}';
-    """
-    println queryCosSnapshotResult
-    def objectSecretSnapshotTime = queryCosSnapshotResult.get(0).get(1)
-    sql """
-        drop table  if exists ${s3table}; 
-    """
-    //restore
-    sql """
-        RESTORE SNAPSHOT ${currentDBName}.${objectStorageBackupLabel}
-        FROM ${objectStorageRepoName}
-        ON (`${s3table}`)
+
+        sql """
+        RESTORE SNAPSHOT ${dbName}.${backupLabel}
+        FROM ${repoName}
+        ON (`${tableName}`)
         PROPERTIES
         (
-            "backup_timestamp"="${objectSecretSnapshotTime}",
+            "backup_timestamp"="${snapshotTime}",
             "replication_num" = "1"
         );
-    """
-    Awaitility.await().atMost(60, SECONDS).pollInterval(5, SECONDS).until(
-            {
-                try {
-                    def restoreResult = sql """
-                        SELECT count(1) FROM ${s3table}
-                    """
-                    println "restoreResult: ${restoreResult}"
-                    return restoreResult.get(0).get(0) == 1
-                } catch (Exception e) {
-                    //tbl not found
-                    return false
-                }
-            })
+        """
+        Awaitility.await().atMost(60, SECONDS).pollInterval(5, SECONDS).until(
+                {
+                    try {
+
+                        sql """
+                        use ${dbName}
+                        """
+                        def restoreResult = sql """
+                         SELECT count(1) FROM ${tableName}
+                        """
+                        println "restoreResult: ${restoreResult}"
+                        def count = restoreResult.get(0).get(0)
+                        println "count: ${count}"
+                        return restoreResult.get(0).get(0) == 1
+                    } catch (Exception e) {
+                        // tbl not found
+                        println "tbl not found"+e.getMessage()
+                        return false
+                    }
+                })
+    }
 
 
+
+    def test_backup_restore= {String ak,String sk,String s3_endpoint,String region,String bucket,String objPrefix ->
+        def s3repoName1 = "${objPrefix}_repo_1"
+        createRepository("${s3repoName1}", "s3.endpoint", s3_endpoint, 
"s3.region", region, "s3.access_key", ak, "s3.secret_key", sk, "true", 
"s3://${bucket}/test_" + System.currentTimeMillis())
+
+        def dbName1 = currentDBName + "${objPrefix}_1"
+        createDBAndTbl("${dbName1}")
+        backupAndRestore("${s3repoName1}", dbName1, s3table, 
"backup_${s3repoName1}_test")
+        def s3repoName2 = "${objPrefix}_repo_2"
+        createRepository("${s3repoName2}", "s3.endpoint", s3_endpoint, 
"s3.region", region, "s3.access_key", ak, "s3.secret_key", sk, "false", 
"s3://${bucket}/test_" + System.currentTimeMillis())
+        def dbName2 = currentDBName + "${objPrefix}_2"
+        createDBAndTbl("${dbName2}")
+        backupAndRestore("${s3repoName2}", dbName2, s3table, 
"backup_${s3repoName2}_test")
+
+        def s3repoName3 = "${objPrefix}_repo_3"
+        createRepository("${s3repoName3}", "s3.endpoint", s3_endpoint, 
"s3.region", region, "s3.access_key", ak, "s3.secret_key", sk, "", 
"s3://${bucket}/test_" + System.currentTimeMillis())
+        def dbName3 = currentDBName + "${objPrefix}_3"
+        createDBAndTbl("${dbName3}")
+        backupAndRestore("${s3repoName3}", dbName3, s3table, 
"backup_${s3repoName3}_test")
+
+        def s3repoName4 = "${objPrefix}_s3_repo_4"
+        createRepository("${s3repoName4}", "s3.endpoint", s3_endpoint, 
"s3.region", region, "AWS_ACCESS_KEY", ak, "AWS_SECRET_KEY", sk, "true", 
"s3://${bucket}/test_" + System.currentTimeMillis())
+        def dbName4 = currentDBName + "${objPrefix}_4"
+        createDBAndTbl("${dbName4}")
+        backupAndRestore("${s3repoName4}", dbName4, s3table, 
"backup_${s3repoName4}_test")
+        def s3repoName5 = "${objPrefix}_s3_repo_5"
+        createRepository("${s3repoName5}", "s3.endpoint", s3_endpoint, 
"s3.region", region, "AWS_ACCESS_KEY", ak, "AWS_SECRET_KEY", sk, "false", 
"s3://${bucket}/test_" + System.currentTimeMillis())
+        def dbName5 = currentDBName + "${objPrefix}_5"
+        createDBAndTbl("${dbName5}")
+        backupAndRestore("${s3repoName5}", dbName5, s3table, 
"backup_${s3repoName5}_test")
+        def s3repoName6 = "${objPrefix}_s3_repo_6"
+        createRepository("${s3repoName6}", "AWS_ENDPOINT", s3_endpoint, 
"AWS_REGION", region, "AWS_ACCESS_KEY", ak, "AWS_SECRET_KEY", sk, "false", 
"s3://${bucket}/test_" + System.currentTimeMillis())
+        def dbName6 = currentDBName + "${objPrefix}_6"
+        createDBAndTbl("${dbName6}")
+        backupAndRestore("${s3repoName6}", dbName6, s3table, 
"backup_${s3repoName6}_test")
+        def failedRepoName = "s3_repo_failed"
+        // wrong address
+        shouldFail {
+            createRepository("${failedRepoName}", "s3.endpoint", s3_endpoint, 
"s3.region", region, "AWS_ACCESS_KEY", ak, "AWS_SECRET_KEY", sk, "true", 
"s3://ck/" + System.currentTimeMillis())
+        }
+
+        shouldFail {
+            createRepository("${failedRepoName}", "s3.endpoint", s3_endpoint, 
"s3.region", region, "s3.access_key", ak, "s3.secret_key", sk, "", 
"https://${bucket}/test_"; + System.currentTimeMillis())
+        }
+        // http://${bucket}/test_"+System.currentTimeMillis()
+        shouldFail {
+            createRepository("${failedRepoName}", "s3.endpoint", s3_endpoint, 
"s3.region", region, "s3.access_key", ak, "s3.secret_key", sk, "", 
"http://${bucket}/test_"; + System.currentTimeMillis())
+        }
+        // https://${bucket}/test_"+System.currentTimeMillis()
+        shouldFail {
+            createRepository("${failedRepoName}", "s3.endpoint", s3_endpoint, 
"s3.region", region, "s3.access_key", ak, "s3.secret_key", sk, "", 
"https://${bucket}/test_"; + System.currentTimeMillis())
+        }
+        //endpoint is empty
+        shouldFail {
+            createRepository("${failedRepoName}", "s3.endpoint", "", 
"s3.region", region, "s3.access_key", ak, "s3.secret_key", sk, "", 
"s3://${bucket}/test_" + System.currentTimeMillis())
+        }
+        //region is empty
+        shouldFail {
+            createRepository("${failedRepoName}", "s3.endpoint", "", 
"s3.region", "", "s3.access_key", ak, "s3.secret_key", sk, "", 
"s3://${bucket}/test_" + System.currentTimeMillis())
+        }
+    }
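+    // Provider-specific credentials come from the regression config (otherConfigs);
+    // test_backup_restore is invoked once per enabled object store below.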
+    String  ak = context.config.otherConfigs.get("AWSAK")
+    String sk = context.config.otherConfigs.get("AWSSK")
+    String s3_endpoint = "s3.ap-northeast-1.amazonaws.com"
+    String region = "ap-northeast-1"
+    String bucket = "selectdb-qa-datalake-test"
+    String objPrefix="s3"
+    /*-------------AWS S3--------------------------------*/
+    //test_backup_restore(ak,sk,s3_endpoint,region,bucket,objPrefix)
+    /*-----------------Tencent COS----------------*/
+    ak = context.config.otherConfigs.get("txYunAk")
+    sk = context.config.otherConfigs.get("txYunSk")
+    s3_endpoint = "cos.ap-beijing.myqcloud.com"
+    region = "ap-beijing"
+    bucket = "doris-build-1308700295";
+    
+    objPrefix="cos"
+    test_backup_restore(ak,sk,s3_endpoint,region,bucket,objPrefix)
+    /*-----------------Huawei OBS----------------*/
+    ak = context.config.otherConfigs.get("hwYunAk")
+    sk = context.config.otherConfigs.get("hwYunSk")
+    s3_endpoint = "obs.cn-north-4.myhuaweicloud.com"
+    region = "cn-north-4"
+    bucket = "doris-build";
+    objPrefix="obs"
+    test_backup_restore(ak,sk,s3_endpoint,region,bucket,objPrefix)
+    /*-----------------Aliyun OSS----------------*/
+    ak = context.config.otherConfigs.get("aliYunAk")
+    sk = context.config.otherConfigs.get("aliYunSk")
+    s3_endpoint = "oss-cn-hongkong.aliyuncs.com"
+    region = "oss-cn-hongkong"
+    bucket = "doris-regression-hk";
+    objPrefix="oss"
+    // OSS currently has issues, so this case is commented out.
+    //test_backup_restore(ak,sk,s3_endpoint,region,bucket,objPrefix)
 }
\ No newline at end of file
diff --git a/regression-test/suites/refactor_storage_param_p0/s3_load.groovy 
b/regression-test/suites/refactor_storage_param_p0/s3_load.groovy
new file mode 100644
index 00000000000..62990050ae0
--- /dev/null
+++ b/regression-test/suites/refactor_storage_param_p0/s3_load.groovy
@@ -0,0 +1,297 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+import org.awaitility.Awaitility
+
+import static groovy.test.GroovyAssert.shouldFail;
+import static java.util.concurrent.TimeUnit.SECONDS;
+
+suite("refactor_storage_param_load") {
+
+    String  ak = context.config.otherConfigs.get("AWSAK")
+    String sk = context.config.otherConfigs.get("AWSSK")
+    String endpoint = "s3.ap-northeast-1.amazonaws.com"
+    String region = "ap-northeast-1"
+    String bucket = "selectdb-qa-datalake-test"
+
+    def s3table = "test_s3load";
+    sql """
+        drop table  if exists ${s3table}; 
+         """
+    sql """
+        CREATE TABLE ${s3table}(
+            user_id            BIGINT       NOT NULL COMMENT "user id",
+            name               VARCHAR(20)           COMMENT "name",
+            age                INT                   COMMENT "age"
+        )
+        DUPLICATE KEY(user_id)
+        DISTRIBUTED BY HASH(user_id) BUCKETS 10
+        PROPERTIES (
+            "replication_num" = "1"
+        );
+    """
+    sql """
+        insert into ${s3table} values (1, 'a', 10);
+    """
+
+    def insertResult = sql """
+        SELECT count(1) FROM ${s3table}
+    """
+
+    println "insertResult: ${insertResult}"
+    assert insertResult.get(0).get(0) == 1
+
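+    // Export the table with SELECT ... INTO OUTFILE and return the output file
+    // URL (taken from the OUTFILE result row), which the load tests below use
+    // as their source path.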
+    def outfile_to_S3 = { objBucket, objEndpoint, objRegion, objAk, objSk ->
+        def outFilePath = "${objBucket}/outfile_different_s3/exp_"
+        // select ... into outfile ...
+        def res = sql """
+            SELECT * FROM ${s3table} t ORDER BY user_id
+            INTO OUTFILE "s3://${outFilePath}"
+            FORMAT AS CSV
+            PROPERTIES (
+                "s3.endpoint" = "${objEndpoint}",
+                "s3.region" = "${objRegion}",
+                "s3.secret_key"="${objSk}",
+                "s3.access_key" = "${objAk}"
+            );
+        """
+        return res[0][3]
+    }
+    def outfile_path = outfile_to_S3(bucket, endpoint, region, ak, sk);
+    def filePath = outfile_path.replace("s3://${bucket}", "")
+
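+    // Load the exported file back via LOAD ... WITH S3. The property-name
+    // arguments (objEndpointName, objAkName, ...) let the same closure exercise
+    // different key spellings (s3.* / obs.* / cos.* / AWS_*), and usePathStyle
+    // feeds the "use_path_style" property. The closure polls SHOW LOAD until the
+    // job is FINISHED and then checks that the row count reaches the expected value.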
+    def s3Load = { String objFilePath, String objBucket, String 
objEndpointName, String objEndpoint, String objRegionName, String objRegion, 
String objAkName, String objAk, String objSkName, String objSk, String 
usePathStyle ->
+
+        def dataCountResult = sql """
+            SELECT count(*) FROM ${s3table}
+        """
+        def dataCount = dataCountResult[0][0]
+        def label = "s3_load_label_" + System.currentTimeMillis()
+        def load = sql """
+            LOAD LABEL `${label}` (
+           data infile ("${objFilePath}")
+           into table ${s3table}
+            COLUMNS TERMINATED BY "\\\t"
+            FORMAT AS "CSV"
+             (
+                user_id,
+                name,
+                age
+             ))
+             with s3
+             (
+               "${objEndpointName}" = "${objEndpoint}",
+               "${objRegionName}" = "${objRegion}",
+               "${objSkName}"="${objSk}",
+               "use_path_style" = "${usePathStyle}",
+               "${objAkName}" = "${objAk}"
+             )
+             PROPERTIES
+            (
+                "timeout" = "3600"
+            );
+        """
+        Awaitility.await().atMost(60, SECONDS).pollInterval(5, SECONDS).until({
+            def loadResult = sql """
+           show load where label = '${label}'
+           """
+            if (loadResult.get(0).get(2) == 'CANCELLED' || 
loadResult.get(0).get(2) == 'FAILED') {
+                throw new RuntimeException("load failed")
+            }
+            return loadResult.get(0).get(2) == 'FINISHED'
+        })
+
+
+        def expectedCount = dataCount + 1
+        Awaitility.await().atMost(60, SECONDS).pollInterval(5, SECONDS).until({
+            def loadResult = sql """
+            select count(*) from ${s3table}
+        """
+            println "loadResult: ${loadResult} "
+            return loadResult.get(0).get(0) == expectedCount
+        })
+
+    }
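+    // The calls below combine different URI schemes (s3://, http://, https://)
+    // with different property-name styles and use_path_style values; the
+    // shouldFail cases use malformed locations (endpoint and bucket swapped into
+    // the path) or missing properties.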
+    s3Load("s3://${bucket}${filePath}", bucket, "s3.endpoint", endpoint, 
"s3.region", region, "s3.access_key", ak, "s3.secret_key", sk, "true")
+    s3Load("s3://${bucket}${filePath}", bucket, "s3.endpoint", endpoint, 
"s3.region", region, "s3.access_key", ak, "s3.secret_key", sk, "false")
+    s3Load("s3://${bucket}${filePath}", bucket, "s3.endpoint", endpoint, 
"s3.region", region, "s3.access_key", ak, "s3.secret_key", sk, "")
+    s3Load("s3://${bucket}${filePath}", bucket, "s3.endpoint", endpoint, 
"s3.region", region, "AWS_ACCESS_KEY", ak, "AWS_SECRET_KEY", sk, "")
+    s3Load("http://${bucket}${filePath}";, bucket, "s3.endpoint", endpoint, 
"s3.region", region, "s3.access_key", ak, "s3.secret_key", sk, "true")
+    s3Load("http://${bucket}${filePath}";, bucket, "s3.endpoint", endpoint, 
"s3.region", region, "s3.access_key", ak, "s3.secret_key", sk, "")
+    s3Load("https://${bucket}${filePath}";, bucket, "s3.endpoint", endpoint, 
"s3.region", region, "s3.access_key", ak, "s3.secret_key", sk, "")
+    s3Load("https://${bucket}${filePath}";, bucket, "s3.endpoint", endpoint, 
"s3.region", region, "s3.access_key", ak, "s3.secret_key", sk, "false")
+    shouldFail {
+        s3Load("https://${bucket}${filePath}";, bucket, "", endpoint, 
"s3.region", region, "s3.access_key", ak, "s3.secret_key", sk, "false")
+    }
+
+    shouldFail {
+        s3Load("https://${bucket}${filePath}";, bucket, "", endpoint, 
"s3.region", region, "s3.access_key", "", "s3.secret_key", sk, "false")
+    }
+    shouldFail {
+        s3Load("https://${bucket}/${endpoint}${filePath}";, bucket, 
"s3.endpoint", endpoint, "s3.region", region, "s3.access_key", ak, 
"s3.secret_key", sk, "")
+
+    }
+    shouldFail {
+        s3Load("https://${bucket}/${endpoint}${filePath}";, bucket, 
"s3.endpoint", endpoint, "s3.region", region, "s3.access_key", ak, 
"s3.secret_key", sk, "true")
+    }
+    shouldFail {
+        s3Load("s3://${endpoint}/${bucket}${filePath}", bucket, "s3.endpoint", 
endpoint, "s3.region", region, "s3.access_key", ak, "s3.secret_key", sk, 
"false")
+    }
+    shouldFail {
+        s3Load("s3://${bucket}/${endpoint}${filePath}", bucket, "s3.endpoint", 
endpoint, "s3.region", region, "s3.access_key", ak, "s3.secret_key", sk, 
"false")
+    }
+    shouldFail {
+        s3Load("s3://${endpoint}/${bucket}${filePath}", bucket, "s3.endpoint", 
endpoint, "s3.region", region, "s3.access_key", ak, "s3.secret_key", sk, 
"false")
+    }
+    /*----------obs---------------*/
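+    // Repeat the load matrix against Huawei OBS, using both the generic s3.*
+    // and the obs.* property names as well as the obs:// scheme.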
+    ak = context.config.otherConfigs.get("hwYunAk")
+    sk = context.config.otherConfigs.get("hwYunSk")
+    endpoint = "obs.cn-north-4.myhuaweicloud.com"
+    region = "cn-north-4"
+    bucket = "doris-build";
+    outfile_path = outfile_to_S3(bucket, endpoint, region, ak, sk);
+    filePath = outfile_path.replace("s3://${bucket}", "")
+    s3Load("s3://${bucket}${filePath}", bucket, "s3.endpoint", endpoint, 
"s3.region", region, "s3.access_key", ak, "s3.secret_key", sk, "true")
+    s3Load("s3://${bucket}${filePath}", bucket, "s3.endpoint", endpoint, 
"s3.region", region, "s3.access_key", ak, "s3.secret_key", sk, "false")
+    s3Load("s3://${bucket}${filePath}", bucket, "obs.endpoint", endpoint, 
"obs.region", region, "obs.access_key", ak, "obs.secret_key", sk, "true")
+    s3Load("s3://${bucket}${filePath}", bucket, "obs.endpoint", endpoint, 
"obs.region", region, "obs.access_key", ak, "obs.secret_key", sk, "false")
+    s3Load("obs://${bucket}${filePath}", bucket, "obs.endpoint", endpoint, 
"obs.region", region, "obs.access_key", ak, "obs.secret_key", sk, "true")
+    s3Load("obs://${bucket}${filePath}", bucket, "obs.endpoint", endpoint, 
"obs.region", region, "obs.access_key", ak, "obs.secret_key", sk, "false")
+    s3Load("obs://${bucket}${filePath}", bucket, "obs.endpoint", endpoint, 
"obs.region", region, "obs.access_key", ak, "obs.secret_key", sk, "")
+    s3Load("s3://${bucket}${filePath}", bucket, "obs.endpoint", endpoint, 
"obs.region", region, "obs.access_key", ak, "obs.secret_key", sk, "")
+    s3Load("http://${bucket}${filePath}";, bucket, "obs.endpoint", endpoint, 
"obs.region", region, "obs.access_key", ak, "obs.secret_key", sk, "")
+    s3Load("https://${bucket}${filePath}";, bucket, "obs.endpoint", endpoint, 
"obs.region", region, "obs.access_key", ak, "obs.secret_key", sk, "")
+    s3Load("http://${bucket}${filePath}";, bucket, "obs.endpoint", endpoint, 
"obs.region", region, "obs.access_key", ak, "obs.secret_key", sk, "true")
+    s3Load("http://${bucket}${filePath}";, bucket, "obs.endpoint", endpoint, 
"obs.region", region, "obs.access_key", ak, "obs.secret_key", sk, "false")
+    shouldFail {
+        s3Load("https://${bucket}${filePath}";, bucket, "", endpoint, 
"obs.region", region, "obs.access_key", ak, "obs.secret_key", sk, "false")
+    }
+
+    shouldFail {
+        s3Load("https://${bucket}${filePath}";, bucket, "", endpoint, 
"obs.region", region, "obs.access_key", "", "obs.secret_key", sk, "false")
+    }
+    shouldFail {
+        s3Load("https://${bucket}/${endpoint}${filePath}";, bucket, 
"obs.endpoint", endpoint, "obs.region", region, "obs.access_key", ak, 
"obs.secret_key", sk, "")
+
+    }
+    shouldFail {
+        s3Load("https://${bucket}/${endpoint}${filePath}";, bucket, 
"obs.endpoint", endpoint, "obs.region", region, "obs.access_key", ak, 
"obs.secret_key", sk, "true")
+    }
+    shouldFail {
+        s3Load("s3://${endpoint}/${bucket}${filePath}", bucket, 
"obs.endpoint", endpoint, "obs.region", region, "obs.access_key", ak, 
"obs.secret_key", sk, "false")
+    }
+    shouldFail {
+        s3Load("obs://${bucket}/${endpoint}${filePath}", bucket, 
"obs.endpoint", endpoint, "obs.region", region, "obs.access_key", ak, 
"obs.secret_key", sk, "false")
+    }
+    shouldFail {
+        s3Load("obs://${endpoint}/${bucket}${filePath}", bucket, 
"obs.endpoint", endpoint, "obs.region", region, "obs.access_key", ak, 
"obs.secret_key", sk, "false")
+    }
+    
+    /*-------------Tencent COS ----------*/
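+    // Repeat the load matrix against Tencent COS, using both the generic s3.*
+    // and the cos.* property names as well as the cos:// scheme.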
+    ak = context.config.otherConfigs.get("txYunAk")
+    sk = context.config.otherConfigs.get("txYunSk")
+    endpoint = "cos.ap-beijing.myqcloud.com"
+    region = "ap-beijing"
+    bucket = "doris-build-1308700295";
+
+    outfile_path = outfile_to_S3(bucket, endpoint, region, ak, sk);
+    filePath = outfile_path.replace("s3://${bucket}", "")
+    s3Load("s3://${bucket}${filePath}", bucket, "s3.endpoint", endpoint, 
"s3.region", region, "s3.access_key", ak, "s3.secret_key", sk, "true")
+    s3Load("s3://${bucket}${filePath}", bucket, "s3.endpoint", endpoint, 
"s3.region", region, "s3.access_key", ak, "s3.secret_key", sk, "false")
+    s3Load("s3://${bucket}${filePath}", bucket, "cos.endpoint", endpoint, 
"cos.region", region, "cos.access_key", ak, "cos.secret_key", sk, "true")
+    s3Load("s3://${bucket}${filePath}", bucket, "cos.endpoint", endpoint, 
"cos.region", region, "cos.access_key", ak, "cos.secret_key", sk, "false")
+    s3Load("cos://${bucket}${filePath}", bucket, "cos.endpoint", endpoint, 
"cos.region", region, "cos.access_key", ak, "cos.secret_key", sk, "true")
+    s3Load("cos://${bucket}${filePath}", bucket, "cos.endpoint", endpoint, 
"cos.region", region, "cos.access_key", ak, "cos.secret_key", sk, "false")
+    s3Load("cos://${bucket}${filePath}", bucket, "cos.endpoint", endpoint, 
"cos.region", region, "cos.access_key", ak, "cos.secret_key", sk, "")
+    s3Load("s3://${bucket}${filePath}", bucket, "cos.endpoint", endpoint, 
"cos.region", region, "cos.access_key", ak, "cos.secret_key", sk, "")
+    s3Load("http://${bucket}${filePath}";, bucket, "cos.endpoint", endpoint, 
"cos.region", region, "cos.access_key", ak, "cos.secret_key", sk, "")
+    s3Load("https://${bucket}${filePath}";, bucket, "cos.endpoint", endpoint, 
"cos.region", region, "cos.access_key", ak, "cos.secret_key", sk, "")
+    s3Load("http://${bucket}${filePath}";, bucket, "cos.endpoint", endpoint, 
"cos.region", region, "cos.access_key", ak, "cos.secret_key", sk, "true")
+    s3Load("http://${bucket}${filePath}";, bucket, "cos.endpoint", endpoint, 
"cos.region", region, "cos.access_key", ak, "cos.secret_key", sk, "false")
+    shouldFail {
+        s3Load("https://${bucket}${filePath}";, bucket, "", endpoint, 
"cos.region", region, "cos.access_key", ak, "cos.secret_key", sk, "false")
+    }
+
+    shouldFail {
+        s3Load("https://${bucket}${filePath}";, bucket, "", endpoint, 
"cos.region", region, "cos.access_key", "", "cos.secret_key", sk, "false")
+    }
+    shouldFail {
+        s3Load("https://${bucket}/${endpoint}${filePath}";, bucket, 
"cos.endpoint", endpoint, "cos.region", region, "cos.access_key", ak, 
"obs.secret_key", sk, "")
+
+    }
+    shouldFail {
+        s3Load("https://${bucket}/${endpoint}${filePath}";, bucket, 
"cos.endpoint", endpoint, "cos.region", region, "cos.access_key", ak, 
"cos.secret_key", sk, "true")
+    }
+    shouldFail {
+        s3Load("s3://${endpoint}/${bucket}${filePath}", bucket, 
"cos.endpoint", endpoint, "cos.region", region, "cos.access_key", ak, 
"cos.secret_key", sk, "false")
+    }
+    shouldFail {
+        s3Load("cos://${bucket}/${endpoint}${filePath}", bucket, 
"cos.endpoint", endpoint, "cos.region", region, "cos.access_key", ak, 
"cos.secret_key", sk, "false")
+    }
+    shouldFail {
+        s3Load("cos://${endpoint}/${bucket}${filePath}", bucket, 
"cos.endpoint", endpoint, "cos.region", region, "cos.access_key", ak, 
"cos.secret_key", sk, "false")
+    }
+    /*-----------------Aliyun OSS----------------*/
+/*    ak = context.config.otherConfigs.get("aliYunAk")
+    sk = context.config.otherConfigs.get("aliYunSk")
+    endpoint = "oss-cn-hongkong.aliyuncs.com"
+    region = "oss-cn-hongkong"
+    bucket = "doris-regression-hk";
+
+    outfile_path = outfile_to_S3(bucket, endpoint, region, ak, sk);
+    filePath = outfile_path.replace("s3://${bucket}", "")
+    s3Load("s3://${bucket}${filePath}", bucket, "s3.endpoint", endpoint, 
"s3.region", region, "s3.access_key", ak, "s3.secret_key", sk, "true")
+    s3Load("s3://${bucket}${filePath}", bucket, "s3.endpoint", endpoint, 
"s3.region", region, "s3.access_key", ak, "s3.secret_key", sk, "false")
+    s3Load("s3://${bucket}${filePath}", bucket, "oss.endpoint", endpoint, 
"oss.region", region, "oss.access_key", ak, "oss.secret_key", sk, "true")
+    s3Load("s3://${bucket}${filePath}", bucket, "oss.endpoint", endpoint, 
"oss.region", region, "oss.access_key", ak, "oss.secret_key", sk, "false")
+    s3Load("cos://${bucket}${filePath}", bucket, "oss.endpoint", endpoint, 
"oss.region", region, "oss.access_key", ak, "oss.secret_key", sk, "true")
+    s3Load("cos://${bucket}${filePath}", bucket, "oss.endpoint", endpoint, 
"oss.region", region, "oss.access_key", ak, "oss.secret_key", sk, "false")
+    s3Load("cos://${bucket}${filePath}", bucket, "oss.endpoint", endpoint, 
"oss.region", region, "oss.access_key", ak, "oss.secret_key", sk, "")
+    s3Load("s3://${bucket}${filePath}", bucket, "oss.endpoint", endpoint, 
"oss.region", region, "oss.access_key", ak, "oss.secret_key", sk, "")
+    s3Load("http://${bucket}${filePath}";, bucket, "oss.endpoint", endpoint, 
"oss.region", region, "oss.access_key", ak, "oss.secret_key", sk, "")
+    s3Load("https://${bucket}${filePath}";, bucket, "oss.endpoint", endpoint, 
"oss.region", region, "oss.access_key", ak, "oss.secret_key", sk, "")
+    s3Load("http://${bucket}${filePath}";, bucket, "oss.endpoint", endpoint, 
"oss.region", region, "oss.access_key", ak, "oss.secret_key", sk, "true")
+    s3Load("http://${bucket}${filePath}";, bucket, "oss.endpoint", endpoint, 
"oss.region", region, "oss.access_key", ak, "oss.secret_key", sk, "false")
+    shouldFail {
+        s3Load("https://${bucket}${filePath}";, bucket, "", endpoint, 
"oss.region", region, "oss.access_key", ak, "oss.secret_key", sk, "false")
+    }
+
+    shouldFail {
+        s3Load("https://${bucket}${filePath}";, bucket, "", endpoint, 
"oss.region", region, "oss.access_key", "", "oss.secret_key", sk, "false")
+    }
+    shouldFail {
+        s3Load("https://${bucket}/${endpoint}${filePath}";, bucket, 
"oss.endpoint", endpoint, "oss.region", region, "oss.access_key", ak, 
"oss.secret_key", sk, "")
+
+    }
+    shouldFail {
+        s3Load("https://${bucket}/${endpoint}${filePath}";, bucket, 
"oss.endpoint", endpoint, "oss.region", region, "oss.access_key", ak, 
"oss.secret_key", sk, "true")
+    }
+    shouldFail {
+        s3Load("s3://${endpoint}/${bucket}${filePath}", bucket, 
"oss.endpoint", endpoint, "oss.region", region, "oss.access_key", ak, 
"oss.secret_key", sk, "false")
+    }
+    shouldFail {
+        s3Load("oss://${bucket}/${endpoint}${filePath}", bucket, 
"oss.endpoint", endpoint, "oss.region", region, "oss.access_key", ak, 
"oss.secret_key", sk, "false")
+    }
+    shouldFail {
+        s3Load("oss://${endpoint}/${bucket}${filePath}", bucket, 
"oss.endpoint", endpoint, "oss.region", region, "oss.access_key", ak, 
"oss.secret_key", sk, "false")
+    }
+    */
+    
+
+} 
+
+
diff --git a/regression-test/suites/s3_storage/test_s3_tvf_s3_storage.groovy 
b/regression-test/suites/s3_storage/test_s3_tvf_s3_storage.groovy
index 46a3b49f167..25e3b19e17a 100644
--- a/regression-test/suites/s3_storage/test_s3_tvf_s3_storage.groovy
+++ b/regression-test/suites/s3_storage/test_s3_tvf_s3_storage.groovy
@@ -100,22 +100,23 @@ suite("test_s3_tvf_s3_storage", "p0") {
         s3_tvf("http://${bucket}.${s3_endpoint}";, "", "s3.access_key" , 
"s3.secret_key", "region", "false");
         s3_tvf("http://${bucket}.${s3_endpoint}";, "", "AWS_ACCESS_KEY" , 
"AWS_SECRET_KEY", "region", "false");
         s3_tvf("http://${bucket}.${s3_endpoint}";, "", "s3.access_key" , 
"s3.secret_key", "s3.region", "false");
-        // s3_tvf("http://${bucket}.${s3_endpoint}";, "", "cos.access_key" , 
"cos.secret_key", "region", "false");
-        // s3_tvf("http://${bucket}.${s3_endpoint}";, "", "s3.access_key" , 
"s3.secret_key", "region", "true");
-        // s3_tvf("http://${bucket}.${s3_endpoint}";, "cos.endpoint", 
"s3.access_key" , "s3.secret_key", "region", "false");
+         s3_tvf("http://${bucket}.${s3_endpoint}";, "", "cos.access_key" , 
"cos.secret_key", "region", "false");
+
+        //s3_tvf("http://${bucket}.${s3_endpoint}";, "", "s3.access_key" , 
"s3.secret_key", "region", "true");
+         s3_tvf("http://${bucket}.${s3_endpoint}";, "cos.endpoint", 
"s3.access_key" , "s3.secret_key", "region", "false");
         s3_tvf("http://${bucket}.${s3_endpoint}";, "s3.endpoint", 
"s3.access_key" , "s3.secret_key", "region", "false");
         s3_tvf("http://${s3_endpoint}/${bucket}";, "", "s3.access_key" , 
"s3.secret_key", "region", "true");
-        // s3_tvf("http://${s3_endpoint}/${bucket}";, "", "s3.access_key" , 
"s3.secret_key", "region", "false");
+        //s3_tvf("http://${s3_endpoint}/${bucket}";, "", "s3.access_key" , 
"s3.secret_key", "region", "false");
         // s3_tvf("s3://${s3_endpoint}/${bucket}", "", "s3.access_key" , 
"s3.secret_key", "region", "false");
         s3_tvf("s3://${bucket}", "s3.endpoint", "s3.access_key" , 
"s3.secret_key", "region", "false");
         s3_tvf("s3://${bucket}", "s3.endpoint", "s3.access_key" , 
"s3.secret_key", "s3.region", "true");
-        // s3_tvf("s3://${bucket}", "AWS_ENDPOINT", "AWS_ACCESS_KEY" , 
"AWS_SECRET_KEY", "region", "false");
-        // s3_tvf("s3://${bucket}", "AWS_ENDPOINT", "AWS_ACCESS_KEY" , 
"AWS_SECRET_KEY", "s3.region", "false");
-        // s3_tvf("s3://${bucket}", "AWS_ENDPOINT", "AWS_ACCESS_KEY" , 
"AWS_SECRET_KEY", "AWS_REGION", "false");
+         s3_tvf("s3://${bucket}", "AWS_ENDPOINT", "AWS_ACCESS_KEY" , 
"AWS_SECRET_KEY", "region", "false");
+         s3_tvf("s3://${bucket}", "AWS_ENDPOINT", "AWS_ACCESS_KEY" , 
"AWS_SECRET_KEY", "s3.region", "false");
+         s3_tvf("s3://${bucket}", "AWS_ENDPOINT", "AWS_ACCESS_KEY" , 
"AWS_SECRET_KEY", "AWS_REGION", "false");
         s3_tvf("s3://${bucket}", "s3.endpoint", "AWS_ACCESS_KEY" , 
"AWS_SECRET_KEY", "region", "false");
-        // s3_tvf("s3://${bucket}", "s3.endpoint", "s3.access_key" , 
"AWS_SECRET_KEY", "region", "false");
-        // s3_tvf("s3://${bucket}", "cos.endpoint", "cos.access_key" , 
"cos.secret_key", "cos.region", "false");
-        // s3_tvf("s3://${bucket}", "s3.endpoint", "cos.access_key" , 
"cos.secret_key", "cos.region", "false");
+         s3_tvf("s3://${bucket}", "s3.endpoint", "s3.access_key" , 
"AWS_SECRET_KEY", "region", "false");
+         s3_tvf("s3://${bucket}", "cos.endpoint", "cos.access_key" , 
"cos.secret_key", "cos.region", "false");
+         s3_tvf("s3://${bucket}", "s3.endpoint", "cos.access_key" , 
"cos.secret_key", "cos.region", "false");
         s3_tvf("cos://${bucket}", "s3.endpoint", "s3.access_key" , 
"s3.secret_key", "region", "false");
         s3_tvf("cos://${bucket}", "s3.endpoint", "s3.access_key" , 
"s3.secret_key", "region", "false");
 
@@ -138,22 +139,21 @@ suite("test_s3_tvf_s3_storage", "p0") {
         s3_tvf("http://${bucket}.${s3_endpoint}";, "", "s3.access_key" , 
"s3.secret_key", "region", "false");
         s3_tvf("http://${bucket}.${s3_endpoint}";, "", "AWS_ACCESS_KEY" , 
"AWS_SECRET_KEY", "region", "false");
         s3_tvf("http://${bucket}.${s3_endpoint}";, "", "s3.access_key" , 
"s3.secret_key", "s3.region", "false");
-        // s3_tvf("http://${bucket}.${s3_endpoint}";, "", "cos.access_key" , 
"cos.secret_key", "region", "false");
-        // s3_tvf("http://${bucket}.${s3_endpoint}";, "", "s3.access_key" , 
"s3.secret_key", "region", "true");
-        // s3_tvf("http://${bucket}.${s3_endpoint}";, "cos.endpoint", 
"s3.access_key" , "s3.secret_key", "region", "false");
+         s3_tvf("http://${bucket}.${s3_endpoint}";, "", "cos.access_key" , 
"cos.secret_key", "region", "false");
+        s3_tvf("http://${bucket}.${s3_endpoint}";, "", "s3.access_key" , 
"s3.secret_key", "region", "false");
+         s3_tvf("http://${bucket}.${s3_endpoint}";, "cos.endpoint", 
"s3.access_key" , "s3.secret_key", "region", "false");
         s3_tvf("http://${bucket}.${s3_endpoint}";, "s3.endpoint", 
"s3.access_key" , "s3.secret_key", "region", "false");
         s3_tvf("http://${s3_endpoint}/${bucket}";, "", "s3.access_key" , 
"s3.secret_key", "region", "true");
-        // s3_tvf("http://${s3_endpoint}/${bucket}";, "", "s3.access_key" , 
"s3.secret_key", "region", "false");
-        // s3_tvf("s3://${s3_endpoint}/${bucket}", "", "s3.access_key" , 
"s3.secret_key", "region", "false");
+        s3_tvf("http://${s3_endpoint}/${bucket}";, "", "s3.access_key" , 
"s3.secret_key", "region", "false");
         s3_tvf("s3://${bucket}", "s3.endpoint", "s3.access_key" , 
"s3.secret_key", "region", "false");
         s3_tvf("s3://${bucket}", "s3.endpoint", "s3.access_key" , 
"s3.secret_key", "s3.region", "true");
-        // s3_tvf("s3://${bucket}", "AWS_ENDPOINT", "AWS_ACCESS_KEY" , 
"AWS_SECRET_KEY", "region", "false");
-        // s3_tvf("s3://${bucket}", "AWS_ENDPOINT", "AWS_ACCESS_KEY" , 
"AWS_SECRET_KEY", "s3.region", "false");
-        // s3_tvf("s3://${bucket}", "AWS_ENDPOINT", "AWS_ACCESS_KEY" , 
"AWS_SECRET_KEY", "AWS_REGION", "false");
+         s3_tvf("s3://${bucket}", "AWS_ENDPOINT", "AWS_ACCESS_KEY" , 
"AWS_SECRET_KEY", "region", "false");
+         s3_tvf("s3://${bucket}", "AWS_ENDPOINT", "AWS_ACCESS_KEY" , 
"AWS_SECRET_KEY", "s3.region", "false");
+         s3_tvf("s3://${bucket}", "AWS_ENDPOINT", "AWS_ACCESS_KEY" , 
"AWS_SECRET_KEY", "AWS_REGION", "false");
         s3_tvf("s3://${bucket}", "s3.endpoint", "AWS_ACCESS_KEY" , 
"AWS_SECRET_KEY", "region", "false");
-        // s3_tvf("s3://${bucket}", "s3.endpoint", "s3.access_key" , 
"AWS_SECRET_KEY", "region", "false");
-        // s3_tvf("s3://${bucket}", "cos.endpoint", "cos.access_key" , 
"cos.secret_key", "cos.region", "false");
-        // s3_tvf("s3://${bucket}", "s3.endpoint", "cos.access_key" , 
"cos.secret_key", "cos.region", "false");
+         s3_tvf("s3://${bucket}", "s3.endpoint", "s3.access_key" , 
"AWS_SECRET_KEY", "region", "false");
+         s3_tvf("s3://${bucket}", "cos.endpoint", "cos.access_key" , 
"cos.secret_key", "cos.region", "false");
+         s3_tvf("s3://${bucket}", "s3.endpoint", "cos.access_key" , 
"cos.secret_key", "cos.region", "false");
         s3_tvf("cos://${bucket}", "s3.endpoint", "s3.access_key" , 
"s3.secret_key", "region", "false");
         s3_tvf("cos://${bucket}", "s3.endpoint", "s3.access_key" , 
"s3.secret_key", "region", "false");
 
@@ -176,9 +176,9 @@ suite("test_s3_tvf_s3_storage", "p0") {
         s3_tvf("http://${bucket}.${s3_endpoint}";, "", "s3.access_key" , 
"s3.secret_key", "region", "false");
         s3_tvf("http://${bucket}.${s3_endpoint}";, "", "AWS_ACCESS_KEY" , 
"AWS_SECRET_KEY", "region", "false");
         s3_tvf("http://${bucket}.${s3_endpoint}";, "", "s3.access_key" , 
"s3.secret_key", "s3.region", "false");
-        // s3_tvf("http://${bucket}.${s3_endpoint}";, "", "cos.access_key" , 
"cos.secret_key", "region", "false");
-        // s3_tvf("http://${bucket}.${s3_endpoint}";, "", "s3.access_key" , 
"s3.secret_key", "region", "true");
-        // s3_tvf("http://${bucket}.${s3_endpoint}";, "cos.endpoint", 
"s3.access_key" , "s3.secret_key", "region", "false");
+         s3_tvf("http://${bucket}.${s3_endpoint}";, "", "cos.access_key" , 
"cos.secret_key", "region", "false");
+        s3_tvf("http://${bucket}.${s3_endpoint}";, "", "s3.access_key" , 
"s3.secret_key", "region", "false");
+         s3_tvf("http://${bucket}.${s3_endpoint}";, "cos.endpoint", 
"s3.access_key" , "s3.secret_key", "region", "false");
         s3_tvf("http://${bucket}.${s3_endpoint}";, "s3.endpoint", 
"s3.access_key" , "s3.secret_key", "region", "false");
 
         // TODO(ftw): Note this case
@@ -222,22 +222,22 @@ suite("test_s3_tvf_s3_storage", "p0") {
         s3_tvf("http://${bucket}.${s3_endpoint}";, "", "s3.access_key" , 
"s3.secret_key", "region", "false");
         s3_tvf("http://${bucket}.${s3_endpoint}";, "", "AWS_ACCESS_KEY" , 
"AWS_SECRET_KEY", "region", "false");
         s3_tvf("http://${bucket}.${s3_endpoint}";, "", "s3.access_key" , 
"s3.secret_key", "s3.region", "false");
-        // s3_tvf("http://${bucket}.${s3_endpoint}";, "", "cos.access_key" , 
"cos.secret_key", "region", "false");
-        // s3_tvf("http://${bucket}.${s3_endpoint}";, "", "s3.access_key" , 
"s3.secret_key", "region", "true");
-        // s3_tvf("http://${bucket}.${s3_endpoint}";, "cos.endpoint", 
"s3.access_key" , "s3.secret_key", "region", "false");
+         s3_tvf("http://${bucket}.${s3_endpoint}";, "", "cos.access_key" , 
"cos.secret_key", "region", "false");
+         s3_tvf("http://${bucket}.${s3_endpoint}", "", "s3.access_key" , 
"s3.secret_key", "region", "false"); // FIXME: revisit use_path_style handling for this case
+         s3_tvf("http://${bucket}.${s3_endpoint}";, "cos.endpoint", 
"s3.access_key" , "s3.secret_key", "region", "false");
         s3_tvf("http://${bucket}.${s3_endpoint}";, "s3.endpoint", 
"s3.access_key" , "s3.secret_key", "region", "false");
         s3_tvf("http://${s3_endpoint}/${bucket}";, "", "s3.access_key" , 
"s3.secret_key", "region", "true");
-        // s3_tvf("http://${s3_endpoint}/${bucket}";, "", "s3.access_key" , 
"s3.secret_key", "region", "false");
-        // s3_tvf("s3://${s3_endpoint}/${bucket}", "", "s3.access_key" , 
"s3.secret_key", "region", "false");
+         s3_tvf("http://${s3_endpoint}/${bucket}";, "", "s3.access_key" , 
"s3.secret_key", "region", "false");
+        // should be supported in 2.1 & 3.0: s3_tvf("s3://${s3_endpoint}/${bucket}", 
"", "s3.access_key" , "s3.secret_key", "region", "false");
         s3_tvf("s3://${bucket}", "s3.endpoint", "s3.access_key" , 
"s3.secret_key", "region", "false");
         s3_tvf("s3://${bucket}", "s3.endpoint", "s3.access_key" , 
"s3.secret_key", "s3.region", "true");
-        // s3_tvf("s3://${bucket}", "AWS_ENDPOINT", "AWS_ACCESS_KEY" , 
"AWS_SECRET_KEY", "region", "false");
-        // s3_tvf("s3://${bucket}", "AWS_ENDPOINT", "AWS_ACCESS_KEY" , 
"AWS_SECRET_KEY", "s3.region", "false");
-        // s3_tvf("s3://${bucket}", "AWS_ENDPOINT", "AWS_ACCESS_KEY" , 
"AWS_SECRET_KEY", "AWS_REGION", "false");
+         s3_tvf("s3://${bucket}", "AWS_ENDPOINT", "AWS_ACCESS_KEY" , 
"AWS_SECRET_KEY", "region", "false");
+         s3_tvf("s3://${bucket}", "AWS_ENDPOINT", "AWS_ACCESS_KEY" , 
"AWS_SECRET_KEY", "s3.region", "false");
+         s3_tvf("s3://${bucket}", "AWS_ENDPOINT", "AWS_ACCESS_KEY" , 
"AWS_SECRET_KEY", "AWS_REGION", "false");
         s3_tvf("s3://${bucket}", "s3.endpoint", "AWS_ACCESS_KEY" , 
"AWS_SECRET_KEY", "region", "false");
-        // s3_tvf("s3://${bucket}", "s3.endpoint", "s3.access_key" , 
"AWS_SECRET_KEY", "region", "false");
-        // s3_tvf("s3://${bucket}", "cos.endpoint", "cos.access_key" , 
"cos.secret_key", "cos.region", "false");
-        // s3_tvf("s3://${bucket}", "s3.endpoint", "cos.access_key" , 
"cos.secret_key", "cos.region", "false");
+         s3_tvf("s3://${bucket}", "s3.endpoint", "s3.access_key" , 
"AWS_SECRET_KEY", "region", "false");
+         s3_tvf("s3://${bucket}", "cos.endpoint", "cos.access_key" , 
"cos.secret_key", "cos.region", "false");
+         s3_tvf("s3://${bucket}", "s3.endpoint", "cos.access_key" , 
"cos.secret_key", "cos.region", "false");
         s3_tvf("cos://${bucket}", "s3.endpoint", "s3.access_key" , 
"s3.secret_key", "region", "false");
         s3_tvf("cos://${bucket}", "s3.endpoint", "s3.access_key" , 
"s3.secret_key", "region", "false");
     } finally {

