This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 7094ee511af [feat](storage vault) Add object storage op check when creating resource (#48073)
7094ee511af is described below

commit 7094ee511af3d984727c7f6fd225e523c4fe5ab7
Author: Lei Zhang <zhang...@selectdb.com>
AuthorDate: Mon Mar 10 20:27:58 2025 +0800

    [feat](storage vault) Add object storage op check when creating resource (#48073)
    
    * When creating an `S3Resource/AzureResource`, we check whether it can be
      accessed with the `put/multiPartPut/list/head/delete` operations (a
      sketch of the probe sequence follows).
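
    A minimal, self-contained sketch of the probe sequence (types and calls
    are those introduced in the diff below; the endpoint, bucket and
    credential values are placeholders, not part of this commit):

        import org.apache.doris.backup.Status;
        import org.apache.doris.fs.obj.ObjStorage;
        import org.apache.doris.fs.obj.S3ObjStorage;

        import java.io.ByteArrayInputStream;
        import java.util.Arrays;
        import java.util.HashMap;
        import java.util.Map;

        public class PingSketch {
            public static void main(String[] args) throws Exception {
                Map<String, String> props = new HashMap<>();
                props.put("s3.endpoint", "s3.us-east-1.amazonaws.com"); // placeholder
                props.put("s3.region", "us-east-1");                    // placeholder
                props.put("s3.access_key", System.getenv("ACCESS_KEY"));
                props.put("s3.secret_key", System.getenv("SECRET_KEY"));

                // Unique object name, mirroring pingS3 in this commit.
                String testObj = "s3://my-bucket/prefix/doris-test-object-valid-"
                        + System.currentTimeMillis() + ".txt";
                // 2 * CHUNK_SIZE of data so multipartUpload stages two parts.
                byte[] data = new byte[2 * ObjStorage.CHUNK_SIZE];
                Arrays.fill(data, (byte) 'A');

                S3ObjStorage storage = new S3ObjStorage(props);
                check(storage.putObject(testObj, new ByteArrayInputStream(data), data.length), "put");
                check(storage.headObject(testObj), "head");
                storage.listObjects(testObj, null); // throws DdlException on failure
                check(storage.multipartUpload(testObj, new ByteArrayInputStream(data), data.length), "multiPartPut");
                check(storage.deleteObject(testObj), "delete");
            }

            private static void check(Status status, String op) {
                if (!Status.OK.equals(status)) {
                    throw new IllegalStateException("ping failed(" + op + "): " + status);
                }
            }
        }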
---
 .../org/apache/doris/catalog/AzureResource.java    | 58 +++++++++++---
 .../java/org/apache/doris/catalog/S3Resource.java  | 84 +++++++++++++--------
 .../org/apache/doris/fs/obj/AzureObjStorage.java   | 47 +++++++++++-
 .../java/org/apache/doris/fs/obj/ObjStorage.java   |  4 +
 .../java/org/apache/doris/fs/obj/S3ObjStorage.java | 88 ++++++++++++++++++++++
 .../apache/doris/catalog/AzureResourceTest.java    | 57 ++++++++++++++
 .../org/apache/doris/catalog/S3ResourceTest.java   | 35 +++++++++
 .../vault_p0/conf/regression-conf-custom.groovy    |  1 -
 8 files changed, 331 insertions(+), 43 deletions(-)

diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/AzureResource.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/AzureResource.java
index 53f52f3e8cc..fb04e25ad9e 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/AzureResource.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/AzureResource.java
@@ -19,11 +19,12 @@ package org.apache.doris.catalog;
 
 import org.apache.doris.backup.Status;
 import org.apache.doris.common.DdlException;
-import org.apache.doris.common.FeConstants;
 import org.apache.doris.common.proc.BaseProcResult;
 import org.apache.doris.common.util.PrintableMap;
 import org.apache.doris.datasource.property.constants.S3Properties;
-import org.apache.doris.fs.remote.AzureFileSystem;
+import org.apache.doris.fs.obj.AzureObjStorage;
+import org.apache.doris.fs.obj.ObjStorage;
+import org.apache.doris.fs.obj.RemoteObjects;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
@@ -31,6 +32,7 @@ import com.google.common.collect.Maps;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 
+import java.io.ByteArrayInputStream;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
@@ -78,21 +80,57 @@ public class AzureResource extends Resource {
         this.properties = newProperties;
     }
 
-    private static void pingAzure(String bucketName, String rootPath,
+    protected static void pingAzure(String bucketName, String rootPath,
             Map<String, String> newProperties) throws DdlException {
-        if (FeConstants.runningUnitTest) {
-            return;
+
+        Long timestamp = System.currentTimeMillis();
+        String testObj = "azure://" + bucketName + "/" + rootPath
+                + "/doris-test-object-valid-" + timestamp.toString() + ".txt";
+
+        byte[] contentData = new byte[2 * ObjStorage.CHUNK_SIZE];
+        Arrays.fill(contentData, (byte) 'A');
+        AzureObjStorage azureObjStorage = new AzureObjStorage(newProperties);
+
+        Status status = azureObjStorage.putObject(testObj, new ByteArrayInputStream(contentData), contentData.length);
+        if (!Status.OK.equals(status)) {
+            throw new DdlException(
+                    "ping azure failed(put), status: " + status + ", properties: " + new PrintableMap<>(
+                            newProperties, "=", true, false, true, false));
         }
 
-        String testFile = "azure://" + bucketName + "/" + rootPath + "/test-object-valid.txt";
-        AzureFileSystem fileSystem = new AzureFileSystem(newProperties);
-        Status status = fileSystem.exists(testFile);
-        if (status != Status.OK && status.getErrCode() != Status.ErrCode.NOT_FOUND) {
+        status = azureObjStorage.headObject(testObj);
+        if (!Status.OK.equals(status)) {
             throw new DdlException(
                     "ping azure failed(head), status: " + status + ", properties: " + new PrintableMap<>(
                             newProperties, "=", true, false, true, false));
         }
-        LOG.info("success to ping azure");
+
+        RemoteObjects remoteObjects = azureObjStorage.listObjects(testObj, null);
+        LOG.info("remoteObjects: {}", remoteObjects);
+        Preconditions.checkArgument(remoteObjects.getObjectList().size() == 1, "remoteObjects.size() must equal 1");
+
+        status = azureObjStorage.deleteObject(testObj);
+        if (!Status.OK.equals(status)) {
+            throw new DdlException(
+                    "ping azure failed(delete), status: " + status + ", properties: " + new PrintableMap<>(
+                            newProperties, "=", true, false, true, false));
+        }
+
+        status = azureObjStorage.multipartUpload(testObj,
+                new ByteArrayInputStream(contentData), contentData.length);
+        if (!Status.OK.equals(status)) {
+            throw new DdlException(
+                    "ping azure failed(multiPartPut), status: " + status + ", properties: " + new PrintableMap<>(
+                            newProperties, "=", true, false, true, false));
+        }
+
+        status = azureObjStorage.deleteObject(testObj);
+        if (!Status.OK.equals(status)) {
+            throw new DdlException(
+                    "ping azure failed(delete), status: " + status + ", properties: " + new PrintableMap<>(
+                            newProperties, "=", true, false, true, false));
+        }
+        LOG.info("Success to ping azure blob storage.");
     }
 
     @Override
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/S3Resource.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/S3Resource.java
index 26747e826fd..392b73d2280 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/S3Resource.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/S3Resource.java
@@ -19,12 +19,13 @@ package org.apache.doris.catalog;
 
 import org.apache.doris.backup.Status;
 import org.apache.doris.common.DdlException;
-import org.apache.doris.common.FeConstants;
 import org.apache.doris.common.credentials.CloudCredentialWithEndpoint;
 import org.apache.doris.common.proc.BaseProcResult;
 import org.apache.doris.common.util.PrintableMap;
 import org.apache.doris.datasource.property.constants.S3Properties;
-import org.apache.doris.fs.remote.S3FileSystem;
+import org.apache.doris.fs.obj.ObjStorage;
+import org.apache.doris.fs.obj.RemoteObjects;
+import org.apache.doris.fs.obj.S3ObjStorage;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
@@ -33,6 +34,7 @@ import com.google.gson.annotations.SerializedName;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 
+import java.io.ByteArrayInputStream;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
@@ -102,45 +104,65 @@ public class S3Resource extends Resource {
         }
         String region = S3Properties.getRegionOfEndpoint(pingEndpoint);
         properties.putIfAbsent(S3Properties.REGION, region);
-        String ak = properties.get(S3Properties.ACCESS_KEY);
-        String sk = properties.get(S3Properties.SECRET_KEY);
-        String token = properties.get(S3Properties.SESSION_TOKEN);
-        CloudCredentialWithEndpoint credential = new CloudCredentialWithEndpoint(pingEndpoint, region, ak, sk, token);
 
         if (needCheck) {
             String bucketName = properties.get(S3Properties.BUCKET);
             String rootPath = properties.get(S3Properties.ROOT_PATH);
-            pingS3(credential, bucketName, rootPath, properties);
+            pingS3(bucketName, rootPath, properties);
         }
         // optional
         S3Properties.optionalS3Property(properties);
         this.properties = properties;
     }
 
-    private static void pingS3(CloudCredentialWithEndpoint credential, String bucketName, String rootPath,
-            Map<String, String> properties) throws DdlException {
-        S3FileSystem fileSystem = new S3FileSystem(properties);
-        String testFile = "s3://" + bucketName + "/" + rootPath + "/test-object-valid.txt";
-        String content = "doris will be better";
-        if (FeConstants.runningUnitTest) {
-            return;
+    protected static void pingS3(String bucketName, String rootPath, Map<String, String> newProperties)
+            throws DdlException {
+
+        Long timestamp = System.currentTimeMillis();
+        String prefix = "s3://" + bucketName + "/" + rootPath;
+        String testObj = prefix + "/doris-test-object-valid-" + timestamp.toString() + ".txt";
+
+        byte[] contentData = new byte[2 * ObjStorage.CHUNK_SIZE];
+        Arrays.fill(contentData, (byte) 'A');
+        S3ObjStorage s3ObjStorage = new S3ObjStorage(newProperties);
+
+        Status status = s3ObjStorage.putObject(testObj, new ByteArrayInputStream(contentData), contentData.length);
+        if (!Status.OK.equals(status)) {
+            String errMsg = "pingS3 failed(put),"
+                    + " please check your endpoint, ak/sk or permissions(put/head/delete/list/multipartUpload),"
+                    + " status: " + status + ", properties: " + new PrintableMap<>(
+                            newProperties, "=", true, false, true, false);
+            throw new DdlException(errMsg);
         }
-        Status status = Status.OK;
-        try {
-            status = fileSystem.directUpload(content, testFile);
-            if (status != Status.OK) {
-                throw new DdlException(
-                        "ping s3 failed(upload), status: " + status + ", properties: " + new PrintableMap<>(
-                                properties, "=", true, false, true, false));
-            }
-        } finally {
-            if (status.ok()) {
-                Status delete = fileSystem.delete(testFile);
-                if (delete != Status.OK) {
-                    LOG.warn("delete test file failed, status: {}, properties: {}", delete, new PrintableMap<>(
-                            properties, "=", true, false, true, false));
-                }
-            }
+
+        status = s3ObjStorage.headObject(testObj);
+        if (!Status.OK.equals(status)) {
+            String errMsg = "pingS3 failed(head),"
+                    + " please check your endpoint, ak/sk or permissions(put/head/delete/list/multipartUpload),"
+                    + " status: " + status + ", properties: " + new PrintableMap<>(
+                            newProperties, "=", true, false, true, false);
+            throw new DdlException(errMsg);
+        }
+
+        RemoteObjects remoteObjects = s3ObjStorage.listObjects(testObj, null);
+        LOG.info("remoteObjects: {}", remoteObjects);
+
+        status = s3ObjStorage.multipartUpload(testObj, new ByteArrayInputStream(contentData), contentData.length);
+        if (!Status.OK.equals(status)) {
+            String errMsg = "pingS3 failed(multipartUpload),"
+                    + " please check your endpoint, ak/sk or permissions(put/head/delete/list/multipartUpload),"
+                    + " status: " + status + ", properties: " + new PrintableMap<>(
+                            newProperties, "=", true, false, true, false);
+            throw new DdlException(errMsg);
+        }
+
+        status = s3ObjStorage.deleteObject(testObj);
+        if (!Status.OK.equals(status)) {
+            String errMsg = "pingS3 failed(delete),"
+                    + " please check your endpoint, ak/sk or permissions(put/head/delete/list/multipartUpload),"
+                    + " status: " + status + ", properties: " + new PrintableMap<>(
+                            newProperties, "=", true, false, true, false);
+            throw new DdlException(errMsg);
         }
 
         LOG.info("success to ping s3");
@@ -172,7 +194,7 @@ public class S3Resource extends Resource {
             String rootPath = properties.getOrDefault(S3Properties.ROOT_PATH,
                     this.properties.get(S3Properties.ROOT_PATH));
 
-            pingS3(getS3PingCredentials(changedProperties), bucketName, rootPath, changedProperties);
+            pingS3(bucketName, rootPath, changedProperties);
         }
 
         // modify properties
diff --git a/fe/fe-core/src/main/java/org/apache/doris/fs/obj/AzureObjStorage.java b/fe/fe-core/src/main/java/org/apache/doris/fs/obj/AzureObjStorage.java
index e59bc6ac52e..7e4e0e5fd02 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/fs/obj/AzureObjStorage.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/fs/obj/AzureObjStorage.java
@@ -42,6 +42,7 @@ import com.azure.storage.blob.models.BlobItem;
 import com.azure.storage.blob.models.BlobProperties;
 import com.azure.storage.blob.models.BlobStorageException;
 import com.azure.storage.blob.models.ListBlobsOptions;
+import com.azure.storage.blob.specialized.BlockBlobClient;
 import com.azure.storage.common.StorageSharedKeyCredential;
 import org.apache.commons.lang3.tuple.Triple;
 import org.apache.http.HttpStatus;
@@ -49,16 +50,19 @@ import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.jetbrains.annotations.Nullable;
 
+import java.io.ByteArrayInputStream;
 import java.io.File;
 import java.io.InputStream;
 import java.nio.file.FileSystems;
 import java.nio.file.PathMatcher;
 import java.nio.file.Paths;
 import java.util.ArrayList;
+import java.util.Base64;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
+import java.util.UUID;
 
 public class AzureObjStorage implements ObjStorage<BlobServiceClient> {
     private static final Logger LOG = LogManager.getLogger(AzureObjStorage.class);
@@ -165,10 +169,12 @@ public class AzureObjStorage implements ObjStorage<BlobServiceClient> {
             LOG.info("get file " + remoteFilePath + " success: " + properties.toString());
             return Status.OK;
         } catch (BlobStorageException e) {
+            LOG.warn("{} getObject exception:", remoteFilePath, e);
             return new Status(
                     Status.ErrCode.COMMON_ERROR,
                     "get file from azure error: " + e.getServiceMessage());
         } catch (UserException e) {
+            LOG.warn("{} getObject exception:", remoteFilePath, e);
             return new Status(Status.ErrCode.COMMON_ERROR, "getObject "
                     + remoteFilePath + " failed: " + e.getMessage());
         }
@@ -182,10 +188,12 @@ public class AzureObjStorage implements ObjStorage<BlobServiceClient> {
             blobClient.upload(content, contentLength);
             return Status.OK;
         } catch (BlobStorageException e) {
+            LOG.warn("{} putObject exception:", remotePath, e);
             return new Status(
                     Status.ErrCode.COMMON_ERROR,
                     "Error occurred while copying the blob:: " + 
e.getServiceMessage());
         } catch (UserException e) {
+            LOG.warn("{} putObject exception:", remotePath, e);
             return new Status(Status.ErrCode.COMMON_ERROR, "putObject "
                     + remotePath + " failed: " + e.getMessage());
         }
@@ -276,8 +284,8 @@ public class AzureObjStorage implements ObjStorage<BlobServiceClient> {
     @Override
     public RemoteObjects listObjects(String remotePath, String continuationToken) throws DdlException {
         try {
-            ListBlobsOptions options = new ListBlobsOptions().setPrefix(remotePath);
             S3URI uri = S3URI.create(remotePath, isUsePathStyle, forceParsingByStandardUri);
+            ListBlobsOptions options = new ListBlobsOptions().setPrefix(uri.getKey());
             PagedIterable<BlobItem> pagedBlobs = getClient().getBlobContainerClient(uri.getBucket())
                     .listBlobs(options, continuationToken, null);
             PagedResponse<BlobItem> pagedResponse = pagedBlobs.iterableByPage().iterator().next();
@@ -408,4 +416,41 @@ public class AzureObjStorage implements ObjStorage<BlobServiceClient> {
         PagedIterable<BlobItem> pagedBlobs = client.listBlobs(options, newContinuationToken, null);
         return pagedBlobs.iterableByPage().iterator().next();
     }
+
+
+    public Status multipartUpload(String remotePath, @Nullable InputStream inputStream, long totalBytes) {
+        Status st = Status.OK;
+        long uploadedBytes = 0;
+        int bytesRead = 0;
+        byte[] buffer = new byte[CHUNK_SIZE];
+        List<String> blockIds = new ArrayList<>();
+        BlockBlobClient blockBlobClient = null;
+
+        try {
+            S3URI uri = S3URI.create(remotePath, isUsePathStyle, forceParsingByStandardUri);
+            blockBlobClient = getClient().getBlobContainerClient(uri.getBucket())
+                    .getBlobClient(uri.getKey()).getBlockBlobClient();
+            while (uploadedBytes < totalBytes && (bytesRead = inputStream.read(buffer)) != -1) {
+                String blockId = Base64.getEncoder().encodeToString(UUID.randomUUID().toString().getBytes());
+                blockIds.add(blockId);
+                blockBlobClient.stageBlock(blockId, new ByteArrayInputStream(buffer, 0, bytesRead), bytesRead);
+                uploadedBytes += bytesRead;
+            }
+            blockBlobClient.commitBlockList(blockIds);
+        } catch (Exception e) {
+            LOG.warn("remotePath:{}, ", remotePath, e);
+            st = new Status(Status.ErrCode.COMMON_ERROR, "Failed to multipartUpload " + remotePath
+                    + " reason: " + e.getMessage());
+
+            if (blockBlobClient != null) {
+                try {
+                    blockBlobClient.delete();
+                } catch (Exception e1) {
+                    LOG.warn("abort multipartUpload failed", e1);
+                }
+            }
+        }
+        return st;
+    }
 }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/fs/obj/ObjStorage.java b/fe/fe-core/src/main/java/org/apache/doris/fs/obj/ObjStorage.java
index f2315ff169c..f222dddbe83 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/fs/obj/ObjStorage.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/fs/obj/ObjStorage.java
@@ -32,6 +32,10 @@ import java.io.InputStream;
  * @param <C> cloud SDK Client
  */
 public interface ObjStorage<C> {
+
+    // CHUNK_SIZE for multi part upload
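+    // (For context: 5 MiB is the smallest part size S3 accepts for every part
+    // except the last one, so exercising a real multi-part upload requires
+    // more than CHUNK_SIZE bytes of input.)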
+    public static final int CHUNK_SIZE = 5 * 1024 * 1024;
+
     C getClient() throws UserException;
 
     Triple<String, String, String> getStsToken() throws DdlException;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/fs/obj/S3ObjStorage.java b/fe/fe-core/src/main/java/org/apache/doris/fs/obj/S3ObjStorage.java
index edcb54bf8fa..5249c9f49d8 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/fs/obj/S3ObjStorage.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/fs/obj/S3ObjStorage.java
@@ -34,8 +34,14 @@ import org.apache.logging.log4j.Logger;
 import org.jetbrains.annotations.Nullable;
 import software.amazon.awssdk.core.sync.RequestBody;
 import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest;
+import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest;
+import software.amazon.awssdk.services.s3.model.CompletedMultipartUpload;
+import software.amazon.awssdk.services.s3.model.CompletedPart;
 import software.amazon.awssdk.services.s3.model.CopyObjectRequest;
 import software.amazon.awssdk.services.s3.model.CopyObjectResponse;
+import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest;
+import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse;
 import software.amazon.awssdk.services.s3.model.Delete;
 import software.amazon.awssdk.services.s3.model.DeleteObjectRequest;
 import software.amazon.awssdk.services.s3.model.DeleteObjectResponse;
@@ -52,11 +58,15 @@ import software.amazon.awssdk.services.s3.model.PutObjectRequest;
 import software.amazon.awssdk.services.s3.model.PutObjectResponse;
 import software.amazon.awssdk.services.s3.model.S3Exception;
 import software.amazon.awssdk.services.s3.model.S3Object;
+import software.amazon.awssdk.services.s3.model.UploadPartRequest;
+import software.amazon.awssdk.services.s3.model.UploadPartResponse;
 
+import java.io.ByteArrayInputStream;
 import java.io.File;
 import java.io.InputStream;
 import java.net.URI;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
@@ -255,6 +265,7 @@ public class S3ObjStorage implements ObjStorage<S3Client> {
             LOG.info("total delete {} objects for dir {}", totalObjects, 
absolutePath);
             return Status.OK;
         } catch (DdlException e) {
+            LOG.warn("deleteObjects:", e);
             return new Status(Status.ErrCode.COMMON_ERROR, "list objects for 
delete objects failed: " + e.getMessage());
         } catch (Exception e) {
             LOG.warn(String.format("delete objects %s failed", absolutePath), 
e);
@@ -309,4 +320,81 @@ public class S3ObjStorage implements ObjStorage<S3Client> {
             throw new DdlException("Failed to list objects for S3, Error 
message: " + e.getMessage(), e);
         }
     }
+
+    public Status multipartUpload(String remotePath, @Nullable InputStream inputStream, long totalBytes) {
+        Status st = Status.OK;
+        long uploadedBytes = 0;
+        int bytesRead = 0;
+        byte[] buffer = new byte[CHUNK_SIZE];
+        int partNumber = 1;
+
+        String uploadId = null;
+        S3URI uri = null;
+        Map<Integer, String> etags = new HashMap<>();
+
+        try {
+            uri = S3URI.create(remotePath, isUsePathStyle, forceParsingByStandardUri);
+            CreateMultipartUploadRequest createMultipartUploadRequest = CreateMultipartUploadRequest.builder()
+                    .bucket(uri.getBucket())
+                    .key(uri.getKey())
+                    .build();
+            CreateMultipartUploadResponse createMultipartUploadResponse = getClient()
+                    .createMultipartUpload(createMultipartUploadRequest);
+
+            uploadId = createMultipartUploadResponse.uploadId();
+
+            while (uploadedBytes < totalBytes && (bytesRead = inputStream.read(buffer)) != -1) {
+                UploadPartRequest uploadPartRequest = UploadPartRequest.builder()
+                        .bucket(uri.getBucket())
+                        .key(uri.getKey())
+                        .uploadId(uploadId)
+                        .partNumber(partNumber).build();
+                RequestBody body = RequestBody
+                        .fromInputStream(new ByteArrayInputStream(buffer, 0, bytesRead), bytesRead);
+                UploadPartResponse uploadPartResponse = getClient().uploadPart(uploadPartRequest, body);
+
+                etags.put(partNumber, uploadPartResponse.eTag());
+                partNumber++;
+                uploadedBytes += bytesRead;
+            }
+
+            List<CompletedPart> completedParts = etags.entrySet().stream()
+                    .map(entry -> CompletedPart.builder()
+                            .partNumber(entry.getKey())
+                            .eTag(entry.getValue())
+                            .build())
+                    .collect(Collectors.toList());
+            CompletedMultipartUpload completedMultipartUpload = CompletedMultipartUpload.builder()
+                    .parts(completedParts)
+                    .build();
+
+            CompleteMultipartUploadRequest completeMultipartUploadRequest = CompleteMultipartUploadRequest.builder()
+                    .bucket(uri.getBucket())
+                    .key(uri.getKey())
+                    .uploadId(uploadId)
+                    .multipartUpload(completedMultipartUpload)
+                    .build();
+
+            getClient().completeMultipartUpload(completeMultipartUploadRequest);
+        } catch (Exception e) {
+            LOG.warn("remotePath:{}, ", remotePath, e);
+            st = new Status(Status.ErrCode.COMMON_ERROR, "Failed to multipartUpload " + remotePath
+                    + " reason: " + e.getMessage());
+
+            if (uri != null && uploadId != null) {
+                try {
+                    AbortMultipartUploadRequest abortMultipartUploadRequest = AbortMultipartUploadRequest.builder()
+                            .bucket(uri.getBucket())
+                            .key(uri.getKey())
+                            .uploadId(uploadId)
+                            .build();
+                    getClient().abortMultipartUpload(abortMultipartUploadRequest);
+                } catch (Exception e1) {
+                    LOG.warn("Failed to abort multipartUpload " + remotePath, e1);
+                }
+            }
+        }
+        return st;
+    }
 }
diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/AzureResourceTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/AzureResourceTest.java
new file mode 100644
index 00000000000..cbf50d6cc58
--- /dev/null
+++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/AzureResourceTest.java
@@ -0,0 +1,57 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.catalog;
+
+import org.apache.doris.common.DdlException;
+
+import com.google.common.base.Strings;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class AzureResourceTest {
+    private static final Logger LOG = LogManager.getLogger(AzureResourceTest.class);
+
+    @Test
+    public void testPingAzure() {
+        try {
+            String azureAccountName = System.getenv("AZURE_ACCOUNT_NAME");
+            String azureAccountKey = System.getenv("AZURE_ACCOUNT_KEY");
+            String azureContainerName = System.getenv("AZURE_CONTAINER_NAME");
+
+            Assumptions.assumeTrue(!Strings.isNullOrEmpty(azureAccountName), "AZURE_ACCOUNT_NAME isNullOrEmpty.");
+            Assumptions.assumeTrue(!Strings.isNullOrEmpty(azureAccountKey), "AZURE_ACCOUNT_KEY isNullOrEmpty.");
+            Assumptions.assumeTrue(!Strings.isNullOrEmpty(azureContainerName), "AZURE_CONTAINER_NAME isNullOrEmpty.");
+
+            Map<String, String> properties = new HashMap<>();
+            properties.put("s3.endpoint", "endpoint");
+            properties.put("s3.region", "region");
+            properties.put("s3.access_key", azureAccountName);
+            properties.put("s3.secret_key", azureAccountKey);
+            AzureResource.pingAzure(azureContainerName, "fe_ut_prefix", properties);
+        } catch (DdlException e) {
+            LOG.info("testPingAzure exception:", e);
+            Assertions.fail(e.getMessage());
+        }
+    }
+}
diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/S3ResourceTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/S3ResourceTest.java
index 4e620d56903..5f2daf94cc3 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/catalog/S3ResourceTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/S3ResourceTest.java
@@ -30,10 +30,14 @@ import org.apache.doris.mysql.privilege.AccessControllerManager;
 import org.apache.doris.mysql.privilege.PrivPredicate;
 import org.apache.doris.qe.ConnectContext;
 
+import com.google.common.base.Strings;
 import mockit.Expectations;
 import mockit.Injectable;
 import mockit.Mocked;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.junit.Assert;
+import org.junit.Assume;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -46,6 +50,7 @@ import java.util.HashMap;
 import java.util.Map;
 
 public class S3ResourceTest {
+    private static final Logger LOG = LogManager.getLogger(S3ResourceTest.class);
     private String name;
     private String type;
 
@@ -238,4 +243,34 @@ public class S3ResourceTest {
         s3Resource.setProperties(properties);
         Assert.assertEquals(s3Resource.getProperty(S3Properties.ENDPOINT), "https://aaa");
     }
+
+    @Test
+    public void testPingS3() {
+        try {
+            String accessKey = System.getenv("ACCESS_KEY");
+            String secretKey = System.getenv("SECRET_KEY");
+            String bucket = System.getenv("BUCKET");
+            String endpoint = System.getenv("ENDPOINT");
+            String region = System.getenv("REGION");
+            String provider = System.getenv("PROVIDER");
+
+            Assume.assumeTrue("ACCESS_KEY isNullOrEmpty.", 
!Strings.isNullOrEmpty(accessKey));
+            Assume.assumeTrue("SECRET_KEY isNullOrEmpty.", 
!Strings.isNullOrEmpty(secretKey));
+            Assume.assumeTrue("BUCKET isNullOrEmpty.", 
!Strings.isNullOrEmpty(bucket));
+            Assume.assumeTrue("ENDPOINT isNullOrEmpty.", 
!Strings.isNullOrEmpty(endpoint));
+            Assume.assumeTrue("REGION isNullOrEmpty.", 
!Strings.isNullOrEmpty(region));
+            Assume.assumeTrue("PROVIDER isNullOrEmpty.", 
!Strings.isNullOrEmpty(provider));
+
+            Map<String, String> properties = new HashMap<>();
+            properties.put("s3.endpoint", endpoint);
+            properties.put("s3.region", region);
+            properties.put("s3.access_key", accessKey);
+            properties.put("s3.secret_key", secretKey);
+            properties.put("provider", provider);
+            S3Resource.pingS3(bucket, "fe_ut_prefix", properties);
+        } catch (DdlException e) {
+            LOG.info("testPingS3 exception:", e);
+            Assert.fail(e.getMessage());
+        }
+    }
 }
diff --git a/regression-test/pipeline/vault_p0/conf/regression-conf-custom.groovy b/regression-test/pipeline/vault_p0/conf/regression-conf-custom.groovy
index adcf602a116..3308682b0ba 100644
--- a/regression-test/pipeline/vault_p0/conf/regression-conf-custom.groovy
+++ b/regression-test/pipeline/vault_p0/conf/regression-conf-custom.groovy
@@ -44,5 +44,4 @@ extMinioSk = "minioadmin"
 extMinioRegion = "us-east-1"
 extMinioBucket = "test-bucket"
 
-s3Source = "aliyun"
 s3Endpoint = "oss-cn-hongkong-internal.aliyuncs.com"


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org
