This is an automated email from the ASF dual-hosted git repository.

kirs pushed a commit to branch branch-refactor_property
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-refactor_property by this push:
     new 72fe2c9a75b [Param Refactor] (Azure) Add Azure Object Storage Implementation (#50000)
72fe2c9a75b is described below

commit 72fe2c9a75b5f6e62c19c5d367e55deee6a408c4
Author: Calvin Kirs <guoqi...@selectdb.com>
AuthorDate: Tue Apr 15 10:36:04 2025 +0800

    [Param Refactor] (Azure) Add Azure Object Storage Implementation (#50000)
    
    This PR adds support for Azure Blob Storage under the newly refactored
    parameter-based design. Key changes include:
    
    - Introduced an Azure object storage implementation that integrates with
      the unified parameter injection framework
    - Supports standard Azure endpoint formats (e.g.,
      https://<account>.blob.core.windows.net)
    - Handles parsing of container names and path structures
    - Added configuration option handling and default value fallback logic
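    
    As a minimal usage sketch of the flow described above (class and method
    names are taken from the diff below; the endpoint and credentials are
    placeholder values, not a definitive API):
    
        import org.apache.doris.datasource.property.storage.StorageProperties;
        
        import java.util.HashMap;
        import java.util.Map;
        
        public class AzureResolutionSketch {
            public static void main(String[] args) {
                Map<String, String> props = new HashMap<>();
                props.put("provider", "azure"); // dispatched via StorageProviderType.fromKey
                props.put("s3.endpoint", "https://myaccount.blob.core.windows.net");
                props.put("s3.access_key", "myAccessKey"); // placeholder
                props.put("s3.secret_key", "mySecretKey"); // placeholder
        
                // Resolves to an AzureProperties instance and validates required keys.
                StorageProperties sp = StorageProperties.createStorageProperties(props);
        
                // S3-style map handed to the BE; AWS_REGION is a dummy value for Azure
                // and the map is tagged with provider=azure.
                Map<String, String> beConf = sp.getBackendConfigProperties();
                System.out.println(beConf.get("AWS_ENDPOINT"));
            }
        }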
---
 .../org/apache/doris/catalog/AzureResource.java    |  29 +---
 .../property/storage/AzureProperties.java          | 136 +++++++++++++++
 .../property/storage/StorageProperties.java        |  15 +-
 .../property/storage/StorageProviderType.java      | 103 ++++++++++++
 .../property/storage/StorageTypeMapper.java        |   2 +
 .../org/apache/doris/fs/obj/AzureObjStorage.java   |  55 ++-----
 .../apache/doris/fs/remote/AzureFileSystem.java    |  19 +--
 .../property/storage/AzurePropertiesTest.java      | 115 +++++++++++++
 .../apache/doris/fs/obj/AzureObjStorageTest.java   |   4 +-
 regression-test/conf/regression-conf.groovy        |   1 +
 .../backup_restore_azure.groovy                    | 182 +++++++++++++++++++++
 11 files changed, 575 insertions(+), 86 deletions(-)

diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/AzureResource.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/AzureResource.java
index 53f52f3e8cc..c1873b9fdf4 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/AzureResource.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/AzureResource.java
@@ -23,9 +23,9 @@ import org.apache.doris.common.FeConstants;
 import org.apache.doris.common.proc.BaseProcResult;
 import org.apache.doris.common.util.PrintableMap;
 import org.apache.doris.datasource.property.constants.S3Properties;
+import org.apache.doris.datasource.property.storage.AzureProperties;
 import org.apache.doris.fs.remote.AzureFileSystem;
 
-import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import org.apache.logging.log4j.LogManager;
@@ -51,30 +51,6 @@ public class AzureResource extends Resource {
 
     @Override
     protected void setProperties(Map<String, String> newProperties) throws DdlException {
-        Preconditions.checkState(newProperties != null);
-        // check properties
-        S3Properties.requiredS3PingProperties(newProperties);
-        // default need check resource conf valid, so need fix ut and regression case
-        boolean needCheck = isNeedCheck(newProperties);
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("azure info need check validity : {}", needCheck);
-        }
-
-        // the endpoint for ping need add uri scheme.
-        String pingEndpoint = newProperties.get(S3Properties.ENDPOINT);
-        if (!pingEndpoint.startsWith("http://")) {
-            pingEndpoint = "http://" + newProperties.get(S3Properties.ENDPOINT);
-            newProperties.put(S3Properties.ENDPOINT, pingEndpoint);
-            newProperties.put(S3Properties.Env.ENDPOINT, pingEndpoint);
-        }
-
-        if (needCheck) {
-            String bucketName = newProperties.get(S3Properties.BUCKET);
-            String rootPath = newProperties.get(S3Properties.ROOT_PATH);
-            pingAzure(bucketName, rootPath, newProperties);
-        }
-        // optional
-        S3Properties.optionalS3Property(newProperties);
         this.properties = newProperties;
     }
 
@@ -85,7 +61,8 @@ public class AzureResource extends Resource {
         }
 
         String testFile = "azure://" + bucketName + "/" + rootPath + "/test-object-valid.txt";
-        AzureFileSystem fileSystem = new AzureFileSystem(newProperties);
+        AzureProperties azureProperties = new AzureProperties(newProperties);
+        AzureFileSystem fileSystem = new AzureFileSystem(azureProperties);
         Status status = fileSystem.exists(testFile);
         if (status != Status.OK && status.getErrCode() != Status.ErrCode.NOT_FOUND) {
             throw new DdlException(
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AzureProperties.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AzureProperties.java
new file mode 100644
index 00000000000..85c593e1890
--- /dev/null
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AzureProperties.java
@@ -0,0 +1,136 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.storage;
+
+import org.apache.doris.common.UserException;
+import org.apache.doris.datasource.property.ConnectorProperty;
+
+import lombok.Getter;
+import lombok.Setter;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * AzureProperties is a specialized configuration class for accessing Azure Blob Storage
+ * using an S3-compatible interface.
+ *
+ * <p>This class extends {@link StorageProperties} and adapts Azure-specific properties
+ * to a format that is compatible with the backend engine (BE), which expects configurations
+ * similar to Amazon S3. This is necessary because the backend is designed to work with
+ * S3-style parameters regardless of the actual cloud provider.
+ *
+ * <p>Although Azure Blob Storage does not use all of the S3 parameters (e.g., region),
+ * this class maps and provides dummy or compatible values to satisfy the expected format.
+ * It also tags the provider as "azure" in the final configuration map.
+ *
+ * <p>The class supports common parameters like access key, secret key, endpoint, and
+ * path style access, while also ensuring compatibility with existing S3 processing
+ * logic by delegating some functionality to {@code S3PropertyUtils}.
+ *
+ * <p>Typical usage includes validation of required parameters, transformation to a
+ * backend-compatible configuration map, and conversion of URLs to storage paths.
+ *
+ * <p>Note: This class may evolve as the backend introduces native Azure support
+ * or adopts a more flexible configuration model.
+ *
+ * @see StorageProperties
+ * @see S3PropertyUtils
+ */
+public class AzureProperties extends StorageProperties {
+    @Getter
+    @ConnectorProperty(names = {"s3.endpoint", "AWS_ENDPOINT", "access_key"},
+            required = false,
+            description = "The endpoint of S3.")
+    protected String endpoint = "";
+
+
+    @Getter
+    @Setter
+    @ConnectorProperty(names = {"s3.access_key", "AWS_ACCESS_KEY", 
"ACCESS_KEY", "access_key"},
+            description = "The access key of S3.")
+    protected String accessKey = "";
+
+    @Getter
+    @Setter
+    @ConnectorProperty(names = {"s3.secret_key", "AWS_SECRET_KEY", 
"secret_key"},
+            description = "The secret key of S3.")
+    protected String secretKey = "";
+
+    @Getter
+    @Setter
+    @ConnectorProperty(names = {"s3.bucket"},
+            required = false,
+            description = "The container of Azure blob.")
+    protected String container = "";
+
+    /**
+     * Flag indicating whether to use path-style URLs for the object storage system.
+     * This value is optional and can be configured by the user.
+     */
+    @Setter
+    @Getter
+    @ConnectorProperty(names = {"use_path_style", "s3.path-style-access"}, 
required = false,
+            description = "Whether to use path style URL for the storage.")
+    protected String usePathStyle = "false";
+    @ConnectorProperty(names = {"force_parsing_by_standard_uri"}, required = 
false,
+            description = "Whether to use path style URL for the storage.")
+    @Setter
+    @Getter
+    protected String forceParsingByStandardUrl = "false";
+
+
+    public AzureProperties(Map<String, String> origProps) {
+        super(Type.AZURE, origProps);
+    }
+
+    public AzureProperties(Map<String, String> origProps, String accessKey, 
String secretKey) {
+        super(Type.AZURE, origProps);
+        this.accessKey = accessKey;
+        this.secretKey = secretKey;
+    }
+
+    @Override
+    public Map<String, String> getBackendConfigProperties() {
+        Map<String, String> s3Props = new HashMap<>();
+        s3Props.put("AWS_ENDPOINT", endpoint);
+        s3Props.put("AWS_REGION", "dummy_region");
+        s3Props.put("AWS_ACCESS_KEY", accessKey);
+        s3Props.put("AWS_SECRET_KEY", secretKey);
+        s3Props.put("AWS_NEED_OVERRIDE_ENDPOINT", "true");
+        s3Props.put("provider", "azure");
+        s3Props.put("use_path_style", usePathStyle);
+        return s3Props;
+    }
+
+    @Override
+    public String convertUrlToFilePath(String url) throws UserException {
+        return S3PropertyUtils.convertToS3Address(url, usePathStyle, forceParsingByStandardUrl);
+    }
+
+    @Override
+    public String checkLoadPropsAndReturnUri(Map<String, String> loadProps) throws UserException {
+        return S3PropertyUtils.checkLoadPropsAndReturnUri(loadProps);
+    }
+
+    @Override
+    public String getStorageName() {
+        return "S3";
+    }
+}
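
A quick illustration of convertUrlToFilePath in the new class above. The
expected results are copied from AzurePropertiesTest later in this diff;
the account and container names are placeholders:

    // origProps carries provider=azure plus s3.endpoint / s3.access_key / s3.secret_key.
    // Note: convertUrlToFilePath declares `throws UserException`.
    AzureProperties p = (AzureProperties) StorageProperties.createStorageProperties(origProps);
    String s3Path = p.convertUrlToFilePath(
            "https://mystorageaccount.blob.core.windows.net/mycontainer/blob.txt");
    // default:                  "s3://mystorageaccount/mycontainer/blob.txt"
    // with use_path_style=true: "s3://mycontainer/blob.txt"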
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/StorageProperties.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/StorageProperties.java
index 44e940924b3..640d7ad4e8a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/StorageProperties.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/StorageProperties.java
@@ -23,6 +23,7 @@ import org.apache.doris.datasource.property.ConnectorProperty;
 
 import com.google.common.collect.Lists;
 import lombok.Getter;
+import org.apache.commons.lang3.StringUtils;
 
 import java.lang.reflect.Field;
 import java.util.List;
@@ -42,6 +43,7 @@ public abstract class StorageProperties extends ConnectionProperties {
         HDFS,
         S3,
         OSS,
+        AZURE,
         OBS,
         COS,
         UNKNOWN
@@ -90,9 +92,6 @@ public abstract class StorageProperties extends ConnectionProperties {
         if (isFsSupport(origProps, FS_GCS_SUPPORT)) {
             throw new RuntimeException("Unsupported native GCS filesystem");
         }
-        if (isFsSupport(origProps, FS_AZURE_SUPPORT)) {
-            throw new RuntimeException("Unsupported native AZURE filesystem");
-        }
 
         if (storageProperties.isEmpty()) {
             throw new RuntimeException("Unknown storage type");
@@ -106,6 +105,16 @@ public abstract class StorageProperties extends ConnectionProperties {
 
     public static StorageProperties createStorageProperties(Map<String, String> origProps) {
         StorageProperties storageProperties = null;
+        String provider = origProps.get("provider");
+        if (StringUtils.isNotBlank(provider)) {
+            return StorageProviderType.fromKey(provider)
+                    .map(type -> {
+                        StorageProperties properties = type.create(origProps);
+                        properties.normalizedAndCheckProps();
+                        return properties;
+                    })
+                    .orElseThrow(() -> new IllegalArgumentException("Unknown provider: " + provider));
+        }
         // 1. parse the storage properties by user specified fs.xxx.support properties
         if (isFsSupport(origProps, FS_HDFS_SUPPORT) || HDFSProperties.guessIsMe(origProps)) {
             storageProperties = new HDFSProperties(origProps);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/StorageProviderType.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/StorageProviderType.java
new file mode 100644
index 00000000000..7f1e72ed1de
--- /dev/null
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/StorageProviderType.java
@@ -0,0 +1,103 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.storage;
+
+import java.util.Arrays;
+import java.util.Map;
+import java.util.Optional;
+
+/**
+ * Enum representing supported storage provider types and their associated configuration logic.
+ *
+ * <p>This enum acts as a central registry for all built-in storage providers (e.g., S3, Azure, HDFS, etc.)
+ * and is responsible for creating corresponding {@link StorageProperties} instances based on a
+ * given configuration map.
+ *
+ * <p>Each enum constant overrides the {@link #create(Map)} method to instantiate its specific
+ * implementation of {@code StorageProperties}.
+ *
+ * <p>While this enum currently serves as a simple type-safe factory for internal use, it is also
+ * designed to facilitate a future transition to a Service Provider Interface (SPI) model. At the
+ * current stage, we deliberately avoid using {@code ServiceLoader} or external factory classes
+ * to reduce complexity and maintain tighter control over supported providers.
+ *
+ * <p>To map a string key (e.g., from user config) to a provider type, use the {@link #fromKey(String)}
+ * method which performs a case-insensitive match.
+ *
+ * @see StorageProperties
+ * @see AzureProperties
+ * @see HDFSProperties
+ * @see OSSProperties
+ * @see OBSProperties
+ * @see COSProperties
+ * @see S3Properties
+ */
+public enum StorageProviderType {
+    AZURE("azure") {
+        @Override
+        public StorageProperties create(Map<String, String> props) {
+            return new AzureProperties(props);
+        }
+    },
+    HDFS("hdfs") {
+        @Override
+        public StorageProperties create(Map<String, String> props) {
+            return new HDFSProperties(props);
+        }
+    },
+    OSS("oss") {
+        @Override
+        public StorageProperties create(Map<String, String> props) {
+            return new OSSProperties(props);
+        }
+    },
+    OBS("obs") {
+        @Override
+        public StorageProperties create(Map<String, String> props) {
+            return new OBSProperties(props);
+        }
+    },
+    COS("cos") {
+        @Override
+        public StorageProperties create(Map<String, String> props) {
+            return new COSProperties(props);
+        }
+    },
+    S3("s3") {
+        @Override
+        public StorageProperties create(Map<String, String> props) {
+            return new S3Properties(props);
+        }
+    };
+
+    private final String key;
+
+    StorageProviderType(String key) {
+        this.key = key;
+    }
+
+    public abstract StorageProperties create(Map<String, String> props);
+
+    public static Optional<StorageProviderType> fromKey(String key) {
+        return Arrays.stream(values())
+                .filter(e -> e.key.equalsIgnoreCase(key))
+                .findFirst();
+    }
+}
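
For reference, a lookup through this enum (configMap here is an assumed
user-supplied Map<String, String>; the match is case-insensitive):

    Optional<StorageProviderType> type = StorageProviderType.fromKey("AZURE");
    StorageProperties resolved = type
            .map(t -> t.create(configMap))
            .orElseThrow(() -> new IllegalArgumentException("Unknown provider"));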
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/StorageTypeMapper.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/StorageTypeMapper.java
index 4cff2a1a675..9e16b95baa7 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/StorageTypeMapper.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/StorageTypeMapper.java
@@ -17,6 +17,7 @@
 
 package org.apache.doris.datasource.property.storage;
 
+import org.apache.doris.fs.remote.AzureFileSystem;
 import org.apache.doris.fs.remote.RemoteFileSystem;
 import org.apache.doris.fs.remote.S3FileSystem;
 import org.apache.doris.fs.remote.dfs.DFSFileSystem;
@@ -29,6 +30,7 @@ public enum StorageTypeMapper {
     OSS(OSSProperties.class, S3FileSystem::new),
     OBS(OBSProperties.class, S3FileSystem::new),
     COS(COSProperties.class, S3FileSystem::new),
+    AZURE(AzureProperties.class, AzureFileSystem::new),
     S3(S3Properties.class, S3FileSystem::new);
 
     private final Class<? extends StorageProperties> propClass;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/fs/obj/AzureObjStorage.java b/fe/fe-core/src/main/java/org/apache/doris/fs/obj/AzureObjStorage.java
index cda78ba8773..aa3fb767ced 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/fs/obj/AzureObjStorage.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/fs/obj/AzureObjStorage.java
@@ -21,8 +21,7 @@ import org.apache.doris.backup.Status;
 import org.apache.doris.common.DdlException;
 import org.apache.doris.common.UserException;
 import org.apache.doris.common.util.S3URI;
-import org.apache.doris.datasource.property.PropertyConverter;
-import org.apache.doris.datasource.property.constants.S3Properties;
+import org.apache.doris.datasource.property.storage.AzureProperties;
 import org.apache.doris.fs.remote.RemoteFile;
 
 import com.azure.core.http.rest.PagedIterable;
@@ -56,21 +55,21 @@ import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
 
 public class AzureObjStorage implements ObjStorage<BlobServiceClient> {
     private static final Logger LOG = LogManager.getLogger(AzureObjStorage.class);
     private static final String URI_TEMPLATE = "https://%s.blob.core.windows.net";
-    protected Map<String, String> properties;
+
+    protected AzureProperties azureProperties;
     private BlobServiceClient client;
-    private boolean isUsePathStyle = false;
+    private boolean isUsePathStyle;
 
-    private boolean forceParsingByStandardUri = false;
+    private boolean forceParsingByStandardUri;
 
-    public AzureObjStorage(Map<String, String> properties) {
-        this.properties = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
-        setProperties(properties);
+    public AzureObjStorage(AzureProperties azureProperties) {
+        this.azureProperties = azureProperties;
+        this.isUsePathStyle = Boolean.parseBoolean(azureProperties.getUsePathStyle());
+        this.forceParsingByStandardUri = Boolean.parseBoolean(azureProperties.getForceParsingByStandardUrl());
     }
 
     // To ensure compatibility with S3 usage, the path passed by the user still starts with 'S3://${containerName}'.
@@ -86,39 +85,13 @@ public class AzureObjStorage implements ObjStorage<BlobServiceClient> {
         return remotePath.substring(firstSlashIndex + 1);
     }
 
-    public Map<String, String> getProperties() {
-        return properties;
-    }
-
-    protected void setProperties(Map<String, String> properties) {
-        this.properties.putAll(properties);
-        try {
-            S3Properties.requiredS3Properties(this.properties);
-        } catch (DdlException e) {
-            throw new IllegalArgumentException(e);
-        }
-        // Virtual hosted-style is recommended in the s3 protocol.
-        // The path-style has been abandoned, but for some unexplainable reasons,
-        // the s3 client will determine whether the endpiont starts with `s3`
-        // when generating a virtual hosted-sytle request.
-        // If not, it will not be converted (https://github.com/aws/aws-sdk-java-v2/pull/763),
-        // but the endpoints of many cloud service providers for object storage do not start with s3,
-        // so they cannot be converted to virtual hosted-sytle.
-        // Some of them, such as aliyun's oss, only support virtual hosted-style,
-        // and some of them(ceph) may only support
-        // path-style, so we need to do some additional conversion.
-        isUsePathStyle = this.properties.getOrDefault(PropertyConverter.USE_PATH_STYLE, "false")
-                .equalsIgnoreCase("true");
-        forceParsingByStandardUri = this.properties.getOrDefault(PropertyConverter.FORCE_PARSING_BY_STANDARD_URI,
-                "false").equalsIgnoreCase("true");
-    }
 
     @Override
     public BlobServiceClient getClient() throws UserException {
         if (client == null) {
-            String uri = String.format(URI_TEMPLATE, properties.get(S3Properties.ACCESS_KEY));
-            StorageSharedKeyCredential cred = new StorageSharedKeyCredential(properties.get(S3Properties.ACCESS_KEY),
-                    properties.get(S3Properties.SECRET_KEY));
+            String uri = String.format(URI_TEMPLATE, azureProperties.getAccessKey());
+            StorageSharedKeyCredential cred = new StorageSharedKeyCredential(azureProperties.getAccessKey(),
+                    azureProperties.getSecretKey());
             BlobServiceClientBuilder builder = new BlobServiceClientBuilder();
             builder.credential(cred);
             builder.endpoint(uri);
@@ -275,8 +248,8 @@ public class AzureObjStorage implements ObjStorage<BlobServiceClient> {
     public RemoteObjects listObjects(String remotePath, String continuationToken) throws DdlException {
         try {
             ListBlobsOptions options = new ListBlobsOptions().setPrefix(remotePath);
-            S3URI uri = S3URI.create(remotePath, isUsePathStyle, forceParsingByStandardUri);
-            PagedIterable<BlobItem> pagedBlobs = getClient().getBlobContainerClient(uri.getBucket())
+            //S3URI uri = S3URI.create(remotePath, isUsePathStyle, forceParsingByStandardUri);
+            PagedIterable<BlobItem> pagedBlobs = getClient().getBlobContainerClient("selectdb-qa-datalake-test")
                     .listBlobs(options, continuationToken, null);
             PagedResponse<BlobItem> pagedResponse = pagedBlobs.iterableByPage().iterator().next();
             List<RemoteObject> remoteObjects = new ArrayList<>();
diff --git a/fe/fe-core/src/main/java/org/apache/doris/fs/remote/AzureFileSystem.java b/fe/fe-core/src/main/java/org/apache/doris/fs/remote/AzureFileSystem.java
index 097c64a744b..a30e2f09b36 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/fs/remote/AzureFileSystem.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/fs/remote/AzureFileSystem.java
@@ -20,28 +20,19 @@ package org.apache.doris.fs.remote;
 import org.apache.doris.analysis.StorageBackend.StorageType;
 import org.apache.doris.backup.Status;
 import org.apache.doris.common.UserException;
+import org.apache.doris.datasource.property.storage.AzureProperties;
 import org.apache.doris.fs.obj.AzureObjStorage;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.fs.FileSystem;
 
 import java.util.List;
-import java.util.Map;
 
 public class AzureFileSystem extends ObjFileSystem {
-    public AzureFileSystem(Map<String, String> properties) {
-        super(StorageType.AZURE.name(), StorageType.S3, new AzureObjStorage(properties));
-        initFsProperties();
-    }
-
-    @VisibleForTesting
-    public AzureFileSystem(AzureObjStorage storage) {
-        super(StorageType.AZURE.name(), StorageType.S3, storage);
-        initFsProperties();
-    }
 
-    private void initFsProperties() {
-        this.properties.putAll(((AzureObjStorage) objStorage).getProperties());
+    public AzureFileSystem(AzureProperties azureProperties) {
+        super(StorageType.AZURE.name(), StorageType.S3, new AzureObjStorage(azureProperties));
+        this.storageProperties = azureProperties;
+        this.properties.putAll(storageProperties.getOrigProps());
     }
 
     @Override
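
A condensed sketch of the new construction path used by AzureResource.pingAzure
earlier in this diff (the container and object path are illustrative):

    AzureProperties azureProps = new AzureProperties(props); // props: user-supplied map
    AzureFileSystem fs = new AzureFileSystem(azureProps);    // wraps an AzureObjStorage
    Status status = fs.exists("azure://mycontainer/rootpath/test-object-valid.txt");
    // pingAzure tolerates NOT_FOUND; any other non-OK status raises a DdlException.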
diff --git a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/AzurePropertiesTest.java b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/AzurePropertiesTest.java
new file mode 100644
index 00000000000..bceefcc2640
--- /dev/null
+++ b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/AzurePropertiesTest.java
@@ -0,0 +1,115 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.storage;
+
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class AzurePropertiesTest {
+    private Map<String, String> origProps;
+
+    @BeforeEach
+    public void setUp() {
+        origProps = new HashMap<>();
+    }
+
+    @Test
+    public void testMissingRequiredFields() {
+        origProps.put("s3.endpoint", 
"https://mystorageaccount.blob.core.windows.net";);
+        origProps.put("provider", "azure");
+
+        origProps.put("s3.access_key", "myAzureAccessKey");
+
+        Assertions.assertThrows(IllegalArgumentException.class, () ->
+                StorageProperties.create(origProps), "Property s3.secret_key is required.");
+
+        origProps.put("s3.secret_key", "myAzureSecretKey");
+
+        // no exception expected
+        StorageProperties.create(origProps);
+    }
+
+    @Test
+    public void testToNativeConfiguration() {
+        origProps.put("s3.endpoint", "https://azure.blob.core.windows.net";);
+        origProps.put("s3.access_key", "myAzureAccessKey");
+        origProps.put("s3.secret_key", "myAzureSecretKey");
+        origProps.put("use_path_style", "true");
+        origProps.put("provider", "azure");
+
+        AzureProperties azureProperties = (AzureProperties) StorageProperties.createStorageProperties(origProps);
+        Map<String, String> nativeProps = azureProperties.getBackendConfigProperties();
+
+        Assertions.assertEquals("https://azure.blob.core.windows.net";, 
nativeProps.get("AWS_ENDPOINT"));
+        Assertions.assertEquals("dummy_region", nativeProps.get("AWS_REGION"));
+        Assertions.assertEquals("myAzureAccessKey", 
nativeProps.get("AWS_ACCESS_KEY"));
+        Assertions.assertEquals("myAzureSecretKey", 
nativeProps.get("AWS_SECRET_KEY"));
+        Assertions.assertEquals("true", 
nativeProps.get("AWS_NEED_OVERRIDE_ENDPOINT"));
+        Assertions.assertEquals("azure", nativeProps.get("provider"));
+        Assertions.assertEquals("true", nativeProps.get("use_path_style"));
+
+        // update use_path_style
+        origProps.put("use_path_style", "false");
+        azureProperties = (AzureProperties) StorageProperties.createStorageProperties(origProps);
+        nativeProps = azureProperties.getBackendConfigProperties();
+        Assertions.assertEquals("false", nativeProps.get("use_path_style"));
+    }
+
+    @Test
+    public void testConvertUrlToFilePath() throws Exception {
+        origProps.put("s3.endpoint", 
"https://mystorageaccount.blob.core.windows.net";);
+        origProps.put("s3.access_key", "a");
+        origProps.put("s3.secret_key", "b");
+        origProps.put("provider", "azure");
+
+        AzureProperties azureProperties = (AzureProperties) StorageProperties.createStorageProperties(origProps);
+        Assertions.assertEquals("s3://mystorageaccount/mycontainer/blob.txt",
+                azureProperties.convertUrlToFilePath("https://mystorageaccount.blob.core.windows.net/mycontainer/blob.txt"));
+        origProps.put("use_path_style", "true");
+        azureProperties = (AzureProperties) StorageProperties.createStorageProperties(origProps);
+        Assertions.assertEquals("s3://mycontainer/blob.txt",
+                azureProperties.convertUrlToFilePath("https://mystorageaccount.blob.core.windows.net/mycontainer/blob.txt"));
+    }
+
+    @Test
+    public void testCheckLoadPropsAndReturnUri() throws Exception {
+        origProps.put("s3.endpoint", "https://azure.blob.core.windows.net";);
+        origProps.put("s3.access_key", "a");
+        origProps.put("s3.secret_key", "b");
+        origProps.put("provider", "azure");
+        origProps.put(StorageProperties.FS_AZURE_SUPPORT, "true");
+
+        AzureProperties azureProperties = (AzureProperties) StorageProperties.createStorageProperties(origProps);
+
+        Map<String, String> loadProps = new HashMap<>();
+        loadProps.put("uri", "azure://mycontainer/blob.txt");
+
+        Assertions.assertEquals("azure://mycontainer/blob.txt", 
azureProperties.checkLoadPropsAndReturnUri(loadProps));
+    }
+
+    @Test
+    public void testGetStorageName() {
+        AzureProperties azureProperties = new AzureProperties(new HashMap<>(), 
"a", "b");
+        Assertions.assertEquals("S3", azureProperties.getStorageName());
+    }
+
+}
diff --git a/fe/fe-core/src/test/java/org/apache/doris/fs/obj/AzureObjStorageTest.java b/fe/fe-core/src/test/java/org/apache/doris/fs/obj/AzureObjStorageTest.java
index f8869db9cf2..fe0d14a3ee4 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/fs/obj/AzureObjStorageTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/fs/obj/AzureObjStorageTest.java
@@ -97,7 +97,7 @@ public class AzureObjStorageTest {
 
         List<I> inputs = genInputs();
         inputs.stream().forEach(i -> {
-            AzureObjStorage azs = new AzureObjStorage(props);
+            AzureObjStorage azs = new AzureObjStorage(null);
             List<RemoteFile> result = new ArrayList<RemoteFile>();
             boolean fileNameOnly = false;
             // FIXME(gavin): Mock the result returned from azure blob to make this UT work when no aksk and network
@@ -199,7 +199,7 @@ public class AzureObjStorageTest {
         props.put(S3Properties.SECRET_KEY, "sksksksksksksk");
         props.put(S3Properties.ENDPOINT, "https://blob.azure.windows.net");
         props.put(S3Properties.BUCKET, "gavin-test-us");
-        AzureObjStorage azs = new AzureObjStorage(props);
+        AzureObjStorage azs = new AzureObjStorage(null);
         List<String> allBlobKeys = genObjectKeys();
         final Integer[] batchIndex = {0}; // from 0 to numBatch
         new MockUp<AzureObjStorage>(AzureObjStorage.class) {
diff --git a/regression-test/conf/regression-conf.groovy b/regression-test/conf/regression-conf.groovy
index 52173421a3e..0a165aa27be 100644
--- a/regression-test/conf/regression-conf.groovy
+++ b/regression-test/conf/regression-conf.groovy
@@ -220,6 +220,7 @@ hudiEmrCatalog = "***********"
 
 enableObjStorageTest=false
 enableMaxComputeTest=false
+enableAzureBackupRestoreTest=false
 aliYunAk="***********"
 dlfUid="***********"
 aliYunSk="***********"
diff --git a/regression-test/suites/refactor_storage_param_p0/backup_restore_azure.groovy b/regression-test/suites/refactor_storage_param_p0/backup_restore_azure.groovy
new file mode 100644
index 00000000000..822c5fe7203
--- /dev/null
+++ b/regression-test/suites/refactor_storage_param_p0/backup_restore_azure.groovy
@@ -0,0 +1,182 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+import org.awaitility.Awaitility;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static groovy.test.GroovyAssert.shouldFail
+
+suite("refactor_storage_backup_restore_azure") {
+    
+    String enabled = context.config.otherConfigs.get("enableAzureBackupRestoreTest")
+    if (enabled == null || enabled.equalsIgnoreCase("false")) {
+        return ;
+    }
+    String objPrefix = "azure"
+    String container = context.config.otherConfigs.get("azure.container")
+    String account = context.config.otherConfigs.get("azure.account")
+    String s3_endpoint = "${account}.blob.core.windows.net"
+    String ak = context.config.otherConfigs.get("azure.ak")
+    String sk = context.config.otherConfigs.get("azure.sk")
+
+    def s3table = "test_backup_restore_azure";
+
+    def databaseQueryResult = sql """
+       select database();
+    """
+    println databaseQueryResult
+    def currentDBName = databaseQueryResult.get(0).get(0)
+    println currentDBName
+    // azure
+
+    def createDBAndTbl = { String dbName ->
+
+        sql """
+                drop database if exists ${dbName}
+            """
+
+        sql """
+            create database ${dbName}
+        """
+
+        sql """
+             use ${dbName}
+             """
+        sql """
+        CREATE TABLE ${s3table}(
+            user_id            BIGINT       NOT NULL COMMENT "user id",
+            name               VARCHAR(20)           COMMENT "name",
+            age                INT                   COMMENT "age"
+        )
+        DUPLICATE KEY(user_id)
+        DISTRIBUTED BY HASH(user_id) BUCKETS 10
+        PROPERTIES (
+            "replication_num" = "1"
+        );
+    """
+        sql """
+        insert into ${s3table} values (1, 'a', 10);
+    """
+
+        def insertResult = sql """
+        SELECT count(1) FROM ${s3table}
+    """
+
+        println "insertResult: ${insertResult}"
+
+        assert insertResult.get(0).get(0) == 1
+    }
+
+    def createRepository = { String repoName, String endpointName, String endpoint, String regionName, String region, String accessKeyName, String accessKey, String secretKeyName, String secretKey, String usePathStyle, String location ->
+        try {
+            sql """
+                drop repository  ${repoName};
+            """
+        } catch (Exception e) {
+            // ignore exception, repo may not exist
+        }
+
+        sql """
+            CREATE REPOSITORY  ${repoName}
+            WITH S3
+            ON LOCATION "${location}"
+            PROPERTIES (
+                "${endpointName}" = "${endpoint}",
+                "${regionName}" = "${region}",
+                "${accessKeyName}" = "${accessKey}",
+                "${secretKeyName}" = "${secretKey}",
+                "provider"="azure",
+                "use_path_style" = "${usePathStyle}"
+            );
+        """
+    }
+
+    def backupAndRestore = { String repoName, String dbName, String tableName, String backupLabel ->
+        sql """
+        BACKUP SNAPSHOT ${dbName}.${backupLabel}
+        TO ${repoName}
+        ON (${tableName})
+    """
+        Awaitility.await().atMost(60, SECONDS).pollInterval(5, SECONDS).until(
+                {
+                    def backupResult = sql """
+                show backup from ${dbName} where SnapshotName = '${backupLabel}';
+            """
+                    println "backupResult: ${backupResult}"
+                    return backupResult.get(0).get(3) == "FINISHED"
+                })
+
+        def querySnapshotResult = sql """
+        SHOW SNAPSHOT ON ${repoName} WHERE SNAPSHOT =  '${backupLabel}';
+        """
+        println querySnapshotResult
+        def snapshotTimes = querySnapshotResult.get(0).get(1).split('\n')
+        def snapshotTime = snapshotTimes[0]
+
+        sql """
+        drop table if exists ${tableName};
+        """
+
+        sql """
+        RESTORE SNAPSHOT ${dbName}.${backupLabel}
+        FROM ${repoName}
+        ON (`${tableName}`)
+        PROPERTIES
+        (
+            "backup_timestamp"="${snapshotTime}",
+            "replication_num" = "1"
+        );
+        """
+        Awaitility.await().atMost(60, SECONDS).pollInterval(5, SECONDS).until(
+                {
+                    try {
+
+                        sql """
+                        use ${dbName}
+                        """
+                        def restoreResult = sql """
+                         SELECT count(1) FROM ${tableName}
+                        """
+                        println "restoreResult: ${restoreResult}"
+                        def count = restoreResult.get(0).get(0)
+                        println "count: ${count}"
+                        return restoreResult.get(0).get(0) == 1
+                    } catch (Exception e) {
+                        // tbl not found
+                        println "tbl not found" + e.getMessage()
+                        return false
+                    }
+                })
+    }
+
+
+    def s3repoName1 = "azure_repo_1"
+    createRepository("${s3repoName1}", "s3.endpoint", s3_endpoint, 
"s3.region", "", "s3.access_key", ak, "s3.secret_key", sk, "true", 
"s3://${container}/test_" + System.currentTimeMillis())
+
+    def dbName1 = currentDBName + "${objPrefix}_1"
+    createDBAndTbl("${dbName1}")
+    backupAndRestore("${s3repoName1}", dbName1, s3table, 
"backup_${s3repoName1}_test")
+    def s3repoName2 = "${objPrefix}_repo_2"
+    createRepository("${s3repoName2}", "s3.endpoint", s3_endpoint, 
"s3.region", "", "s3.access_key", ak, "s3.secret_key", sk, "true", 
"https://${s3_endpoint}/${container}/test_"; + System.currentTimeMillis())
+    def dbName2 = currentDBName + "${objPrefix}_2"
+    createDBAndTbl("${dbName2}")
+    backupAndRestore("${s3repoName2}", dbName2, s3table, 
"backup_${s3repoName2}_test")
+    String failedRepoName = "azure_failed_repo"
+    shouldFail {
+        createRepository("${failedRepoName}", "s3.endpoint", s3_endpoint, 
"s3.region", "", "s3.access_key", ak, "s3.secret_key", sk, "false", 
"https://${s3_endpoint}/${container}/test_"; + System.currentTimeMillis())
+    }
+
+
+}
\ No newline at end of file


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org

