This is an automated email from the ASF dual-hosted git repository.

morrysnow pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
     new fcafd994579 branch-3.1: [feat](refactor-param) Add New Metastore Adaptation #50254 (#52434)
fcafd994579 is described below

commit fcafd9945791b3f76141fa6044377fd1b4c73ba0
Author: Calvin Kirs <[email protected]>
AuthorDate: Sat Jun 28 22:50:41 2025 +0800

    branch-3.1: [feat](refactor-param) Add New Metastore Adaptation #50254 (#52434)
    
    cherry picked #50254
---
 .../doris/common/CatalogConfigFileUtils.java       |  56 ++++--
 .../property/metastore/AWSGlueProperties.java      | 131 ++++++++++++
 .../property/metastore/AliyunDLFProperties.java    | 116 +++++++++++
 .../property/metastore/DataProcProperties.java     |  37 ++++
 .../metastore/FileMetastoreProperties.java         |  46 +++++
 .../property/metastore/HMSProperties.java          | 140 +++++++++++++
 .../property/metastore/IcebergRestProperties.java  |  56 ++++++
 .../property/metastore/MetastoreProperties.java    | 181 +++++++++++++++++
 .../property/metastore/AWSGluePropertiesTest.java  | 172 ++++++++++++++++
 .../datasource/property/metastore/AWSTest.java     | 101 ++++++++++
 .../metastore/AliyunDLFPropertiesTest.java         | 130 ++++++++++++
 .../property/metastore/GlueCatalogTest.java        | 110 ++++++++++
 .../property/metastore/HMSIntegrationTest.java     | 222 +++++++++++++++++++++
 .../property/metastore/HMSPropertiesTest.java      | 122 +++++++++++
 14 files changed, 1598 insertions(+), 22 deletions(-)

diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/CatalogConfigFileUtils.java b/fe/fe-common/src/main/java/org/apache/doris/common/CatalogConfigFileUtils.java
index 6a1c552d972..bce08713131 100644
--- a/fe/fe-common/src/main/java/org/apache/doris/common/CatalogConfigFileUtils.java
+++ b/fe/fe-common/src/main/java/org/apache/doris/common/CatalogConfigFileUtils.java
@@ -24,22 +24,24 @@ import org.apache.hadoop.hive.conf.HiveConf;
 
 import java.io.File;
 import java.util.function.BiConsumer;
+import java.util.function.Supplier;
 
 public class CatalogConfigFileUtils {
 
     /**
-     * Loads configuration files from the specified directory into a Hadoop Configuration or HiveConf object.
+     * Generic method to load configuration files (e.g., Hadoop or Hive) from a directory.
      *
-     * @param resourcesPath The comma-separated list of configuration resource files to load.
-     *                      This must not be null or empty.
-     * @param configDir The base directory where the configuration files are located.
-     * @param addResourceMethod A method reference to add the resource to the configuration.
-     * @param <T> The type of configuration object (either Hadoop Configuration or HiveConf).
-     * @return The populated configuration object.
-     * @throws IllegalArgumentException If the provided resourcesPath is blank, or if any of the specified
-     *                                  configuration files do not exist or are not regular files.
+     * @param resourcesPath     Comma-separated list of resource file names to be loaded.
+     * @param configDir         Directory prefix where the configuration files reside.
+     * @param configSupplier    Supplier that creates a new configuration object
+     *                          (e.g., new Configuration or new HiveConf).
+     * @param addResourceMethod Method to add a resource file to the configuration object.
+     * @param <T>               Type of the configuration (e.g., Configuration or HiveConf).
+     * @return A configuration object loaded with the given resource files.
+     * @throws IllegalArgumentException if the resourcesPath is empty or if any file does not exist.
      */
     private static <T> T loadConfigFromDir(String resourcesPath, String configDir,
+                                           Supplier<T> configSupplier,
                                            BiConsumer<T, Path> addResourceMethod) {
         // Check if the provided resourcesPath is blank and throw an exception if so.
         if (StringUtils.isBlank(resourcesPath)) {
@@ -47,7 +49,7 @@ public class CatalogConfigFileUtils {
         }
 
         // Create a new configuration object.
-        T conf = (T) (configDir.equals(Config.hadoop_config_dir) ? new Configuration(false) : new HiveConf());
+        T conf = configSupplier.get();
 
         // Iterate over the comma-separated list of resource files.
         for (String resource : resourcesPath.split(",")) {
@@ -68,24 +70,34 @@ public class CatalogConfigFileUtils {
     }
 
     /**
-     * Loads the Hadoop configuration files from the specified directory.
-     * @param resourcesPath The comma-separated list of Hadoop configuration resource files to load.
-     * @return The Hadoop `Configuration` object with the loaded configuration files.
-     * @throws IllegalArgumentException If the provided `resourcesPath` is blank, or if any of the specified
-     *                                  configuration files do not exist or are not regular files.
+     * Loads a Hadoop Configuration object from a list of files under the specified config directory.
+     *
+     * @param resourcesPath Comma-separated list of file names to be loaded.
+     * @return A Hadoop Configuration object.
+     * @throws IllegalArgumentException if the input is invalid or files are missing.
      */
     public static Configuration loadConfigurationFromHadoopConfDir(String resourcesPath) {
-        return loadConfigFromDir(resourcesPath, Config.hadoop_config_dir, Configuration::addResource);
+        return loadConfigFromDir(
+                resourcesPath,
+                Config.hadoop_config_dir,
+                Configuration::new,
+                Configuration::addResource
+        );
     }
 
     /**
-     * Loads the Hive configuration files from the specified directory.
-     * @param resourcesPath The comma-separated list of Hive configuration resource files to load.
-     * @return The HiveConf object with the loaded configuration files.
-     * @throws IllegalArgumentException If the provided `resourcesPath` is blank, or if any of the specified
-     *                                  configuration files do not exist or are not regular files.
+     * Loads a HiveConf object from a list of files under the specified config directory.
+     *
+     * @param resourcesPath Comma-separated list of file names to be loaded.
+     * @return A HiveConf object.
+     * @throws IllegalArgumentException if the input is invalid or files are missing.
      */
     public static HiveConf loadHiveConfFromHiveConfDir(String resourcesPath) {
-        return loadConfigFromDir(resourcesPath, Config.hadoop_config_dir, HiveConf::addResource);
+        return loadConfigFromDir(
+                resourcesPath,
+                Config.hadoop_config_dir,
+                HiveConf::new,
+                HiveConf::addResource
+        );
     }
 }
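
Usage sketch for the refactored helpers (illustrative, not part of this commit;
the resource file names are placeholders and must exist under the configured
config directory):

    // Both helpers now delegate to loadConfigFromDir(); the Supplier decides
    // which concrete config object gets created, removing the unchecked cast.
    Configuration hadoopConf =
            CatalogConfigFileUtils.loadConfigurationFromHadoopConfDir("core-site.xml,hdfs-site.xml");
    HiveConf hiveConf = CatalogConfigFileUtils.loadHiveConfFromHiveConfDir("hive-site.xml");
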
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/AWSGlueProperties.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/AWSGlueProperties.java
new file mode 100644
index 00000000000..2be15b05f15
--- /dev/null
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/AWSGlueProperties.java
@@ -0,0 +1,131 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.metastore;
+
+import org.apache.doris.datasource.property.ConnectorProperty;
+
+import com.google.common.collect.Maps;
+import lombok.Getter;
+import org.apache.commons.lang3.StringUtils;
+
+import java.util.Map;
+import java.util.regex.Pattern;
+
+public class AWSGlueProperties extends MetastoreProperties {
+
+    @ConnectorProperty(names = {"glue.endpoint", "aws.endpoint", 
"aws.glue.endpoint"},
+            description = "The endpoint of the AWS Glue.")
+    private String glueEndpoint = "";
+
+    @ConnectorProperty(names = {"glue.access_key",
+            "aws.glue.access-key", 
"client.credentials-provider.glue.access_key"},
+            description = "The access key of the AWS Glue.")
+    private String glueAccessKey = "";
+
+    @ConnectorProperty(names = {"glue.secret_key",
+            "aws.glue.secret-key", 
"client.credentials-provider.glue.secret_key"},
+            description = "The secret key of the AWS Glue.")
+    private String glueSecretKey = "";
+
+    @ConnectorProperty(names = {"glue.catalog_id"},
+            description = "The catalog id of the AWS Glue.",
+            supported = false)
+    private String glueCatalogId = "";
+
+    @ConnectorProperty(names = {"glue.iam_role"},
+            description = "The IAM role the AWS Glue.",
+            supported = false)
+    private String glueIAMRole = "";
+
+    @ConnectorProperty(names = {"glue.external_id"},
+            description = "The external id of the AWS Glue.",
+            supported = false)
+    private String glueExternalId = "";
+
+    public AWSGlueProperties(Map<String, String> origProps) {
+        super(Type.GLUE, origProps);
+    }
+
+    /**
+     * The pattern of the AWS Glue endpoint.
+     * FYI: https://docs.aws.amazon.com/general/latest/gr/glue.html#glue_region
+     * eg:
+     * glue.us-east-1.amazonaws.com
+     * <p>
+     * glue-fips.us-east-1.api.aws
+     * <p>
+     * glue-fips.us-east-1.amazonaws.com
+     * <p>
+     * glue.us-east-1.api.aws
+     */
+    private static final Pattern ENDPOINT_PATTERN = Pattern.compile(
+            "^(https?://)?(glue|glue-fips)\\.[a-z0-9-]+\\.(api\\.aws|amazonaws\\.com)$"
+    );
+
+    @Override
+    protected void checkRequiredProperties() {
+        if (StringUtils.isBlank(glueAccessKey)
+                || StringUtils.isBlank(glueSecretKey)
+                || StringUtils.isBlank(glueEndpoint)) {
+            throw new IllegalArgumentException("AWS Glue 
properties(glue.access_key, glue.secret_key, glue.endpoint) "
+                    + "are not set correctly.");
+        }
+        checkGlueEndpoint();
+    }
+
+    private void checkGlueEndpoint() {
+        if (!ENDPOINT_PATTERN.matcher(glueEndpoint).matches()) {
+            throw new IllegalArgumentException("AWS Glue properties 
(glue.endpoint) are not set correctly: "
+                    + glueEndpoint);
+        }
+    }
+
+    public AWSCatalogMetastoreClientCredentials getAWSCatalogMetastoreClientCredentials() {
+        return new AWSCatalogMetastoreClientCredentials(glueEndpoint, glueAccessKey, glueSecretKey);
+    }
+
+    public void toIcebergGlueCatalogProperties(Map<String, String> catalogProps) {
+        // See AwsClientProperties.java for property keys
+        catalogProps.put("client.credentials-provider",
+                "com.amazonaws.glue.catalog.credentials.ConfigurationAWSCredentialsProvider2x");
+        catalogProps.put("client.credentials-provider.glue.access_key", glueAccessKey);
+        catalogProps.put("client.credentials-provider.glue.secret_key", glueSecretKey);
+        catalogProps.put("client.region", getRegionFromGlueEndpoint());
+    }
+
+    private String getRegionFromGlueEndpoint() {
+        // https://glue.ap-northeast-1.amazonaws.com
+        // -> ap-northeast-1
+        return glueEndpoint.split("\\.")[1];
+    }
+
+    @Getter
+    public static class AWSCatalogMetastoreClientCredentials {
+        private Map<String, String> credentials = Maps.newHashMap();
+
+        // Used for AWSCatalogMetastoreClient
+        // See AWSGlueClientFactory in AWSCatalogMetastoreClient.java
+        public AWSCatalogMetastoreClientCredentials(String endpoint, String ak, String sk) {
+            credentials.put("aws.catalog.credentials.provider.factory.class",
+                    "com.amazonaws.glue.catalog.credentials.ConfigurationAWSCredentialsProviderFactory");
+            credentials.put("aws.glue.access-key", ak);
+            credentials.put("aws.glue.secret-key", sk);
+            credentials.put("aws.glue.endpoint", endpoint);
+        }
+    }
+}
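
Usage sketch for the Glue adaptation (illustrative, not part of this commit;
endpoint and credentials are placeholders, mirroring the flow exercised in
AWSGluePropertiesTest below):

    Map<String, String> props = new HashMap<>();
    props.put("type", "iceberg");
    props.put("iceberg.catalog.type", "glue");
    props.put("glue.endpoint", "https://glue.us-east-1.amazonaws.com"); // must match ENDPOINT_PATTERN
    props.put("glue.access_key", "ak"); // placeholder
    props.put("glue.secret_key", "sk"); // placeholder

    AWSGlueProperties glue = (AWSGlueProperties) MetastoreProperties.create(props);
    Map<String, String> icebergProps = new HashMap<>();
    glue.toIcebergGlueCatalogProperties(icebergProps);
    // icebergProps now holds the credentials-provider class, the AK/SK entries,
    // and "client.region" parsed from the endpoint ("us-east-1" here).
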
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/AliyunDLFProperties.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/AliyunDLFProperties.java
new file mode 100644
index 00000000000..c0096baddc4
--- /dev/null
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/AliyunDLFProperties.java
@@ -0,0 +1,116 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.metastore;
+
+import org.apache.doris.common.UserException;
+import org.apache.doris.datasource.property.ConnectorProperty;
+
+import com.google.common.base.Strings;
+import com.google.common.collect.Maps;
+import lombok.Getter;
+import org.apache.paimon.options.Options;
+
+import java.util.Map;
+
+public class AliyunDLFProperties extends MetastoreProperties {
+
+    @ConnectorProperty(names = {"dlf.access_key", "dlf.catalog.accessKeyId"},
+            description = "The access key of the Aliyun DLF.")
+    public String dlfAccessKey = "";
+
+    @ConnectorProperty(names = {"dlf.secret_key", 
"dlf.catalog.accessKeySecret"},
+            description = "The secret key of the Aliyun DLF.")
+    private String dlfSecretKey = "";
+
+    @ConnectorProperty(names = {"dlf.region"},
+            description = "The region of the Aliyun DLF.")
+    private String dlfRegion = "";
+
+    @ConnectorProperty(names = {"dlf.endpoint", "dlf.catalog.endpoint"},
+            required = false,
+            description = "The region of the Aliyun DLF.")
+    private String dlfEndpoint = "";
+
+    @ConnectorProperty(names = {"dlf.uid", "dlf.catalog.uid"},
+            description = "The uid of the Aliyun DLF.")
+    private String dlfUid = "";
+
+    @ConnectorProperty(names = {"dlf.access.public", 
"dlf.catalog.accessPublic"},
+            required = false,
+            description = "Enable public access to Aliyun DLF.")
+    private String dlfAccessPublic = "false";
+
+    private static final String DLF_PREFIX = "dlf.";
+
+    @Getter
+    private final Map<String, String> otherDlfProps = Maps.newHashMap();
+
+    private Map<String, String> dlfConnectProps = Maps.newHashMap();
+
+    public AliyunDLFProperties(Map<String, String> origProps) {
+        super(Type.DLF, origProps);
+    }
+
+    @Override
+    protected void initNormalizeAndCheckProps() throws UserException {
+        super.initNormalizeAndCheckProps();
+        // Other properties that start with "dlf." will be saved in otherDlfProps,
+        // and passed to the DLF client.
+        for (Map.Entry<String, String> entry : origProps.entrySet()) {
+            if (entry.getKey().startsWith(DLF_PREFIX) && !matchedProperties.containsKey(entry.getKey())) {
+                otherDlfProps.put(entry.getKey(), entry.getValue());
+            }
+        }
+        initDlfConnectProps();
+    }
+
+    private void initDlfConnectProps() {
+        dlfConnectProps.put("dlf.catalog.region", dlfRegion);
+        dlfConnectProps.put("dlf.catalog.endpoint", 
getEndpointOrFromRegion(dlfEndpoint, dlfRegion, dlfAccessPublic));
+        dlfConnectProps.put("dlf.catalog.proxyMode", "DLF_ONLY");
+        dlfConnectProps.put("dlf.catalog.accessKeyId", dlfAccessKey);
+        dlfConnectProps.put("dlf.catalog.accessKeySecret", dlfSecretKey);
+        dlfConnectProps.put("dlf.catalog.accessPublic", dlfAccessPublic);
+        dlfConnectProps.put("dlf.catalog.uid", dlfUid);
+        dlfConnectProps.put("dlf.catalog.createDefaultDBIfNotExist", "false");
+        otherDlfProps.forEach(dlfConnectProps::put);
+    }
+
+    public void toPaimonOptions(Options options) {
+        // See DataLakeConfig.java for property keys
+        dlfConnectProps.forEach(options::set);
+    }
+
+    private String getEndpointOrFromRegion(String endpoint, String region, String dlfAccessPublic) {
+        if (!Strings.isNullOrEmpty(endpoint)) {
+            return endpoint;
+        } else {
+            // https://www.alibabacloud.com/help/en/dlf/dlf-1-0/regions-and-endpoints
+            if ("true".equalsIgnoreCase(dlfAccessPublic)) {
+                return "dlf." + region + ".aliyuncs.com";
+            } else {
+                return "dlf-vpc." + region + ".aliyuncs.com";
+            }
+        }
+    }
+
+    @Override
+    protected String getResourceConfigPropName() {
+        return "dlf.resource_config";
+    }
+}
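
Endpoint-resolution sketch (illustrative values, not part of this commit):
when "dlf.endpoint" is not set, the endpoint is derived from the region and
the public-access flag:

    Map<String, String> props = new HashMap<>();
    props.put("paimon.catalog.type", "dlf");
    props.put("dlf.access_key", "ak");       // placeholder
    props.put("dlf.secret_key", "sk");       // placeholder
    props.put("dlf.region", "cn-hangzhou");
    props.put("dlf.uid", "123");             // placeholder
    props.put("dlf.access.public", "false"); // -> dlf-vpc.cn-hangzhou.aliyuncs.com

    AliyunDLFProperties dlf = (AliyunDLFProperties) MetastoreProperties.create(props);
    Options paimonOptions = new Options();
    dlf.toPaimonOptions(paimonOptions); // fills the dlf.catalog.* keys, proxyMode=DLF_ONLY
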
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/DataProcProperties.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/DataProcProperties.java
new file mode 100644
index 00000000000..2007fd59801
--- /dev/null
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/DataProcProperties.java
@@ -0,0 +1,37 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.metastore;
+
+import org.apache.doris.datasource.property.ConnectorProperty;
+
+import java.util.Map;
+
+public class DataProcProperties extends MetastoreProperties {
+    @ConnectorProperty(names = {"hive.metastore.uri"},
+            description = "The uri of the hive metastore in DataProc")
+    private String hiveMetastoreUri = "";
+
+    public DataProcProperties(Map<String, String> origProps) {
+        super(Type.DATAPROC, origProps);
+    }
+
+    @Override
+    protected void checkRequiredProperties() {
+
+    }
+}
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/FileMetastoreProperties.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/FileMetastoreProperties.java
new file mode 100644
index 00000000000..e4c4800b2a6
--- /dev/null
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/FileMetastoreProperties.java
@@ -0,0 +1,46 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.metastore;
+
+import org.apache.doris.common.UserException;
+import org.apache.doris.datasource.property.storage.StorageProperties;
+
+import java.util.List;
+import java.util.Map;
+
+public class FileMetastoreProperties extends MetastoreProperties {
+
+    private List<StorageProperties> storageProperties;
+
+    public FileMetastoreProperties(Map<String, String> origProps) {
+        super(Type.FILE_SYSTEM, origProps);
+        try {
+            storageProperties = StorageProperties.createAll(origProps);
+        } catch (UserException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    public List<StorageProperties> getStorageProperties() {
+        return storageProperties;
+    }
+
+    @Override
+    protected void checkRequiredProperties() {
+    }
+}
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/HMSProperties.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/HMSProperties.java
new file mode 100644
index 00000000000..b7c18a3e74a
--- /dev/null
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/HMSProperties.java
@@ -0,0 +1,140 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.metastore;
+
+import org.apache.doris.common.CatalogConfigFileUtils;
+import org.apache.doris.common.UserException;
+import org.apache.doris.datasource.property.ConnectorProperty;
+
+import com.google.common.base.Strings;
+import com.google.common.collect.Maps;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.paimon.options.Options;
+
+import java.util.HashMap;
+import java.util.Map;
+
+@Slf4j
+public class HMSProperties extends MetastoreProperties {
+
+    @ConnectorProperty(names = {"hive.metastore.uris"},
+            description = "The uri of the hive metastore.")
+    private String hiveMetastoreUri = "";
+
+    @ConnectorProperty(names = {"hive.metastore.authentication.type"},
+            required = false,
+            description = "The authentication type of the hive metastore.")
+    private String hiveMetastoreAuthenticationType = "none";
+
+    @ConnectorProperty(names = {"hive.conf.resources"},
+            required = false,
+            description = "The conf resources of the hive metastore.")
+    private String hiveConfResourcesConfig = "";
+
+    @ConnectorProperty(names = {"hive.metastore.service.principal"},
+            required = false,
+            description = "The service principal of the hive metastore.")
+    private String hiveMetastoreServicePrincipal = "";
+
+    @ConnectorProperty(names = {"hive.metastore.client.principal"},
+            required = false,
+            description = "The client principal of the hive metastore.")
+    private String hiveMetastoreClientPrincipal = "";
+
+    @ConnectorProperty(names = {"hive.metastore.client.keytab"},
+            required = false,
+            description = "The client keytab of the hive metastore.")
+    private String hiveMetastoreClientKeytab = "";
+
+    private Map<String, String> hiveConfParams;
+
+    private Map<String, String> hmsConnectionProperties;
+
+    public HMSProperties(Map<String, String> origProps) {
+        super(Type.HMS, origProps);
+    }
+
+    @Override
+    protected String getResourceConfigPropName() {
+        return "hive.conf.resources";
+    }
+
+    @Override
+    protected void checkRequiredProperties() {
+        super.checkRequiredProperties();
+        if (!Strings.isNullOrEmpty(hiveConfResourcesConfig)) {
+            checkHiveConfResourcesConfig();
+        }
+        if ("kerberos".equalsIgnoreCase(hiveMetastoreAuthenticationType)) {
+            if (Strings.isNullOrEmpty(hiveMetastoreServicePrincipal)
+                    || Strings.isNullOrEmpty(hiveMetastoreClientPrincipal)
+                    || Strings.isNullOrEmpty(hiveMetastoreClientKeytab)) {
+                throw new IllegalArgumentException("Hive metastore 
authentication type is kerberos, "
+                        + "but service principal, client principal or client 
keytab is not set.");
+            }
+        }
+        if (Strings.isNullOrEmpty(hiveMetastoreUri)) {
+            throw new IllegalArgumentException("Hive metastore uri is 
required.");
+        }
+    }
+
+    @Override
+    protected void initNormalizeAndCheckProps() throws UserException {
+        super.initNormalizeAndCheckProps();
+        hiveConfParams = loadConfigFromFile(getResourceConfigPropName());
+        initHmsConnectionProperties();
+    }
+
+    private void initHmsConnectionProperties() {
+        hmsConnectionProperties = new HashMap<>();
+        hmsConnectionProperties.putAll(hiveConfParams);
+        hmsConnectionProperties.put("hive.metastore.authentication.type", 
hiveMetastoreAuthenticationType);
+        if ("kerberos".equalsIgnoreCase(hiveMetastoreAuthenticationType)) {
+            hmsConnectionProperties.put("hive.metastore.service.principal", 
hiveMetastoreServicePrincipal);
+            hmsConnectionProperties.put("hive.metastore.client.principal", 
hiveMetastoreClientPrincipal);
+            hmsConnectionProperties.put("hive.metastore.client.keytab", 
hiveMetastoreClientKeytab);
+        }
+        hmsConnectionProperties.put("uri", hiveMetastoreUri);
+    }
+
+    private void checkHiveConfResourcesConfig() {
+        loadConfigFromFile(getResourceConfigPropName());
+    }
+
+    public void toPaimonOptionsAndConf(Options options) {
+        hmsConnectionProperties.forEach(options::set);
+    }
+
+    public void toIcebergHiveCatalogProperties(Map<String, String> catalogProps) {
+        hmsConnectionProperties.forEach(catalogProps::put);
+    }
+
+    protected Map<String, String> loadConfigFromFile(String resourceConfig) {
+        if (Strings.isNullOrEmpty(origProps.get(resourceConfig))) {
+            return Maps.newHashMap();
+        }
+        HiveConf conf = CatalogConfigFileUtils.loadHiveConfFromHiveConfDir(origProps.get(resourceConfig));
+        Map<String, String> confMap = Maps.newHashMap();
+        for (Map.Entry<String, String> entry : conf) {
+            confMap.put(entry.getKey(), entry.getValue());
+        }
+        return confMap;
+    }
+
+}
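
Kerberos configuration sketch (illustrative, not part of this commit; URI,
principals and keytab path are placeholders). checkRequiredProperties() rejects
kerberos auth unless all three principal/keytab properties are present:

    Map<String, String> props = new HashMap<>();
    props.put("metastore.type", "hms");
    props.put("hive.metastore.uris", "thrift://127.0.0.1:9083");
    props.put("hive.metastore.authentication.type", "kerberos");
    props.put("hive.metastore.service.principal", "hive/[email protected]");
    props.put("hive.metastore.client.principal", "[email protected]");
    props.put("hive.metastore.client.keytab", "/path/to/doris.keytab");

    HMSProperties hms = (HMSProperties) MetastoreProperties.create(props);
    Options paimonOptions = new Options();
    hms.toPaimonOptionsAndConf(paimonOptions); // uri + kerberos keys + any hive.conf.resources entries
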
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/IcebergRestProperties.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/IcebergRestProperties.java
new file mode 100644
index 00000000000..987aa0d519b
--- /dev/null
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/IcebergRestProperties.java
@@ -0,0 +1,56 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.metastore;
+
+import org.apache.doris.datasource.property.ConnectorProperty;
+
+import java.util.Map;
+
+public class IcebergRestProperties extends MetastoreProperties {
+
+    @ConnectorProperty(names = {"iceberg.rest.uri"},
+            description = "The uri of the iceberg rest catalog service.")
+    private String icebergRestUri = "";
+
+    @ConnectorProperty(names = {"iceberg.rest.security.type"},
+            required = false,
+            supported = false,
+            description = "The security type of the iceberg rest catalog 
service.")
+    private String icebergRestSecurityType = "none";
+
+    @ConnectorProperty(names = {"iceberg.rest.prefix"},
+            required = false,
+            supported = false,
+            description = "The prefix of the iceberg rest catalog service.")
+    private String icebergRestPrefix = "";
+
+    public IcebergRestProperties(Map<String, String> origProps) {
+        super(Type.ICEBERG_REST, origProps);
+    }
+
+    @Override
+    protected void checkRequiredProperties() {
+    }
+
+    public void toIcebergRestCatalogProperties(Map<String, String> catalogProps) {
+        // See CatalogUtil.java
+        catalogProps.put("type", "rest");
+        // See CatalogProperties.java
+        catalogProps.put("uri", icebergRestUri);
+    }
+}
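
REST catalog sketch (illustrative, not part of this commit; the URI is a
placeholder):

    Map<String, String> props = new HashMap<>();
    props.put("iceberg.catalog.type", "rest");
    props.put("iceberg.rest.uri", "http://127.0.0.1:8181");

    IcebergRestProperties rest = (IcebergRestProperties) MetastoreProperties.create(props);
    Map<String, String> catalogProps = new HashMap<>();
    rest.toIcebergRestCatalogProperties(catalogProps); // -> {type=rest, uri=http://127.0.0.1:8181}
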
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/MetastoreProperties.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/MetastoreProperties.java
new file mode 100644
index 00000000000..69484de2542
--- /dev/null
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/MetastoreProperties.java
@@ -0,0 +1,181 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.metastore;
+
+import org.apache.doris.common.UserException;
+import org.apache.doris.datasource.property.ConnectionProperties;
+
+import lombok.Getter;
+
+import java.util.Arrays;
+import java.util.EnumMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.function.Function;
+
+/**
+ * MetastoreProperties is the base class for handling configuration of different types of metastores
+ * such as Hive Metastore (HMS), AWS Glue, Aliyun DLF, Iceberg REST catalog, Google Dataproc,
+ * or file-based metastores (like Hadoop).
+ * <p>
+ * It uses a simple factory pattern based on a registry to dynamically instantiate the correct
+ * subclass according to the provided configuration.
+ * <p>
+ * Supported metastore types are defined in the {@link Type} enum. Multiple alias names can be mapped to each type.
+ */
+public class MetastoreProperties extends ConnectionProperties {
+
+    /**
+     * Enum representing supported metastore types.
+     * Each type can have one or more alias strings (case-insensitive).
+     */
+    public enum Type {
+        HMS("hms"),
+        GLUE("glue"),
+        DLF("dlf"),
+        ICEBERG_REST("rest"),
+        DATAPROC("dataproc"),
+        FILE_SYSTEM("filesystem", "hadoop"),
+        UNKNOWN(); // fallback, not used directly
+
+        private final Set<String> aliases;
+
+        Type(String... aliases) {
+            this.aliases = new HashSet<>(Arrays.asList(aliases));
+        }
+
+        /**
+         * Parses a string into a {@link Type} if possible.
+         *
+         * @param input string value (case-insensitive)
+         * @return optional type if match found
+         */
+        public static Optional<Type> fromString(String input) {
+            if (input == null) {
+                return Optional.empty();
+            }
+            String normalized = input.trim().toLowerCase(Locale.ROOT);
+            for (Type type : values()) {
+                if (type.aliases.contains(normalized)) {
+                    return Optional.of(type);
+                }
+            }
+            return Optional.empty();
+        }
+    }
+
+    /**
+     * The resolved metastore type for this configuration.
+     */
+    @Getter
+    protected Type type;
+
+    /**
+     * Common property keys that may specify the metastore type.
+     * These are checked in order to resolve the type from provided config.
+     */
+    private static final List<String> POSSIBLE_TYPE_KEYS = Arrays.asList(
+            "metastore.type",
+            "hive.metastore.type",
+            "iceberg.catalog.type",
+            "paimon.catalog.type",
+            "type"
+    );
+
+    /**
+     * Registry mapping each {@link Type} to its constructor logic.
+     */
+    private static final Map<Type, Function<Map<String, String>, MetastoreProperties>> FACTORY_MAP
+            = new EnumMap<>(Type.class);
+
+    static {
+        // Register all known factories here
+        FACTORY_MAP.put(Type.HMS, HMSProperties::new);
+        FACTORY_MAP.put(Type.GLUE, AWSGlueProperties::new);
+        FACTORY_MAP.put(Type.DLF, AliyunDLFProperties::new);
+        FACTORY_MAP.put(Type.ICEBERG_REST, IcebergRestProperties::new);
+        FACTORY_MAP.put(Type.DATAPROC, DataProcProperties::new);
+        FACTORY_MAP.put(Type.FILE_SYSTEM, FileMetastoreProperties::new);
+    }
+
+    /**
+     * Factory method to create an appropriate {@link MetastoreProperties} instance from raw properties.
+     *
+     * @param origProps original user configuration
+     * @return resolved and initialized metastore properties instance
+     * @throws UserException if the configuration is invalid or unsupported
+     */
+    public static MetastoreProperties create(Map<String, String> origProps) throws UserException {
+        Type msType = resolveType(origProps);
+        return create(msType, origProps);
+    }
+
+    /**
+     * Resolves the {@link Type} of metastore from the property map by checking common keys.
+     *
+     * @param props original property map
+     * @return resolved type
+     */
+    private static Type resolveType(Map<String, String> props) {
+        for (String key : POSSIBLE_TYPE_KEYS) {
+            if (props.containsKey(key)) {
+                String value = props.get(key);
+                Optional<Type> opt = Type.fromString(value);
+                if (opt.isPresent()) {
+                    return opt.get();
+                } else {
+                    throw new IllegalArgumentException("Unknown metastore type 
value '" + value + "' for key: " + key);
+                }
+            }
+        }
+        throw new IllegalArgumentException("No metastore type found in 
properties. Tried keys: " + POSSIBLE_TYPE_KEYS);
+    }
+
+    /**
+     * Factory method to directly create a metastore properties instance given a type.
+     *
+     * @param type      resolved type
+     * @param origProps original configuration
+     * @return constructed and validated {@link MetastoreProperties}
+     * @throws UserException if validation fails
+     */
+    public static MetastoreProperties create(Type type, Map<String, String> origProps) throws UserException {
+        Function<Map<String, String>, MetastoreProperties> constructor = FACTORY_MAP.get(type);
+        if (constructor == null) {
+            throw new IllegalArgumentException("Unsupported metastore type: " + type);
+        }
+        }
+        MetastoreProperties instance = constructor.apply(origProps);
+        instance.initNormalizeAndCheckProps();
+        return instance;
+    }
+
+    /**
+     * Base constructor for subclasses to initialize the common state.
+     *
+     * @param type      metastore type
+     * @param origProps original configuration
+     */
+    protected MetastoreProperties(Type type, Map<String, String> origProps) {
+        super(origProps);
+        this.type = type;
+    }
+}
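
Type-resolution sketch (illustrative, not part of this commit): the factory
scans POSSIBLE_TYPE_KEYS in order and matches aliases case-insensitively, so
"hms" or "HMS" under "metastore.type", "hive.metastore.type" or "type" all
select the HMS subclass:

    Map<String, String> props = new HashMap<>();
    props.put("metastore.type", "hms");
    props.put("hive.metastore.uris", "thrift://127.0.0.1:9083"); // required by HMSProperties
    MetastoreProperties ms = MetastoreProperties.create(props);  // resolves, constructs, validates
    // An unknown value ("metastore.type" = "foo") or no type key at all
    // throws IllegalArgumentException.
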
diff --git a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/AWSGluePropertiesTest.java b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/AWSGluePropertiesTest.java
new file mode 100644
index 00000000000..c4500712642
--- /dev/null
+++ b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/AWSGluePropertiesTest.java
@@ -0,0 +1,172 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.metastore;
+
+import org.apache.doris.common.UserException;
+
+import com.google.common.collect.Maps;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class AWSGluePropertiesTest {
+    private static Map<String, String> baseProps = new HashMap<>();
+
+    @BeforeAll
+    public static void setUp() {
+        baseProps.put("iceberg.catalog.type", "glue");
+        baseProps.put("type", "iceberg");
+    }
+
+    @Test
+    public void testBasicProperties() throws UserException {
+        Map<String, String> props = baseProps;
+        props.put("glue.access_key", "test_access_key");
+        props.put("glue.secret_key", "test_secret_key");
+
+        props.put("glue.endpoint", 
"https://glue.ap-northeast-1.amazonaws.com";);
+
+        AWSGlueProperties glueProperties = (AWSGlueProperties) MetastoreProperties.create(props);
+
+        Map<String, String> catalogProps = new HashMap<>();
+        glueProperties.toIcebergGlueCatalogProperties(catalogProps);
+        Assertions.assertEquals("com.amazonaws.glue.catalog.credentials.ConfigurationAWSCredentialsProvider2x", catalogProps
+                .get("client.credentials-provider"));
+        Assertions.assertEquals(props.get("glue.access_key"), catalogProps
+                .get("client.credentials-provider.glue.access_key"));
+        Assertions.assertEquals(props.get("glue.secret_key"), catalogProps
+                .get("client.credentials-provider.glue.secret_key"));
+        Assertions.assertEquals("ap-northeast-1", catalogProps
+                .get("client.region"));
+        AWSGlueProperties.AWSCatalogMetastoreClientCredentials awsCatalogMetastoreClientCredentials = glueProperties.getAWSCatalogMetastoreClientCredentials();
+        Map<String, String> credentials = awsCatalogMetastoreClientCredentials.getCredentials();
+        Assertions.assertEquals("test_access_key", credentials.get("aws.glue.access-key"));
+        Assertions.assertEquals("test_secret_key", credentials.get("aws.glue.secret-key"));
+        Assertions.assertEquals("https://glue.ap-northeast-1.amazonaws.com", credentials.get("aws.glue.endpoint"));
+        // Test glue.endpoint
+        props = new HashMap<>();
+        props.put("type", "hms");
+        props.put("hive.metastore.type", "glue");
+        props.put("glue.endpoint", 
"https://glue.ap-northeast-1.amazonaws.com";);
+        props.put("aws.glue.secret-key", "test_secret_key");
+        props.put("aws.glue.access-key", "test_access_key");
+        glueProperties = (AWSGlueProperties) MetastoreProperties.create(props);
+        catalogProps = new HashMap<>();
+        glueProperties.toIcebergGlueCatalogProperties(catalogProps);
+        Assertions.assertEquals("ap-northeast-1", 
catalogProps.get("client.region"));
+        Assertions.assertEquals("test_access_key", 
catalogProps.get("client.credentials-provider.glue.access_key"));
+        Assertions.assertEquals("test_secret_key", 
catalogProps.get("client.credentials-provider.glue.secret_key"));
+        props = new HashMap<>();
+        props.put("type", "hms");
+        props.put("hive.metastore.type", "glue");
+        props.put("glue.endpoint", 
"https://glue.ap-northeast-1.amazonaws.com";);
+        props.put("aws.glue.secret-key", "test_secret_key");
+        props.put("glue.access_key", "test_glue_access_key");
+        glueProperties = (AWSGlueProperties) MetastoreProperties.create(props);
+        catalogProps = new HashMap<>();
+        glueProperties.toIcebergGlueCatalogProperties(catalogProps);
+        Assertions.assertEquals("ap-northeast-1", 
catalogProps.get("client.region"));
+        Assertions.assertEquals("test_secret_key", 
catalogProps.get("client.credentials-provider.glue.secret_key"));
+        Assertions.assertEquals("test_glue_access_key", 
catalogProps.get("client.credentials-provider.glue.access_key"));
+    }
+
+    @Test
+    public void testMissingRequiredProperties() {
+        Map<String, String> props = Maps.newHashMap();
+        Assertions.assertThrowsExactly(IllegalArgumentException.class, () -> {
+            MetastoreProperties.create(MetastoreProperties.Type.GLUE, props);
+        });
+        props.put("glue.access_key", "test_access_key");
+        Assertions.assertThrowsExactly(IllegalArgumentException.class, () -> {
+            MetastoreProperties.create(MetastoreProperties.Type.GLUE, props);
+        });
+        props.put("glue.secret_key", "test_secret_key");
+        Assertions.assertThrowsExactly(IllegalArgumentException.class, () -> {
+            MetastoreProperties.create(MetastoreProperties.Type.GLUE, props);
+        });
+    }
+
+    @Test
+    public void testEmptyRequiredProperty() {
+        Map<String, String> props = Maps.newHashMap();
+        props.put("glue.access_key", " ");
+        props.put("glue.secret_key", "test_secret_key");
+        props.put("glue.endpoint", 
"https://glue.ap-northeast-1.amazonaws.com";);
+
+        Assertions.assertThrowsExactly(IllegalArgumentException.class, () -> {
+            MetastoreProperties.create(MetastoreProperties.Type.GLUE, props);
+        }, "AWS Glue properties(glue.access_key, glue.secret_key, 
glue.endpoint) are not set correctly.");
+        props.put("glue.access_key", "");
+        Assertions.assertThrowsExactly(IllegalArgumentException.class, () -> {
+            MetastoreProperties.create(MetastoreProperties.Type.GLUE, props);
+        }, "AWS Glue properties(glue.access_key, glue.secret_key, 
glue.endpoint) are not set correctly.");
+        props.put("glue.access_key", "test_access_key");
+        props.put("glue.secret_key", " ");
+        Assertions.assertThrowsExactly(IllegalArgumentException.class, () -> {
+            MetastoreProperties.create(MetastoreProperties.Type.GLUE, props);
+        }, "AWS Glue properties(glue.access_key, glue.secret_key, 
glue.endpoint) are not set correctly.");
+    }
+
+    @Test
+    public void testEndpointParams() throws UserException {
+        Map<String, String> props = Maps.newHashMap();
+        props.put("glue.access_key", "a");
+        props.put("glue.secret_key", "test_secret_key");
+        props.put("glue.endpoint", "https://glue.us-west-2.amazonaws.com";);
+        AWSGlueProperties glueProperties = (AWSGlueProperties) 
MetastoreProperties.create(MetastoreProperties.Type.GLUE, props);
+        Map<String, String> catalogProps = new HashMap<>();
+        glueProperties.toIcebergGlueCatalogProperties(catalogProps);
+        Assertions.assertEquals("us-west-2", 
catalogProps.get("client.region"));
+        props.put("glue.endpoint", "https://glue-fips.us-west-2.api.aws";);
+        glueProperties = (AWSGlueProperties) 
MetastoreProperties.create(MetastoreProperties.Type.GLUE, props);
+        catalogProps = new HashMap<>();
+        glueProperties.toIcebergGlueCatalogProperties(catalogProps);
+        Assertions.assertEquals("us-west-2", 
catalogProps.get("client.region"));
+        props.put("glue.endpoint", 
"https://glue-fips.us-west-2.amazonaws.com";);
+        glueProperties = (AWSGlueProperties) 
MetastoreProperties.create(MetastoreProperties.Type.GLUE, props);
+        catalogProps = new HashMap<>();
+        glueProperties.toIcebergGlueCatalogProperties(catalogProps);
+        Assertions.assertEquals("us-west-2", 
catalogProps.get("client.region"));
+        props.put("glue.endpoint", "https://glue.us-west-2.api.aws";);
+        glueProperties = (AWSGlueProperties) 
MetastoreProperties.create(MetastoreProperties.Type.GLUE, props);
+        catalogProps = new HashMap<>();
+        glueProperties.toIcebergGlueCatalogProperties(catalogProps);
+        Assertions.assertEquals("us-west-2", 
catalogProps.get("client.region"));
+        props.put("glue.endpoint", "https://glue.us-west-2.amazonaws.com";);
+        glueProperties = (AWSGlueProperties) 
MetastoreProperties.create(MetastoreProperties.Type.GLUE, props);
+        catalogProps = new HashMap<>();
+        glueProperties.toIcebergGlueCatalogProperties(catalogProps);
+        Assertions.assertEquals("us-west-2", 
catalogProps.get("client.region"));
+
+        props.put("glue.endpoint", "glue.us-west-2.amazonaws.com");
+        glueProperties = (AWSGlueProperties) 
MetastoreProperties.create(MetastoreProperties.Type.GLUE, props);
+        catalogProps = new HashMap<>();
+        glueProperties.toIcebergGlueCatalogProperties(catalogProps);
+        Assertions.assertEquals("us-west-2", 
catalogProps.get("client.region"));
+        catalogProps = new HashMap<>();
+        glueProperties.toIcebergGlueCatalogProperties(catalogProps);
+        Assertions.assertEquals("us-west-2", 
catalogProps.get("client.region"));
+        props.put("glue.endpoint", "https://glue.us-west-2.amaaws.com";);
+        Assertions.assertThrows(IllegalArgumentException.class, () -> 
MetastoreProperties.create(MetastoreProperties.Type.GLUE, props), "AWS Glue 
properties (glue.endpoint) are not set correctly: 
https://glue.us-west-2.amaaws.com";);
+
+
+    }
+}
diff --git a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/AWSTest.java b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/AWSTest.java
new file mode 100644
index 00000000000..1513c816733
--- /dev/null
+++ b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/AWSTest.java
@@ -0,0 +1,101 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.metastore;
+
+import com.amazonaws.auth.SystemPropertiesCredentialsProvider;
+import com.amazonaws.services.s3.AmazonS3;
+import com.amazonaws.services.s3.AmazonS3ClientBuilder;
+import com.amazonaws.services.s3.model.ObjectListing;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.iceberg.aws.glue.GlueCatalog;
+import org.apache.iceberg.catalog.Namespace;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+@Disabled("Only run manually")
+public class AWSTest {
+    private static final String AWS_ACCESS_KEY_ID = "YOUR_ACCESS_KEY_ID"; // Replace with actual access key
+    private static final String AWS_SECRET_ACCESS_KEY = "YOUR_SECRET_ACCESS_KEY"; // Replace with actual secret key
+    private static final String AWS_REGION = "ap-northeast-1"; // Replace with actual region
+    private static final String S3_BUCKET_NAME = "test"; // Replace with actual bucket name
+    private static final String GLUE_CATALOG_NAME = "test"; // Replace with actual catalog name
+    private static final String S3A_PATH = "s3a://aws-glue-assets-123-ap-southeast-1/"; // Replace with actual S3A path
+
+    @BeforeEach
+    public void setUp() {
+        // Set AWS credentials and region using system properties
+        System.setProperty("aws.accessKeyId", AWS_ACCESS_KEY_ID);
+        System.setProperty("aws.secretKey", AWS_SECRET_ACCESS_KEY);
+        System.setProperty("aws.region", AWS_REGION);
+    }
+
+    @Test
+    public void testAWSS3() throws IOException {
+        // Create S3 client
+        AmazonS3 s3Client = AmazonS3ClientBuilder.standard()
+                .withRegion(AWS_REGION) // Set the region
+                .build();
+
+        // List S3 buckets
+        s3Client.listBuckets().forEach(bucket -> {
+            System.out.println("Bucket Name: " + bucket.getName());
+        });
+
+        // List objects in the specified S3 bucket
+        ObjectListing list = s3Client.listObjects(S3_BUCKET_NAME, "");
+        list.getObjectSummaries().forEach(objectSummary -> {
+            System.out.println("Object Key: " + objectSummary.getKey());
+        });
+    }
+
+    @Test
+    public void testGlueCatalog() throws IOException {
+        // Initialize Glue catalog with properties
+        Map<String, String> catalogProps = new HashMap<>();
+        GlueCatalog glueCatalog = new GlueCatalog();
+        glueCatalog.initialize(GLUE_CATALOG_NAME, catalogProps);
+
+        // List namespaces in the Glue catalog
+        glueCatalog.listNamespaces(Namespace.empty()).forEach(namespace -> {
+            System.out.println("Namespace: " + namespace);
+        });
+
+        // Configure Hadoop FileSystem to use S3A with SystemPropertiesCredentialsProvider
+        Configuration conf = new Configuration();
+        conf.set("fs.s3a.aws.credentials.provider", SystemPropertiesCredentialsProvider.class.getName());
+        conf.set("fs.defaultFS", S3A_PATH);
+        conf.set("fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem");
+
+        // Get the FileSystem and list files in the specified S3A path
+        FileSystem fs = FileSystem.get(conf);
+        RemoteIterator<LocatedFileStatus> a = fs.listFiles(new Path(S3A_PATH), true);
+        while (a.hasNext()) {
+            LocatedFileStatus next = a.next();
+            System.out.println(next.getPath());
+        }
+    }
+}
diff --git a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/AliyunDLFPropertiesTest.java b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/AliyunDLFPropertiesTest.java
new file mode 100644
index 00000000000..b004675b74a
--- /dev/null
+++ b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/AliyunDLFPropertiesTest.java
@@ -0,0 +1,130 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.metastore;
+
+import org.apache.doris.common.UserException;
+
+import org.apache.paimon.options.Options;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class AliyunDLFPropertiesTest {
+    private static Map<String, String> baseProps;
+
+    @BeforeAll
+    public static void init() {
+        baseProps = new HashMap<>();
+        baseProps.put("paimon.catalog.type", "DLF");
+        baseProps.put("dlf.access_key", "my-access-key");
+        baseProps.put("dlf.secret_key", "my-secret-key");
+        baseProps.put("dlf.region", "cn-hangzhou");
+        baseProps.put("dlf.uid", "uid123");
+        baseProps.put("dlf.access.public", "true");
+        baseProps.put("dlf.extra.config", "extraValue");
+        baseProps.put("not.dlf.key", "ignoreMe");
+    }
+
+    @Test
+    public void testConstructor_shouldCaptureOnlyDlfPrefixedProps() throws UserException {
+        AliyunDLFProperties props = (AliyunDLFProperties) MetastoreProperties.create(baseProps);
+        Map<String, String> others = props.getOtherDlfProps();
+        Assertions.assertTrue(others.containsKey("dlf.extra.config"));
+        Assertions.assertFalse(others.containsKey("not.dlf.key"));
+    }
+
+    @Test
+    public void testToPaimonOptions_withExplicitEndpoint() throws UserException {
+        baseProps.put("dlf.endpoint", "explicit.endpoint.aliyun.com");
+
+        AliyunDLFProperties props = (AliyunDLFProperties) MetastoreProperties.create(baseProps);
+        Options options = new Options();
+        props.toPaimonOptions(options);
+
+        Assertions.assertEquals("explicit.endpoint.aliyun.com", 
options.get("dlf.catalog.endpoint"));
+        Assertions.assertEquals("my-access-key", 
options.get("dlf.catalog.accessKeyId"));
+        Assertions.assertEquals("my-secret-key", 
options.get("dlf.catalog.accessKeySecret"));
+        Assertions.assertEquals("cn-hangzhou", 
options.get("dlf.catalog.region"));
+        Assertions.assertEquals("uid123", options.get("dlf.catalog.uid"));
+        Assertions.assertEquals("true", 
options.get("dlf.catalog.accessPublic"));
+        Assertions.assertEquals("DLF_ONLY", 
options.get("dlf.catalog.proxyMode"));
+        Assertions.assertEquals("false", 
options.get("dlf.catalog.createDefaultDBIfNotExist"));
+
+        // extra config
+        Assertions.assertEquals("extraValue", options.get("dlf.extra.config"));
+    }
+
+    @Test
+    public void testToPaimonOptions_publicAccess() throws UserException {
+        baseProps.remove("dlf.endpoint");
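+        // With no explicit endpoint, the endpoint is derived from the region:
+        // public access -> dlf.<region>.aliyuncs.com, otherwise dlf-vpc.<region>.aliyuncs.com.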
+        baseProps.put("dlf.access.public", "TrUe"); // 测试大小写
+
+        AliyunDLFProperties props = (AliyunDLFProperties) 
MetastoreProperties.create(baseProps);
+
+        Options options = new Options();
+        props.toPaimonOptions(options);
+
+        Assertions.assertEquals("dlf.cn-hangzhou.aliyuncs.com", 
options.get("dlf.catalog.endpoint"));
+    }
+
+    @Test
+    public void testToPaimonOptions_privateVpcAccess() throws UserException {
+        baseProps.remove("dlf.endpoint");
+        baseProps.put("dlf.access.public", "true");
+
+        AliyunDLFProperties props = (AliyunDLFProperties) 
MetastoreProperties.create(baseProps);
+        Options options = new Options();
+        props.toPaimonOptions(options);
+
+        Assertions.assertEquals("dlf.cn-hangzhou.aliyuncs.com", 
options.get("dlf.catalog.endpoint"));
+    }
+
+    @Test
+    public void testToPaimonOptions_defaultVpcWhenPublicMissing() throws 
UserException {
+        baseProps.remove("dlf.endpoint");
+        baseProps.put("dlf.access.public", "false");
+
+        AliyunDLFProperties props = (AliyunDLFProperties) 
MetastoreProperties.create(baseProps);
+
+        Options options = new Options();
+        props.toPaimonOptions(options);
+
+        Assertions.assertEquals("dlf-vpc.cn-hangzhou.aliyuncs.com", 
options.get("dlf.catalog.endpoint"));
+    }
+
+    @Test
+    public void testToPaimonOptions_defaultOptionsAlwaysSet() throws UserException {
+        AliyunDLFProperties props = (AliyunDLFProperties) 
MetastoreProperties.create(baseProps);
+
+        Options options = new Options();
+        props.toPaimonOptions(options);
+        // verify the fixed default options are always present
+        Assertions.assertEquals("DLF_ONLY", 
options.get("dlf.catalog.proxyMode"));
+        Assertions.assertEquals("false", 
options.get("dlf.catalog.createDefaultDBIfNotExist"));
+    }
+
+    @Test
+    public void testGetResourceConfigPropName() {
+        AliyunDLFProperties props = new AliyunDLFProperties(baseProps);
+        Assertions.assertEquals("dlf.resource_config", 
props.getResourceConfigPropName());
+    }
+}
diff --git 
a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/GlueCatalogTest.java
 
b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/GlueCatalogTest.java
new file mode 100644
index 00000000000..5e777717193
--- /dev/null
+++ 
b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/GlueCatalogTest.java
@@ -0,0 +1,110 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.metastore;
+
+import org.apache.doris.common.UserException;
+
+import org.apache.iceberg.aws.glue.GlueCatalog;
+import org.apache.iceberg.catalog.Namespace;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+@Disabled("Disabled until AWS credentials are available")
+public class GlueCatalogTest {
+
+    private GlueCatalog glueCatalog;
+    private AWSGlueProperties glueProperties;
+    private static final Namespace QUERY_NAMESPACE = Namespace.of("test"); // 
Replace with your namespace
+    private static final String AWS_ACCESS_KEY_ID = "YOUR_ACCESS_KEY_ID"; // 
Replace with actual access key
+    private static final String AWS_SECRET_ACCESS_KEY = 
"YOUR_SECRET_ACCESS_KEY"; // Replace with actual secret key
+    private static final String AWS_GLUE_ENDPOINT = 
"https://glue.ap-northeast-1.amazonaws.com";; // Replace with your endpoint
+
+    @BeforeEach
+    public void setUp() throws UserException {
+        glueCatalog = new GlueCatalog();
+        System.setProperty("queryNameSpace", "lakes_test_glue");
+
+        // Setup properties
+        Map<String, String> props = new HashMap<>();
+        // The credential constants above are placeholders; supply real values before enabling this test
+        props.put("glue.access_key", AWS_ACCESS_KEY_ID);
+        props.put("glue.secret_key", AWS_SECRET_ACCESS_KEY);
+        props.put("glue.endpoint", AWS_GLUE_ENDPOINT);
+        props.put("type", "iceberg");
+        props.put("iceberg.catalog.type", "glue");
+
+        // Initialize AWSGlueProperties
+        glueProperties = (AWSGlueProperties) AWSGlueProperties.create(props);
+
+        // Convert to catalog properties
+        Map<String, String> catalogProps = new HashMap<>();
+        glueProperties.toIcebergGlueCatalogProperties(catalogProps);
+
+        // Initialize Glue Catalog
+        glueCatalog.initialize("ck", catalogProps);
+    }
+
+    @Test
+    public void testListNamespaces() {
+
+        // List namespaces and assert
+        glueCatalog.listNamespaces(Namespace.empty()).forEach(namespace1 -> {
+            System.out.println("Namespace: " + namespace1);
+            Assertions.assertNotNull(namespace1, "Namespace should not be 
null");
+        });
+    }
+
+    @Test
+    public void testListTables() {
+        // List tables in a given namespace
+        glueCatalog.listTables(QUERY_NAMESPACE).forEach(tableIdentifier -> {
+            System.out.println("Table: " + tableIdentifier.name());
+            Assertions.assertNotNull(tableIdentifier, "TableIdentifier should 
not be null");
+
+            // Load table history and assert
+            glueCatalog.loadTable(tableIdentifier).history().forEach(snapshot 
-> {
+                System.out.println("Snapshot: " + snapshot);
+                Assertions.assertNotNull(snapshot, "Snapshot should not be 
null");
+            });
+        });
+    }
+
+    @Test
+    public void testConnection() {
+        // Check if catalog can be initialized without errors
+        Assertions.assertNotNull(glueCatalog, "Glue Catalog should be 
initialized");
+
+        // Ensure at least one namespace exists
+        
Assertions.assertFalse(glueCatalog.listNamespaces(Namespace.empty()).isEmpty(),
+                "Namespace list should not be empty");
+    }
+
+    @AfterEach
+    public void tearDown() throws IOException {
+        // Close the Glue Catalog
+        glueCatalog.close();
+    }
+}
diff --git 
a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/HMSIntegrationTest.java
 
b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/HMSIntegrationTest.java
new file mode 100644
index 00000000000..7c93bef6d76
--- /dev/null
+++ 
b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/HMSIntegrationTest.java
@@ -0,0 +1,222 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.metastore;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
+import shade.doris.hive.org.apache.thrift.TException;
+
+import java.io.IOException;
+import java.security.PrivilegedAction;
+import java.util.ArrayList;
+import java.util.List;
+
+@Disabled("Requires a live Kerberized Hive metastore; fill in the constants below first")
+public class HMSIntegrationTest {
+
+    // Hive configuration file path
+    private static final String HIVE_CONF_PATH = "";
+    // krb5 configuration file path
+    private static final String KRB5_CONF_PATH = "";
+    // Path to the Kerberos keytab file
+    private static final String KEYTAB_PATH = "";
+    // Principal name for Kerberos authentication
+    private static final String PRINCIPAL_NAME = "";
+
+    private static final String QUERY_DB_NAME = "";
+    private static final String QUERY_TBL_NAME = "";
+    private static final String CREATE_TBL_NAME = "";
+    private static final String CREATE_TBL_IN_DB_NAME = "";
+    // HDFS URI for the table location
+    private static final String HDFS_URI = "";
+    private static final boolean ENABLE_EXECUTE_CREATE_TABLE_TEST = false;
+
+    @Test
+    public void testHms() throws IOException {
+        // Set up HiveConf and Kerberos authentication
+        HiveConf hiveConf = setupHiveConf();
+        setupKerberos(hiveConf);
+
+        // Authenticate user using the provided keytab file
+        UserGroupInformation ugi = authenticateUser();
+        System.out.println("User Credentials: " + ugi.getCredentials());
+
+        // Perform Hive MetaStore client operations
+        ugi.doAs((PrivilegedAction<Void>) () -> {
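+            // All metastore calls below run under the Kerberos identity obtained above.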
+            try {
+                HiveMetaStoreClient client = 
createHiveMetaStoreClient(hiveConf);
+
+                // Get database and table information
+                getDatabaseAndTableInfo(client);
+
+                // Create a new table in Hive
+                createNewTable(client);
+
+            } catch (TException e) {
+                throw new RuntimeException("HiveMetaStoreClient operation 
failed", e);
+            }
+            return null;
+        });
+    }
+
+    /**
+     * Sets up the HiveConf object by loading necessary configuration files.
+     *
+     * @return Configured HiveConf object
+     */
+    private static HiveConf setupHiveConf() {
+        HiveConf hiveConf = new HiveConf();
+        // Load the Hive configuration file
+        hiveConf.addResource(HIVE_CONF_PATH);
+        // Set Hive Metastore URIs and Kerberos principal
+        //if not in config-site
+        //hiveConf.set("hive.metastore.uris", "");
+        //hiveConf.set("hive.metastore.sasl.enabled", "true");
+        //hiveConf.set("hive.metastore.kerberos.principal", "");
+        return hiveConf;
+    }
+
+    /**
+     * Sets up Kerberos authentication properties in the HiveConf.
+     *
+     * @param hiveConf HiveConf object to update with Kerberos settings
+     */
+    private static void setupKerberos(HiveConf hiveConf) {
+        // Set the Kerberos configuration file path
+        System.setProperty("java.security.krb5.conf", KRB5_CONF_PATH);
+        // Enable Kerberos authentication for Hadoop
+        hiveConf.set("hadoop.security.authentication", "kerberos");
+        // Set the Hive configuration for Kerberos authentication
+        UserGroupInformation.setConfiguration(hiveConf);
+    }
+
+    /**
+     * Authenticates the user using Kerberos with a provided keytab file.
+     *
+     * @return Authenticated UserGroupInformation object
+     * @throws IOException If there is an error during authentication
+     */
+    private static UserGroupInformation authenticateUser() throws IOException {
+        return 
UserGroupInformation.loginUserFromKeytabAndReturnUGI(PRINCIPAL_NAME, 
KEYTAB_PATH);
+    }
+
+    /**
+     * Creates a new HiveMetaStoreClient using the provided HiveConf.
+     *
+     * @param hiveConf The HiveConf object with configuration settings
+     * @return A new instance of HiveMetaStoreClient
+     * @throws TException If there is an error creating the client
+     */
+    private static HiveMetaStoreClient createHiveMetaStoreClient(HiveConf 
hiveConf) throws TException {
+        return new HiveMetaStoreClient(hiveConf);
+    }
+
+    /**
+     * Retrieves database and table information from the Hive MetaStore.
+     *
+     * @param client The HiveMetaStoreClient used to interact with the 
MetaStore
+     * @throws TException If there is an error retrieving database or table 
info
+     */
+    private static void getDatabaseAndTableInfo(HiveMetaStoreClient client) 
throws TException {
+        // Retrieve and print the list of databases
+        System.out.println("Databases: " + client.getAllDatabases());
+        Table tbl = client.getTable(QUERY_DB_NAME, QUERY_TBL_NAME);
+        System.out.println(tbl);
+    }
+
+    /**
+     * Creates a new table in Hive with specified metadata.
+     *
+     * @param client The HiveMetaStoreClient used to create the table
+     * @throws TException If there is an error creating the table
+     */
+    private static void createNewTable(HiveMetaStoreClient client) throws 
TException {
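+        // Guarded by a flag because creating a table mutates the external metastore.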
+        if (!ENABLE_EXECUTE_CREATE_TABLE_TEST) {
+            return;
+        }
+        // Create StorageDescriptor for the table
+        StorageDescriptor storageDescriptor = createTableStorageDescriptor();
+
+        // Create the table object and set its properties
+        Table table = new Table();
+        table.setDbName(CREATE_TBL_IN_DB_NAME);
+        table.setTableName(CREATE_TBL_NAME);
+        table.setPartitionKeys(createPartitionColumns());
+        table.setSd(storageDescriptor);
+
+        // Create the table in the Hive MetaStore
+        client.createTable(table);
+        System.out.println("Table 'exampletable' created successfully.");
+    }
+
+    /**
+     * Creates the StorageDescriptor for a table, which includes columns and 
location.
+     *
+     * @return A StorageDescriptor object containing table metadata
+     */
+    private static StorageDescriptor createTableStorageDescriptor() {
+        // Define the table columns
+        List<FieldSchema> columns = new ArrayList<>();
+        columns.add(new FieldSchema("id", "int", "ID column"));
+        columns.add(new FieldSchema("name", "string", "Name column"));
+        columns.add(new FieldSchema("age", "int", "Age column"));
+
+        // Create and configure the StorageDescriptor for the table
+        StorageDescriptor storageDescriptor = new StorageDescriptor();
+        storageDescriptor.setCols(columns);
+        storageDescriptor.setLocation(HDFS_URI);
+
+        // Configure SerDe for the table
+        SerDeInfo serDeInfo = createSerDeInfo();
+        storageDescriptor.setSerdeInfo(serDeInfo);
+
+        return storageDescriptor;
+    }
+
+    /**
+     * Creates the SerDeInfo object for the table, which defines how data is 
serialized and deserialized.
+     *
+     * @return A SerDeInfo object with the specified serialization settings
+     */
+    private static SerDeInfo createSerDeInfo() {
+        SerDeInfo serDeInfo = new SerDeInfo();
+        serDeInfo.setName("example_serde");
+        
serDeInfo.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
+        return serDeInfo;
+    }
+
+    /**
+     * Creates the partition columns for the table.
+     *
+     * @return A list of FieldSchema objects representing partition columns
+     */
+    private static List<FieldSchema> createPartitionColumns() {
+        List<FieldSchema> partitionColumns = new ArrayList<>();
+        partitionColumns.add(new FieldSchema("year", "int", "Year partition"));
+        partitionColumns.add(new FieldSchema("month", "int", "Month 
partition"));
+        return partitionColumns;
+    }
+}
+
diff --git 
a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/HMSPropertiesTest.java
 
b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/HMSPropertiesTest.java
new file mode 100644
index 00000000000..7ea8f5cd6f6
--- /dev/null
+++ 
b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/HMSPropertiesTest.java
@@ -0,0 +1,122 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.metastore;
+
+import org.apache.doris.common.Config;
+import org.apache.doris.common.UserException;
+
+import org.apache.paimon.options.Options;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+import java.net.URL;
+import java.util.HashMap;
+import java.util.Map;
+
+public class HMSPropertiesTest {
+
+    @Test
+    public void testHiveConfDirNotExist() {
+        Map<String, String> params = new HashMap<>();
+        params.put("hive.conf.resources", "/opt/hive-site.xml");
+        params.put("metastore.type", "hms");
+        Assertions.assertThrows(IllegalArgumentException.class, () -> 
MetastoreProperties.create(params));
+    }
+
+    @Test
+    public void testHiveConfDirExist() throws UserException {
+        URL hiveFileUrl = 
HMSPropertiesTest.class.getClassLoader().getResource("plugins");
+        Config.hadoop_config_dir = hiveFileUrl.getPath();
+        Map<String, String> params = new HashMap<>();
+        params.put("hive.conf.resources", "/hive-conf/hive1/hive-site.xml");
+        params.put("metastore.type", "hms");
+        HMSProperties hmsProperties;
+        Assertions.assertThrows(IllegalArgumentException.class, () -> 
MetastoreProperties.create(params));
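+        // Creation succeeds once the mandatory metastore URI is supplied.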
+        params.put("hive.metastore.uris", "thrift://default:9083");
+        hmsProperties = (HMSProperties) MetastoreProperties.create(params);
+        Map<String, String> hiveConf = 
hmsProperties.loadConfigFromFile("hive.conf.resources");
+        Assertions.assertNotNull(hiveConf);
+        Assertions.assertEquals("/user/hive/default", 
hiveConf.get("hive.metastore.warehouse.dir"));
+    }
+
+    @Test
+    public void testBasicParams() throws UserException {
+        Map<String, String> notValidParams = new HashMap<>();
+        notValidParams.put("metastore.type", "hms");
+        Assertions.assertThrows(IllegalArgumentException.class, () -> 
MetastoreProperties.create(notValidParams));
+        // Step 1: Set up initial parameters for HMSProperties
+        Map<String, String> params = createBaseParams();
+
+        // Step 2: Test HMSProperties to PaimonOptions and Conf conversion
+        HMSProperties hmsProperties = getHMSProperties(params);
+        testHmsToPaimonOptions(hmsProperties);
+
+        // Step 3: Test HMSProperties to Iceberg Hive Catalog properties 
conversion
+        testHmsToIcebergHiveCatalog(hmsProperties);
+
+        // Step 4: Kerberos authentication without principal/keytab settings 
must be rejected
+        params.put("hive.metastore.sasl.enabled", "true");
+        params.put("hive.metastore.authentication.type", "kerberos");
+        Assertions.assertThrows(IllegalArgumentException.class, () -> 
MetastoreProperties.create(params));
+    }
+
+    private Map<String, String> createBaseParams() {
+        Map<String, String> params = new HashMap<>();
+        params.put("metastore.type", "hms");
+        params.put("hive.metastore.uris", "thrift://127.0.0.1:9083");
+        params.put("hive.metastore.authentication.type", "simple");
+        return params;
+    }
+
+    private HMSProperties getHMSProperties(Map<String, String> params) throws 
UserException {
+        return (HMSProperties) MetastoreProperties.create(params);
+    }
+
+    private void testHmsToPaimonOptions(HMSProperties hmsProperties) {
+        Options paimonOptions = new Options();
+        hmsProperties.toPaimonOptionsAndConf(paimonOptions);
+        Assertions.assertEquals("thrift://127.0.0.1:9083", 
paimonOptions.get("uri"));
+    }
+
+    private void testHmsToIcebergHiveCatalog(HMSProperties hmsProperties) {
+        Map<String, String> icebergMSParams = new HashMap<>();
+        hmsProperties.toIcebergHiveCatalogProperties(icebergMSParams);
+        Assertions.assertEquals("thrift://127.0.0.1:9083", 
icebergMSParams.get("uri"));
+    }
+
+    @Test
+    public void testHmsKerberosParams() throws UserException {
+        Map<String, String> params = createBaseParams();
+        params.put("hive.metastore.uris", "thrift://127.0.0.1:9083");
+        params.put("hive.metastore.sasl.enabled", "true");
+        params.put("hive.metastore.authentication.type", "kerberos");
+        Assertions.assertThrows(IllegalArgumentException.class, () -> 
MetastoreProperties.create(params));
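+        // Kerberos additionally requires the client principal, client keytab and service principal.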
+        params.put("hive.metastore.client.principal", 
"hive/[email protected]");
+        params.put("hive.metastore.client.keytab", "/path/to/keytab");
+        Assertions.assertThrows(IllegalArgumentException.class, () -> 
MetastoreProperties.create(params),
+                "Hive metastore authentication type is kerberos, but service 
principal, client principal or client keytab is not set.");
+        params.put("hive.metastore.service.principal", 
"hive/[email protected]");
+        HMSProperties hmsProperties = getHMSProperties(params);
+        Map<String, String> icebergMSParams = new HashMap<>();
+        hmsProperties.toIcebergHiveCatalogProperties(icebergMSParams);
+        Assertions.assertEquals("hive/[email protected]", 
icebergMSParams.get("hive.metastore.client.principal"));
+        Assertions.assertEquals("/path/to/keytab", 
icebergMSParams.get("hive.metastore.client.keytab"));
+        Assertions.assertEquals("thrift://127.0.0.1:9083", 
icebergMSParams.get("uri"));
+    }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
