This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
     new 0e0985556bf branch-3.1: [feat](storage)Support GCS Storage #55345 (#55406)
0e0985556bf is described below

commit 0e0985556bf9e11311e099e8d22390929d059f71
Author: Calvin Kirs <[email protected]>
AuthorDate: Thu Aug 28 14:16:26 2025 +0800

    branch-3.1: [feat](storage)Support GCS Storage #55345 (#55406)
    
    #55345
    
    // Custom FileIO is not automatically enabled because it may cause serialization
    // issues when accessing Iceberg system tables. Users can manually configure and
    // enable Kerberos-aware FileIO (e.g., KerberizedHadoopFileIO) if required
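    
    For reference, opting back in amounts to restoring the Iceberg catalog
    property that this commit comments out. A minimal sketch (the property key
    and the DelegateFileIO class name come from the disabled code below; the
    helper method itself is hypothetical):
    
        import java.util.Map;
        import org.apache.iceberg.CatalogProperties;
    
        static void enableCustomFileIO(Map<String, String> catalogProps) {
            // Re-enable the Kerberos-aware FileIO that this commit disables by
            // default; note the serialization caveat for system tables above.
            catalogProps.put(CatalogProperties.FILE_IO_IMPL,
                    "org.apache.doris.datasource.iceberg.fileio.DelegateFileIO");
        }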
---
 .../java/org/apache/doris/common/util/S3URI.java   |   4 +-
 .../IcebergFileSystemMetaStoreProperties.java      |   7 +-
 .../metastore/IcebergHMSMetaStoreProperties.java   |   8 +-
 .../storage/AbstractS3CompatibleProperties.java    |   3 +
 .../datasource/property/storage/GCSProperties.java | 199 +++++++++++++++++++++
 .../property/storage/StorageProperties.java        |   3 +
 .../java/org/apache/doris/fs/SchemaTypeMapper.java |   1 +
 .../org/apache/doris/fs/StorageTypeMapper.java     |   2 +
 .../doris/datasource/property/storage/GCSIT.java   | 103 +++++++++++
 .../property/storage/GCSPropertiesTest.java        |  99 ++++++++++
 .../iceberg_on_hms_and_filesystem_and_dlf.groovy   | 106 ++++++++++-
 11 files changed, 523 insertions(+), 12 deletions(-)

diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/S3URI.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/S3URI.java
index 5ef6e4024df..63ab2081110 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/S3URI.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/S3URI.java
@@ -71,10 +71,10 @@ public class S3URI {
     public static final String SCHEME_DELIM = "://";
     public static final String PATH_DELIM = "/";
     private static final Set<String> VALID_SCHEMES = ImmutableSet.of("http", "https", "s3", "s3a", "s3n",
-            "bos", "oss", "cos", "cosn", "obs", "azure");
+            "bos", "oss", "cos", "cosn", "obs", "gs", "azure");
 
     private static final Set<String> OS_SCHEMES = ImmutableSet.of("s3", "s3a", "s3n",
-            "bos", "oss", "cos", "cosn", "obs", "azure");
+            "bos", "oss", "cos", "cosn", "gs", "obs", "azure");
 
     /** Suffix of S3Express storage bucket names. */
     private static final String S3_DIRECTORY_BUCKET_SUFFIX = "--x-s3";
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/IcebergFileSystemMetaStoreProperties.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/IcebergFileSystemMetaStoreProperties.java
index 7dd97b028b2..9323d78e318 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/IcebergFileSystemMetaStoreProperties.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/IcebergFileSystemMetaStoreProperties.java
@@ -24,7 +24,6 @@ import org.apache.doris.datasource.property.storage.StorageProperties;
 
 import org.apache.commons.lang3.exception.ExceptionUtils;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.iceberg.CatalogProperties;
 import org.apache.iceberg.catalog.Catalog;
 import org.apache.iceberg.hadoop.HadoopCatalog;
 
@@ -76,8 +75,10 @@ public class IcebergFileSystemMetaStoreProperties extends AbstractIcebergPropert
         if (storagePropertiesList.size() == 1 && storagePropertiesList.get(0) instanceof HdfsProperties) {
             HdfsProperties hdfsProps = (HdfsProperties) storagePropertiesList.get(0);
             if (hdfsProps.isKerberos()) {
-                props.put(CatalogProperties.FILE_IO_IMPL,
-                        "org.apache.doris.datasource.iceberg.fileio.DelegateFileIO");
+                // NOTE: Custom FileIO implementation (KerberizedHadoopFileIO) is commented out by default.
+                // Using FileIO for Kerberos authentication may cause serialization issues when accessing
+                // Iceberg system tables (e.g., history, snapshots, manifests).
+                //props.put(CatalogProperties.FILE_IO_IMPL,"org.apache.doris.datasource.iceberg.fileio.DelegateFileIO");
                 this.executionAuthenticator = new HadoopExecutionAuthenticator(hdfsProps.getHadoopAuthenticator());
             }
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/IcebergHMSMetaStoreProperties.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/IcebergHMSMetaStoreProperties.java
index db9ec672d4a..871b3b3aec6 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/IcebergHMSMetaStoreProperties.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/IcebergHMSMetaStoreProperties.java
@@ -20,7 +20,6 @@ package org.apache.doris.datasource.property.metastore;
 import org.apache.doris.common.security.authentication.HadoopExecutionAuthenticator;
 import org.apache.doris.datasource.iceberg.IcebergExternalCatalog;
 import org.apache.doris.datasource.property.ConnectorProperty;
-import org.apache.doris.datasource.property.storage.HdfsProperties;
 import org.apache.doris.datasource.property.storage.StorageProperties;
 
 import org.apache.commons.lang3.StringUtils;
@@ -74,13 +73,16 @@ public class IcebergHMSMetaStoreProperties extends AbstractIcebergProperties {
             for (Map.Entry<String, String> entry : sp.getHadoopStorageConfig()) {
                 catalogProps.put(entry.getKey(), entry.getValue());
             }
-            if (sp instanceof HdfsProperties) {
+            // NOTE: Custom FileIO implementation (KerberizedHadoopFileIO) is commented out by default.
+            // Using FileIO for Kerberos authentication may cause serialization issues when accessing
+            // Iceberg system tables (e.g., history, snapshots, manifests).
+            /*if (sp instanceof HdfsProperties) {
                 HdfsProperties hdfsProps = (HdfsProperties) sp;
                 if (hdfsProps.isKerberos()) {
                     catalogProps.put(CatalogProperties.FILE_IO_IMPL,
                             "org.apache.doris.datasource.iceberg.fileio.DelegateFileIO");
                 }
-            }
+            }*/
         });
         try {
             this.executionAuthenticator.execute(() -> hiveCatalog.initialize(catalogName, catalogProps));
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AbstractS3CompatibleProperties.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AbstractS3CompatibleProperties.java
index 3b0bca28365..d2ae8d4260d 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AbstractS3CompatibleProperties.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AbstractS3CompatibleProperties.java
@@ -222,6 +222,9 @@ public abstract class AbstractS3CompatibleProperties extends StorageProperties i
             // Endpoint is not required, so we consider it valid if empty.
             return true;
         }
+        if (endpointPatterns().isEmpty()) {
+            return true;
+        }
         for (Pattern pattern : endpointPatterns()) {
             Matcher matcher = pattern.matcher(endpoint.toLowerCase());
             if (matcher.matches()) {
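
This early return is what the new GCSProperties relies on: its endpointPatterns() returns an empty set, so any custom GCS domain is accepted as a valid endpoint. A paraphrase of the resulting check follows (the enclosing method signature is an assumption, since the hunk does not show it in full):

    // Assumes java.util.regex.Pattern / Matcher imports.
    boolean isValidEndpoint(String endpoint) {      // method name is an assumption
        if (endpoint == null || endpoint.isEmpty()) {
            return true;                            // endpoint is optional
        }
        if (endpointPatterns().isEmpty()) {
            return true;                            // no patterns registered: accept any endpoint
        }
        for (Pattern pattern : endpointPatterns()) {
            Matcher matcher = pattern.matcher(endpoint.toLowerCase());
            if (matcher.matches()) {
                return true;
            }
        }
        return false;                               // assumed fall-through, not shown in the hunk
    }
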
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/GCSProperties.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/GCSProperties.java
new file mode 100644
index 00000000000..d99aec3a461
--- /dev/null
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/GCSProperties.java
@@ -0,0 +1,199 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.storage;
+
+import org.apache.doris.datasource.property.ConnectorProperty;
+
+import com.google.common.collect.ImmutableSet;
+import lombok.Getter;
+import lombok.Setter;
+import org.apache.commons.lang3.StringUtils;
+import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
+
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.regex.Pattern;
+
+/**
+ * Google Cloud Storage (GCS) properties based on the S3-compatible protocol.
+ *
+ * <p>
+ * Key differences and considerations:
+ * <ul>
+ *   <li>The default endpoint is {@code https://storage.googleapis.com}, which usually does not need
+ *       to be configured unless a custom domain is required.</li>
+ *   <li>The region is typically not relevant for GCS since it is mapped internally by bucket,
+ *       but may still be required when using the S3-compatible API.</li>
+ *   <li>Access Key and Secret Key are not native GCS concepts. They exist here only for compatibility
+ *       with the S3 protocol. Google recommends using OAuth2.0, Service Accounts, or other native
+ *       authentication methods instead.</li>
+ *   <li>Compatibility with older versions:
+ *       <ul>
+ *         <li>Previously, the endpoint was required. For example,
+ *             {@code gs.endpoint=https://storage.googleapis.com} is valid and backward-compatible.</li>
+ *         <li>If a custom endpoint is used (e.g., {@code https://my-custom-endpoint.com}),
+ *             the user must explicitly declare that this is GCS storage and configure the mapping.</li>
+ *       </ul>
+ *   </li>
+ *   <li>Additional authentication methods (e.g., OAuth2, Service Account) may be supported in the future.</li>
+ * </ul>
+ * </p>
+ */
+public class GCSProperties extends AbstractS3CompatibleProperties {
+
+    private static final Set<String> GS_ENDPOINT_ALIAS = ImmutableSet.of(
+            "s3.endpoint", "AWS_ENDPOINT", "endpoint", "ENDPOINT");
+
+    private static final String GCS_ENDPOINT_KEY_NAME = "gs.endpoint";
+
+
+    @Setter
+    @Getter
+    @ConnectorProperty(names = {"gs.endpoint", "s3.endpoint", "AWS_ENDPOINT", 
"endpoint", "ENDPOINT"},
+            required = false,
+            description = "The endpoint of GCS.")
+    protected String endpoint = "https://storage.googleapis.com";;
+
+    @Getter
+    protected String region = "us-east1";
+
+    @Getter
+    @ConnectorProperty(names = {"gs.access_key", "s3.access_key", 
"AWS_ACCESS_KEY", "access_key", "ACCESS_KEY"},
+            required = false,
+            description = "The access key of GCS.")
+    protected String accessKey = "";
+
+    @Getter
+    @ConnectorProperty(names = {"gs.secret_key", "s3.secret_key", 
"AWS_SECRET_KEY", "secret_key", "SECRET_KEY"},
+            required = false,
+            description = "The secret key of GCS.")
+    protected String secretKey = "";
+
+    @Getter
+    @ConnectorProperty(names = {"gs.session_token", "s3.session_token", 
"session_token"},
+            required = false,
+            description = "The session token of GCS.")
+    protected String sessionToken = "";
+
+    /**
+     * The maximum number of concurrent connections that can be made to the object storage system.
+     * This value is optional and can be configured by the user.
+     */
+    @Getter
+    @ConnectorProperty(names = {"gs.connection.maximum", 
"s3.connection.maximum"}, required = false,
+            description = "Maximum number of connections.")
+    protected String maxConnections = "100";
+
+    /**
+     * The timeout (in milliseconds) for requests made to the object storage system.
+     * This value is optional and can be configured by the user.
+     */
+    @Getter
+    @ConnectorProperty(names = {"gs.connection.request.timeout", "s3.connection.request.timeout"}, required = false,
+            description = "Request timeout in milliseconds.")
+    protected String requestTimeoutS = "10000";
+
+    /**
+     * The timeout (in milliseconds) for establishing a connection to the object storage system.
+     * This value is optional and can be configured by the user.
+     */
+    @Getter
+    @ConnectorProperty(names = {"gs.connection.timeout", "s3.connection.timeout"}, required = false,
+            description = "Connection timeout in milliseconds.")
+    protected String connectionTimeoutS = "10000";
+
+    /**
+     * Flag indicating whether to use path-style URLs for the object storage system.
+     * This value is optional and can be configured by the user.
+     */
+    @Setter
+    @Getter
+    @ConnectorProperty(names = {"gs.use_path_style", "use_path_style", 
"s3.path-style-access"}, required = false,
+            description = "Whether to use path style URL for the storage.")
+    protected String usePathStyle = "false";
+
+    @ConnectorProperty(names = {"gs.force_parsing_by_standard_uri", 
"force_parsing_by_standard_uri"}, required = false,
+            description = "Whether to use path style URL for the storage.")
+    @Setter
+    @Getter
+    protected String forceParsingByStandardUrl = "false";
+
+    /**
+     * Constructor to initialize the object storage properties with the provided type and original properties map.
+     *
+     * @param origProps the original properties map.
+     */
+    protected GCSProperties(Map<String, String> origProps) {
+        super(Type.GCS, origProps);
+    }
+
+    public static boolean guessIsMe(Map<String, String> props) {
+        // Check for GCS-specific keys, ignoring case.
+        if (props.containsKey(GCS_ENDPOINT_KEY_NAME) && StringUtils.isNotBlank(props.get(GCS_ENDPOINT_KEY_NAME))) {
+            return true;
+        }
+        String endpoint;
+        for (String key : props.keySet()) {
+            if (GS_ENDPOINT_ALIAS.contains(key.toLowerCase())) {
+                endpoint = props.get(key);
+                if (StringUtils.isNotBlank(endpoint) && endpoint.toLowerCase().endsWith("storage.googleapis.com")) {
+                    return true;
+                }
+            }
+        }
+        return false;
+    }
+
+    @Override
+    protected Set<Pattern> endpointPatterns() {
+        return new HashSet<>();
+    }
+
+
+    @Override
+    public void setRegion(String region) {
+        this.region = region;
+    }
+
+    @Override
+    public void initializeHadoopStorageConfig() {
+        super.initializeHadoopStorageConfig();
+        hadoopStorageConfig.set("fs.gs.impl", 
"org.apache.hadoop.fs.s3a.S3AFileSystem");
+    }
+
+    public Map<String, String> getBackendConfigProperties() {
+        Map<String, String> backendProperties = generateBackendS3Configuration();
+        backendProperties.put("provider", "GCP");
+        return backendProperties;
+    }
+
+    @Override
+    public AwsCredentialsProvider getAwsCredentialsProvider() {
+        AwsCredentialsProvider credentialsProvider = super.getAwsCredentialsProvider();
+        if (credentialsProvider != null) {
+            return credentialsProvider;
+        }
+        if (StringUtils.isBlank(accessKey) && StringUtils.isBlank(secretKey)) {
+            // For anonymous access (no credentials required)
+            return AnonymousCredentialsProvider.create();
+        }
+        return null;
+    }
+}
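
The getAwsCredentialsProvider() override above returns anonymous credentials when both keys are blank. A sketch of how such a provider could be handed to an AWS SDK v2 client pointed at the GCS S3-compatible endpoint; the client wiring below is illustrative only and not part of this patch:

    import java.net.URI;
    import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider;
    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.S3Configuration;

    // Anonymous client against GCS's S3-compatible (XML) API.
    S3Client gcs = S3Client.builder()
            .endpointOverride(URI.create("https://storage.googleapis.com"))
            .region(Region.of("us-east1"))            // nominal; GCS resolves location per bucket
            .credentialsProvider(AnonymousCredentialsProvider.create())
            .serviceConfiguration(S3Configuration.builder()
                    .pathStyleAccessEnabled(false)    // mirrors the gs.use_path_style default
                    .build())
            .build();
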
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/StorageProperties.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/StorageProperties.java
index 274343ad02d..2ce87f9ffb9 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/StorageProperties.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/StorageProperties.java
@@ -56,6 +56,7 @@ public abstract class StorageProperties extends ConnectionProperties {
         OSS,
         OBS,
         COS,
+        GCS,
         OSS_HDFS,
         MINIO,
         AZURE,
@@ -171,6 +172,8 @@ public abstract class StorageProperties extends ConnectionProperties {
                             || OBSProperties.guessIsMe(props)) ? new OBSProperties(props) : null,
                     props -> (isFsSupport(props, FS_COS_SUPPORT)
                             || COSProperties.guessIsMe(props)) ? new COSProperties(props) : null,
+                    props -> (isFsSupport(props, FS_GCS_SUPPORT)
+                            || GCSProperties.guessIsMe(props)) ? new GCSProperties(props) : null,
                     props -> (isFsSupport(props, FS_AZURE_SUPPORT)
                             || AzureProperties.guessIsMe(props)) ? new AzureProperties(props) : null,
                     props -> (isFsSupport(props, FS_MINIO_SUPPORT)
diff --git a/fe/fe-core/src/main/java/org/apache/doris/fs/SchemaTypeMapper.java b/fe/fe-core/src/main/java/org/apache/doris/fs/SchemaTypeMapper.java
index 8b54250ed59..03240f74367 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/fs/SchemaTypeMapper.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/fs/SchemaTypeMapper.java
@@ -65,6 +65,7 @@ public enum SchemaTypeMapper {
     OSS("oss", StorageProperties.Type.OSS, FileSystemType.S3, 
TFileType.FILE_S3),
     OBS("obs", StorageProperties.Type.OBS, FileSystemType.S3, 
TFileType.FILE_S3),
     COS("cos", StorageProperties.Type.COS, FileSystemType.S3, 
TFileType.FILE_S3),
+    GCS("gs", StorageProperties.Type.GCS, FileSystemType.S3, 
TFileType.FILE_S3),
     //MINIO("minio", StorageProperties.Type.MINIO),
     /*
      * Only secure protocols are supported to ensure safe access to Azure storage services.
diff --git a/fe/fe-core/src/main/java/org/apache/doris/fs/StorageTypeMapper.java b/fe/fe-core/src/main/java/org/apache/doris/fs/StorageTypeMapper.java
index 476d14e7198..aacb098102f 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/fs/StorageTypeMapper.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/fs/StorageTypeMapper.java
@@ -20,6 +20,7 @@ package org.apache.doris.fs;
 import org.apache.doris.datasource.property.storage.AzureProperties;
 import org.apache.doris.datasource.property.storage.BrokerProperties;
 import org.apache.doris.datasource.property.storage.COSProperties;
+import org.apache.doris.datasource.property.storage.GCSProperties;
 import org.apache.doris.datasource.property.storage.HdfsProperties;
 import org.apache.doris.datasource.property.storage.MinioProperties;
 import org.apache.doris.datasource.property.storage.OBSProperties;
@@ -43,6 +44,7 @@ public enum StorageTypeMapper {
     MINIO(MinioProperties.class, S3FileSystem::new),
     AZURE(AzureProperties.class, AzureFileSystem::new),
     S3(S3Properties.class, S3FileSystem::new),
+    GCS(GCSProperties.class, S3FileSystem::new),
     HDFS(HdfsProperties.class, DFSFileSystem::new),
     BROKER(BrokerProperties.class, BrokerFileSystem::new),
     OSS_HDFS(OSSHdfsProperties.class, DFSFileSystem::new);
diff --git a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/GCSIT.java b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/GCSIT.java
new file mode 100644
index 00000000000..33ef5ca5d0b
--- /dev/null
+++ b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/GCSIT.java
@@ -0,0 +1,103 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.storage;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+/**
+ * Integration test for Google Cloud Storage (GCS) using S3AFileSystem.
+ *
+ * <p>
+ * This test is configurable via system properties for endpoint, region,
+ * access key, secret key, and GCS URI. Defaults are provided for convenience.
+ * </p>
+ *
+ * <p>
+ * Example usage:
+ * <pre>
+ * mvn test -Dgcs.endpoint=https://storage.googleapis.com \
+ *          -Dgcs.accessKey=ccc \
+ *          -Dgcs.secretKey=xxxx \
+ *          -Dgcs.uri=gs://wd-test123/sales_data/2025-04-08/part-000000000000.parquet
+ * </pre>
+ * </p>
+ */
+@Disabled("Disabled by default. Enable and configure to run against GCS.")
+public class GCSIT {
+
+    // Configurable parameters
+    private String endpoint;
+    private String region;
+    private String accessKey;
+    private String secretKey;
+    private String gcsUri;
+
+    private Configuration hadoopConfig;
+
+    /**
+     * Setup method to initialize Hadoop configuration before each test.
+     * Values can be overridden via system properties.
+     */
+    @BeforeEach
+    public void setUp() {
+        // Load configuration from system properties or use default values
+        endpoint = System.getProperty("gcs.endpoint", "https://storage.googleapis.com");
+        region = System.getProperty("gcs.region", "us-east1");
+        accessKey = System.getProperty("gcs.accessKey", "your-access-key");
+        secretKey = System.getProperty("gcs.secretKey", "your-secret-key");
+        gcsUri = System.getProperty("gcs.uri", "gs://your-bucket/path/to/file.parquet");
+
+        // Hadoop configuration for S3AFileSystem
+        hadoopConfig = new Configuration();
+        hadoopConfig.set("fs.gs.impl", 
"org.apache.hadoop.fs.s3a.S3AFileSystem");
+        hadoopConfig.set("fs.s3a.impl", 
"org.apache.hadoop.fs.s3a.S3AFileSystem");
+        hadoopConfig.set("fs.s3a.endpoint", endpoint);
+        hadoopConfig.set("fs.s3a.endpoint.region", region);
+        hadoopConfig.set("fs.s3a.access.key", accessKey);
+        hadoopConfig.set("fs.s3a.secret.key", secretKey);
+        hadoopConfig.set("fs.defaultFS", gcsUri);
+    }
+
+    /**
+     * Test to verify if a GCS file exists and print filesystem scheme.
+     *
+     * @throws URISyntaxException if the GCS URI is invalid
+     * @throws IOException if an I/O error occurs
+     */
+    @Disabled
+    @Test
+    public void testGCSFileExists() throws URISyntaxException, IOException {
+        FileSystem fs = FileSystem.get(new URI(gcsUri), hadoopConfig);
+
+        System.out.println("FileSystem scheme: " + fs.getScheme());
+        System.out.println("File exists: " + fs.exists(new Path(gcsUri)));
+
+        // Assert that the file exists
+        Assertions.assertTrue(fs.exists(new Path(gcsUri)), "File should exist in GCS");
+    }
+}
diff --git a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/GCSPropertiesTest.java b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/GCSPropertiesTest.java
new file mode 100644
index 00000000000..dd986134a1c
--- /dev/null
+++ b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/GCSPropertiesTest.java
@@ -0,0 +1,99 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.storage;
+
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class GCSPropertiesTest {
+
+    private Map<String, String> origProps;
+
+    @BeforeEach
+    public void setUp() {
+        origProps = new HashMap<>();
+    }
+
+    @Test
+    public void testGuessIsMeByEndpoint() {
+        origProps.put("gs.endpoint", "https://storage.googleapis.com";);
+        Assertions.assertTrue(GCSProperties.guessIsMe(origProps));
+        origProps.clear();
+        origProps.put("s3.endpoint", "https://storage.googleapis.com";);
+        Assertions.assertTrue(GCSProperties.guessIsMe(origProps));
+        origProps.clear();
+        origProps.put("endpoint", "https://my.custom.endpoint.com";);
+        Assertions.assertFalse(GCSProperties.guessIsMe(origProps));
+        origProps.put("gs.endpoint", "https://my.custom.endpoint.com";);
+        Assertions.assertTrue(GCSProperties.guessIsMe(origProps));
+    }
+
+    @Test
+    public void testDefaultValues() {
+        origProps.put("gs.endpoint", "https://storage.googleapis.com";);
+        origProps.put("gs.access_key", "myAccessKey");
+        origProps.put("gs.secret_key", "mySecretKey");
+        GCSProperties gcsProperties = (GCSProperties) StorageProperties.createPrimary(origProps);
+        Assertions.assertEquals("https://storage.googleapis.com", gcsProperties.getEndpoint());
+        Assertions.assertEquals("us-east1", gcsProperties.getRegion()); // default
+        Assertions.assertEquals("myAccessKey", gcsProperties.getAccessKey());
+        Assertions.assertEquals("mySecretKey", gcsProperties.getSecretKey());
+        Assertions.assertEquals("false", gcsProperties.getUsePathStyle());
+    }
+
+    @Test
+    public void testOverrideRegionAndPathStyle() {
+        origProps.put("gs.endpoint", "https://storage.googleapis.com";);
+        origProps.put("gs.access_key", "myAccessKey");
+        origProps.put("gs.secret_key", "mySecretKey");
+        origProps.put("gs.use_path_style", "true");
+
+        GCSProperties gcsProperties = (GCSProperties) StorageProperties.createPrimary(origProps);
+        gcsProperties.setRegion("asia-northeast1");
+
+        Assertions.assertEquals("asia-northeast1", gcsProperties.getRegion());
+        Assertions.assertEquals("true", gcsProperties.getUsePathStyle());
+    }
+
+    @Test
+    public void testGenerateBackendS3Configuration() {
+        origProps.put("gs.endpoint", "https://storage.googleapis.com";);
+        origProps.put("gs.access_key", "myAccessKey");
+        origProps.put("gs.secret_key", "mySecretKey");
+        origProps.put("gs.connection.maximum", "200");
+        origProps.put("gs.connection.request.timeout", "999");
+        origProps.put("gs.connection.timeout", "888");
+        origProps.put("gs.use_path_style", "true");
+
+        GCSProperties gcsProperties = (GCSProperties) StorageProperties.createPrimary(origProps);
+        Map<String, String> s3Props = gcsProperties.generateBackendS3Configuration();
+
+        Assertions.assertEquals("https://storage.googleapis.com", s3Props.get("AWS_ENDPOINT"));
+        Assertions.assertEquals("us-east1", s3Props.get("AWS_REGION"));
+        Assertions.assertEquals("myAccessKey", s3Props.get("AWS_ACCESS_KEY"));
+        Assertions.assertEquals("mySecretKey", s3Props.get("AWS_SECRET_KEY"));
+        Assertions.assertEquals("200", s3Props.get("AWS_MAX_CONNECTIONS"));
+        Assertions.assertEquals("999", s3Props.get("AWS_REQUEST_TIMEOUT_MS"));
+        Assertions.assertEquals("888", s3Props.get("AWS_CONNECTION_TIMEOUT_MS"));
+        Assertions.assertEquals("true", s3Props.get("use_path_style"));
+    }
+}
diff --git a/regression-test/suites/external_table_p2/refactor_catalog_param/iceberg_on_hms_and_filesystem_and_dlf.groovy b/regression-test/suites/external_table_p2/refactor_catalog_param/iceberg_on_hms_and_filesystem_and_dlf.groovy
index fa0a82ebfe6..17a6b414461 100644
--- a/regression-test/suites/external_table_p2/refactor_catalog_param/iceberg_on_hms_and_filesystem_and_dlf.groovy
+++ b/regression-test/suites/external_table_p2/refactor_catalog_param/iceberg_on_hms_and_filesystem_and_dlf.groovy
@@ -65,10 +65,108 @@ suite("iceberg_on_hms_and_filesystem_and_dlf", "p2,external,new_catalog_property
             SELECT * FROM ${table_name};
         """
         assert queryResult.size() == 1
+        def branch_name = prefix + "_branch"
+        def tag_name = prefix + "_tag"
+        sql """
+            ALTER TABLE ${table_name} CREATE BRANCH ${branch_name};
+        """
+        sql """
+            ALTER TABLE ${table_name} CREATE TAG ${tag_name};
+        """
+        sql """
+            INSERT OVERWRITE  TABLE ${table_name} VALUES (1, 'a', 10),(2, 'b', 20), (3, 'c', 30)
+        """
+        def originalQueryResult = sql """
+            SELECT * FROM ${table_name};
+        """
+        assert originalQueryResult.size() == 3
+        sql """
+            insert into ${table_name}@branch(${branch_name}) values (4, 'd', 40)
+        """
+        def branchQueryResult = sql """
+            SELECT * FROM ${table_name}@branch(${branch_name});
+        """
+        assert branchQueryResult.size() == 2
+
+
+        def tagQueryResult = sql """
+            SELECT * FROM ${table_name}@tag(${tag_name});
+        """
+        assert tagQueryResult.size() == 1
+        sql """
+            ALTER TABLE ${table_name} drop branch ${branch_name};
+        """
+        sql """
+            ALTER TABLE ${table_name} drop tag ${tag_name};
+        """
+        try {
+            def sys_query_result = sql """
+            SELECT * FROM ${table_name}\$files;
+        """
+            println sys_query_result
+            println "iceberg_meta_result SUCCESS" + catalog_name
+
+            def iceberg_meta_result = sql """
+        SELECT snapshot_id FROM iceberg_meta(
+                'table' = '${catalog_name}.${db_name}.${table_name}',
+                'query_type' = 'snapshots'
+        ) order by committed_at desc;
+        
+        """
+            def first_snapshot_id = iceberg_meta_result.get(0).get(0);
+            def time_travel = sql """
+            SELECT * FROM ${table_name} FOR VERSION AS OF ${first_snapshot_id};
+        """
+            println time_travel
+
+            println "iceberg_time_travel SUCCESS" + catalog_name
+        } catch (Exception e) {
+            println catalog_name + " system info error"
+        }
+
 
         sql """
             DROP TABLE ${table_name};
         """
+        //partition table
+        table_name = prefix + "_partition_table"
+        sql """
+            CREATE TABLE ${table_name} (
+              `ts` DATETIME COMMENT 'ts',
+              `col1` BOOLEAN COMMENT 'col1',
+              `col2` INT COMMENT 'col2',
+              `col3` BIGINT COMMENT 'col3',
+              `col4` FLOAT COMMENT 'col4',
+              `col5` DOUBLE COMMENT 'col5',
+              `col6` DECIMAL(9,4) COMMENT 'col6',
+              `col7` STRING COMMENT 'col7',
+              `col8` DATE COMMENT 'col8',
+              `col9` DATETIME COMMENT 'col9',
+              `pt1` STRING COMMENT 'pt1',
+              `pt2` STRING COMMENT 'pt2'
+            )
+            PARTITION BY LIST (day(ts), pt1, pt2) ()
+            PROPERTIES (
+              'write-format'='orc',
+              'compression-codec'='zlib'
+            );
+        """
+
+        sql """
+            INSERT OVERWRITE  TABLE ${table_name} values
+            ('2023-01-01 00:00:00', true, 1, 1, 1.0, 1.0, 1.0000, '1', '2023-01-01', '2023-01-01 00:00:00', 'a', '1'),
+            ('2023-01-02 00:00:00', false, 2, 2, 2.0, 2.0, 2.0000, '2', '2023-01-02', '2023-01-02 00:00:00', 'b', '2'),
+            ('2023-01-03 00:00:00', true, 3, 3, 3.0, 3.0, 3.0000, '3', '2023-01-03', '2023-01-03 00:00:00', 'c', '3');
+        """
+        def partitionQueryResult = sql """
+            SELECT * FROM ${table_name} WHERE pt1='a' and pt2='1';
+        """
+        assert partitionQueryResult.size() == 1
+
+        sql """
+            DROP TABLE ${table_name};
+        """
+
         sql """
             DROP DATABASE ${db_name} FORCE;
         """
@@ -232,7 +330,7 @@ suite("iceberg_on_hms_and_filesystem_and_dlf", "p2,external,new_catalog_property
     testQueryAndInsert(iceberg_hms_type_prop + hms_kerberos_new_prop+ warehouse + oss_storage_properties, "iceberg_hms_on_oss_kerberos_new")
 
 
-   /*--------HMS on COS-----------*/
+    /*--------HMS on COS-----------*/
     warehouse = """
                    'warehouse' = 'cosn://${cos_parent_path}/iceberg-hms-cos-warehouse',
     """
@@ -270,7 +368,7 @@ suite("iceberg_on_hms_and_filesystem_and_dlf", "p2,external,new_catalog_property
       """
     testQueryAndInsert(iceberg_hms_type_prop + hms_prop
             + warehouse + s3_storage_properties, "iceberg_hms_on_s3")
- 
+
     /*--------HMS on HDFS-----------*/
     warehouse = """
      'warehouse' = '${hdfs_parent_path}/iceberg-hms-hdfs-warehouse',
@@ -281,10 +379,10 @@ suite("iceberg_on_hms_and_filesystem_and_dlf", "p2,external,new_catalog_property
      'warehouse' = 'hdfs://${externalEnvIp}:8520/iceberg-hms-hdfs-warehouse',
     """
     //old kerberos
-   testQueryAndInsert(iceberg_hms_type_prop + hms_kerberos_old_prop_not_include_kerberos_prop+ warehouse + hdfs_kerberos_properties, "iceberg_hms_on_hdfs_kerberos_old")
+    testQueryAndInsert(iceberg_hms_type_prop + hms_kerberos_old_prop_not_include_kerberos_prop+ warehouse + hdfs_kerberos_properties, "iceberg_hms_on_hdfs_kerberos_old")
     //new  kerberos
     testQueryAndInsert(iceberg_hms_type_prop + hms_kerberos_new_prop + warehouse + hdfs_kerberos_properties, "iceberg_hms_on_hdfs_kerberos_hdfs")
-     
+
 
     /*--------HMS END-----------*/
 


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

