This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
     new 089ef91067d branch-3.0: [opt](storage vault) Check `s3.root.path` cannot be empty #47078 (#47196)
089ef91067d is described below

commit 089ef91067df9fd374ed61849423ff86258189f4
Author: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
AuthorDate: Sun Jan 19 17:55:16 2025 +0800

    branch-3.0: [opt](storage vault) Check `s3.root.path` cannot be empty #47078 (#47196)
    
    Cherry-picked from #47078
    
    Co-authored-by: Lei Zhang <zhang...@selectdb.com>
---
 cloud/src/meta-service/meta_service_resource.cpp   |  3 +-
 .../property/constants/S3Properties.java           |  2 +
 .../vault_p0/create/test_create_vault.groovy       | 63 ++++++++++++++++++++++
 3 files changed, 67 insertions(+), 1 deletion(-)

diff --git a/cloud/src/meta-service/meta_service_resource.cpp b/cloud/src/meta-service/meta_service_resource.cpp
index d873dec7b21..a5b107534ad 100644
--- a/cloud/src/meta-service/meta_service_resource.cpp
+++ b/cloud/src/meta-service/meta_service_resource.cpp
@@ -1287,7 +1287,8 @@ void MetaServiceImpl::alter_obj_store_info(google::protobuf::RpcController* cont
             return;
         }
         // ATTN: prefix may be empty
-        if (ak.empty() || sk.empty() || bucket.empty() || endpoint.empty() || region.empty()) {
+        if (ak.empty() || sk.empty() || bucket.empty() || endpoint.empty() || region.empty() ||
+            prefix.empty()) {
             code = MetaServiceCode::INVALID_ARGUMENT;
             msg = "s3 conf info err, please check it";
             return;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/constants/S3Properties.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/constants/S3Properties.java
index 7e94d81518e..d5eb0a1f1b3 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/constants/S3Properties.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/constants/S3Properties.java
@@ -323,6 +323,8 @@ public class S3Properties extends BaseProperties {
             builder.setSk(properties.get(S3Properties.SECRET_KEY));
         }
         if (properties.containsKey(S3Properties.ROOT_PATH)) {
+            Preconditions.checkArgument(!Strings.isNullOrEmpty(properties.get(S3Properties.ROOT_PATH)),
+                    "%s cannot be empty", S3Properties.ROOT_PATH);
             builder.setPrefix(properties.get(S3Properties.ROOT_PATH));
         }
         if (properties.containsKey(S3Properties.BUCKET)) {
diff --git a/regression-test/suites/vault_p0/create/test_create_vault.groovy b/regression-test/suites/vault_p0/create/test_create_vault.groovy
index b545b3eff79..49ea2565cc6 100644
--- a/regression-test/suites/vault_p0/create/test_create_vault.groovy
+++ b/regression-test/suites/vault_p0/create/test_create_vault.groovy
@@ -15,6 +15,9 @@
 // specific language governing permissions and limitations
 // under the License.
 
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
 suite("test_create_vault", "nonConcurrent") {
     def suiteName = name;
     if (!isCloudMode()) {
@@ -31,6 +34,48 @@ suite("test_create_vault", "nonConcurrent") {
     def s3VaultName = "s3_" + randomStr
     def hdfsVaultName = "hdfs_" + randomStr
 
+    def length64Str = Stream.generate(() -> String.valueOf('a'))
+                     .limit(32) 
+                     .collect(Collectors.joining()) + randomStr
+
+    def exceed64LengthStr = length64Str + "a"
+
+    // test long length storage vault
+    expectExceptionLike({
+        sql """
+            CREATE STORAGE VAULT ${exceed64LengthStr}
+            PROPERTIES (
+                "type"="S3",
+                "fs.defaultFS"="${getHmsHdfsFs()}",
+                "path_prefix" = "${exceed64LengthStr}",
+                "hadoop.username" = "${getHmsUser()}"
+            );
+           """
+    }, "Incorrect vault name")
+
+    sql """
+        CREATE STORAGE VAULT ${length64Str}
+        PROPERTIES (
+            "type"="HDFS",
+            "fs.defaultFS"="${getHmsHdfsFs()}",
+            "path_prefix" = "${length64Str}",
+            "hadoop.username" = "${getHmsUser()}"
+        );
+    """
+
+    expectExceptionLike({
+        sql """
+            CREATE STORAGE VAULT ${s3VaultName}
+            PROPERTIES (
+                "type"="S3",
+                "fs.defaultFS"="${getHmsHdfsFs()}",
+                "path_prefix" = "${s3VaultName}",
+                "hadoop.username" = "${getHmsUser()}"
+            );
+           """
+    }, "Missing [s3.endpoint] in properties")
+
+
     expectExceptionLike({
         sql """
             CREATE STORAGE VAULT ${s3VaultName}
@@ -63,6 +108,24 @@ suite("test_create_vault", "nonConcurrent") {
         """
     }, "Storage vault 'not_exist_vault' does not exist")
 
+    // test s3.root.path cannot be empty
+    expectExceptionLike({
+        sql """
+            CREATE STORAGE VAULT ${s3VaultName}
+            PROPERTIES (
+                "type"="S3",
+                "s3.endpoint"="${getS3Endpoint()}",
+                "s3.region" = "${getS3Region()}",
+                "s3.access_key" = "${getS3AK()}",
+                "s3.secret_key" = "${getS3SK()}",
+                "s3.root.path" = "",
+                "s3.bucket" = "${getS3BucketName()}",
+                "s3.external_endpoint" = "",
+                "provider" = "${getS3Provider()}",
+                "use_path_style" = "false"
+            );
+        """
+    }, "cannot be empty")
 
     // test `if not exist` and dup name s3 vault
     sql """


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org

Reply via email to