This is an automated email from the ASF dual-hosted git repository.

morrysnow pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
     new e9a1095371a branch-3.1: [test](iceberg) add polaris test #55484 (#55557)
e9a1095371a is described below

commit e9a1095371a2271030de62e958e5021438b1e495
Author: zy-kkk <[email protected]>
AuthorDate: Fri Sep 5 18:19:56 2025 +0800

    branch-3.1: [test](iceberg) add polaris test #55484 (#55557)
    
    bp #55484
---
 .../docker-compose/polaris/docker-compose.yaml.tpl | 121 ++++++++
 .../docker-compose/polaris/init-catalog.sh         | 145 +++++++++
 .../docker-compose/polaris/polaris_settings.env    |  37 +++
 docker/thirdparties/run-thirdparties-docker.sh     |  31 +-
 regression-test/conf/regression-conf.groovy        |   4 +
 .../pipeline/external/conf/regression-conf.groovy  |   4 +
 .../nonConcurrent/conf/regression-conf.groovy      |   4 +
 .../pipeline/p0/conf/regression-conf.groovy        |   4 +
 .../external_table_p0/polaris/test_polaris.groovy  | 330 +++++++++++++++++++++
 9 files changed, 678 insertions(+), 2 deletions(-)

diff --git a/docker/thirdparties/docker-compose/polaris/docker-compose.yaml.tpl b/docker/thirdparties/docker-compose/polaris/docker-compose.yaml.tpl
new file mode 100644
index 00000000000..bb2effb2e4c
--- /dev/null
+++ b/docker/thirdparties/docker-compose/polaris/docker-compose.yaml.tpl
@@ -0,0 +1,121 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+version: "3.8"
+
+services:
+  # MinIO: S3 compatible object storage for local dev
+  minio:
+    image: minio/minio:RELEASE.2025-01-20T14-49-07Z
+    container_name: ${CONTAINER_UID}polaris-minio
+    ports:
+      - "${MINIO_API_PORT}:9000"
+      - "${MINIO_CONSOLE_PORT}:9001"
+    environment:
+      - MINIO_ROOT_USER=${MINIO_ACCESS_KEY}
+      - MINIO_ROOT_PASSWORD=${MINIO_SECRET_KEY}
+      - MINIO_DOMAIN=minio
+    command: ["server", "/data", "--console-address", ":9001"]
+    healthcheck:
+      test: [ "CMD", "curl", "-f", "http://localhost:9000/minio/health/ready"; ]
+      interval: 10s
+      timeout: 60s
+      retries: 30
+    networks:
+      ${CONTAINER_UID}polaris:
+        aliases:
+          - warehouse.minio
+
+  # MinIO client to bootstrap bucket and path
+  minio-client:
+    image: minio/mc:RELEASE.2025-01-17T23-25-50Z
+    container_name: ${CONTAINER_UID}polaris-mc
+    depends_on:
+      minio:
+        condition: service_healthy
+    entrypoint: >
+      /bin/sh -c "
+      until (mc alias set minio http://minio:9000 ${MINIO_ACCESS_KEY} ${MINIO_SECRET_KEY}) do echo '...waiting...' && sleep 1; done;
+      mc rm -r --force minio/${MINIO_BUCKET} || echo 'warehouse bucket does not exist yet, continuing...';
+      mc mb minio/${MINIO_BUCKET} || echo 'warehouse bucket already exists, skipping...';
+      mc anonymous set public minio/${MINIO_BUCKET};
+      echo 'MinIO setup completed successfully';
+      "
+    networks:
+      - ${CONTAINER_UID}polaris
+
+  # Polaris REST catalog backed by MinIO (S3-compatible)
+  polaris:
+    image: apache/polaris:1.0.1-incubating
+    container_name: ${CONTAINER_UID}polaris
+    depends_on:
+      minio:
+        condition: service_healthy
+      minio-client:
+        condition: service_completed_successfully
+    ports:
+      - "${POLARIS_S3_PORT}:8181"
+      - "${POLARIS_S3_ADMIN_PORT}:8182"
+    environment:
+      # Basic configuration
+      POLARIS_BOOTSTRAP_CREDENTIALS: default-realm,root,${POLARIS_BOOTSTRAP_PASSWORD}
+      polaris.features.DROP_WITH_PURGE_ENABLED: "true"
+      polaris.realm-context.realms: default-realm
+      
+      # MinIO credentials and endpoints (S3-compatible)
+      AWS_REGION: ${AWSRegion}
+      AWS_ACCESS_KEY_ID: ${MINIO_ACCESS_KEY}
+      AWS_SECRET_ACCESS_KEY: ${MINIO_SECRET_KEY}
+      AWS_ENDPOINT_URL_S3: http://minio:9000
+      AWS_ENDPOINT_URL_STS: http://minio:9000
+      
+      # Logging configuration
+      QUARKUS_LOG_LEVEL: INFO
+      QUARKUS_LOG_CATEGORY_"org.apache.polaris".LEVEL: DEBUG
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:8182/q/health";]
+      interval: 15s
+      timeout: 10s
+      retries: 5
+      start_period: 30s
+    networks:
+      - ${CONTAINER_UID}polaris
+
+  # Initialize a Polaris INTERNAL catalog pointing to MinIO
+  polaris-init:
+    image: curlimages/curl:8.11.1
+    container_name: ${CONTAINER_UID}polaris-init
+    depends_on:
+      polaris:
+        condition: service_healthy
+    environment:
+      POLARIS_HOST: polaris
+      POLARIS_PORT: 8181
+      POLARIS_BOOTSTRAP_USER: root
+      POLARIS_BOOTSTRAP_PASSWORD: ${POLARIS_BOOTSTRAP_PASSWORD}
+      POLARIS_CATALOG_NAME: ${POLARIS_CATALOG_NAME}
+      CATALOG_BASE_LOCATION: ${CATALOG_BASE_LOCATION}
+      AWSRegion: ${AWSRegion}
+    volumes:
+      - ./init-catalog.sh:/init-catalog.sh:ro
+    entrypoint: ["/bin/sh","-c","/init-catalog.sh"]
+    networks:
+      - ${CONTAINER_UID}polaris
+
+networks:
+  ${CONTAINER_UID}polaris:
+    name: ${CONTAINER_UID}polaris
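
For reference, start_polaris() in run-thirdparties-docker.sh below renders this
template and brings the stack up. The same can be done by hand with a sketch
like the following, assuming envsubst and docker compose v2 are on PATH and
CONTAINER_UID is any unique prefix:

    export CONTAINER_UID=doris--
    cd docker/thirdparties/docker-compose/polaris
    . ./polaris_settings.env
    envsubst <docker-compose.yaml.tpl >docker-compose.yaml
    sudo docker compose -f docker-compose.yaml up -d --wait --remove-orphans
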
diff --git a/docker/thirdparties/docker-compose/polaris/init-catalog.sh b/docker/thirdparties/docker-compose/polaris/init-catalog.sh
new file mode 100755
index 00000000000..124e41845be
--- /dev/null
+++ b/docker/thirdparties/docker-compose/polaris/init-catalog.sh
@@ -0,0 +1,145 @@
+#!/bin/sh
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eu
+
+HOST=${POLARIS_HOST:-polaris-s3}
+PORT=${POLARIS_PORT:-8181}
+USER=${POLARIS_BOOTSTRAP_USER:-root}
+PASS=${POLARIS_BOOTSTRAP_PASSWORD:-secret123}
+CATALOG=${POLARIS_CATALOG_NAME:-minio}
+BASE_LOCATION=${CATALOG_BASE_LOCATION:-s3://warehouse/wh/}
+
+echo "[polaris-init] Waiting for Polaris health check at 
http://$HOST:$PORT/q/health ..."
+for i in $(seq 1 120); do
+  if curl -sSf "http://$HOST:8182/q/health"; >/dev/null; then
+    break
+  fi
+  sleep 2
+done
+
+echo "[polaris-init] Fetching OAuth token via client_credentials ..."
+# Try to obtain token using correct OAuth endpoint
+TOKEN_JSON=$(curl -sS \
+  -X POST "http://$HOST:$PORT/api/catalog/v1/oauth/tokens" \
+  -H 'Content-Type: application/x-www-form-urlencoded' \
+  -d "grant_type=client_credentials&client_id=$USER&client_secret=$PASS&scope=PRINCIPAL_ROLE:ALL")
+
+# Extract access_token field
+TOKEN=$(printf "%s" "$TOKEN_JSON" | sed -n 's/.*"access_token"\s*:\s*"\([^"]*\)".*/\1/p')
+
+if [ -z "$TOKEN" ]; then
+  echo "[polaris-init] ERROR: Failed to obtain OAuth token. Response: 
$TOKEN_JSON" >&2
+  exit 1
+fi
+
+echo "[polaris-init] Creating catalog '$CATALOG' with base '$BASE_LOCATION' 
..."
+CREATE_PAYLOAD=$(cat <<JSON
+{
+  "name": "$CATALOG",
+  "type": "INTERNAL",
+  "properties": {
+    "default-base-location": "$BASE_LOCATION",
+    "s3.endpoint": "http://minio:9000";,
+    "s3.path-style-access": "true",
+    "s3.access-key-id": "admin",
+    "s3.secret-access-key": "password",
+    "s3.region": "${AWSRegion:-us-east-1}"
+  },
+  "storageConfigInfo": {
+    "roleArn": "arn:aws:iam::000000000000:role/minio-polaris-role",
+    "storageType": "S3",
+    "allowedLocations": ["$BASE_LOCATION"]
+  }
+}
+JSON
+)
+
+# Try create; on 409 Conflict, treat as success
+HTTP_CODE=$(curl -sS -o /tmp/resp.json -w "%{http_code}" \
+  -X POST "http://$HOST:$PORT/api/management/v1/catalogs"; \
+  -H "Authorization: Bearer $TOKEN" \
+  -H "Content-Type: application/json" \
+  -d "$CREATE_PAYLOAD")
+
+if [ "$HTTP_CODE" = "201" ]; then
+  echo "[polaris-init] Catalog created."
+elif [ "$HTTP_CODE" = "409" ]; then
+  echo "[polaris-init] Catalog already exists. Skipping."
+else
+  echo "[polaris-init] Create catalog failed (HTTP $HTTP_CODE):"
+  cat /tmp/resp.json || true
+  exit 1
+fi
+
+echo "[polaris-init] Setting up permissions for catalog '$CATALOG' ..."
+
+# Create a catalog admin role grants
+echo "[polaris-init] Creating catalog admin role grants ..."
+HTTP_CODE=$(curl -sS -o /tmp/resp.json -w "%{http_code}" \
+  -X PUT "http://$HOST:$PORT/api/management/v1/catalogs/$CATALOG/catalog-roles/catalog_admin/grants" \
+  -H "Authorization: Bearer $TOKEN" \
+  -H "Content-Type: application/json" \
+  -d '{"grant":{"type":"catalog", "privilege":"CATALOG_MANAGE_CONTENT"}}')
+
+if [ "$HTTP_CODE" != "200" ] && [ "$HTTP_CODE" != "201" ]; then
+  echo "[polaris-init] Warning: Failed to create catalog admin grants (HTTP 
$HTTP_CODE)"
+  cat /tmp/resp.json || true
+fi
+
+# Create a data engineer role
+echo "[polaris-init] Creating data engineer role ..."
+HTTP_CODE=$(curl -sS -o /tmp/resp.json -w "%{http_code}" \
+  -X POST "http://$HOST:$PORT/api/management/v1/principal-roles"; \
+  -H "Authorization: Bearer $TOKEN" \
+  -H "Content-Type: application/json" \
+  -d '{"principalRole":{"name":"data_engineer"}}')
+
+if [ "$HTTP_CODE" != "200" ] && [ "$HTTP_CODE" != "201" ] && [ "$HTTP_CODE" != 
"409" ]; then
+  echo "[polaris-init] Warning: Failed to create data engineer role (HTTP 
$HTTP_CODE)"
+  cat /tmp/resp.json || true
+fi
+
+# Connect the roles
+echo "[polaris-init] Connecting roles ..."
+HTTP_CODE=$(curl -sS -o /tmp/resp.json -w "%{http_code}" \
+  -X PUT "http://$HOST:$PORT/api/management/v1/principal-roles/data_engineer/catalog-roles/$CATALOG" \
+  -H "Authorization: Bearer $TOKEN" \
+  -H "Content-Type: application/json" \
+  -d '{"catalogRole":{"name":"catalog_admin"}}')
+
+if [ "$HTTP_CODE" != "200" ] && [ "$HTTP_CODE" != "201" ]; then
+  echo "[polaris-init] Warning: Failed to connect roles (HTTP $HTTP_CODE)"
+  cat /tmp/resp.json || true
+fi
+
+# Give root the data engineer role
+echo "[polaris-init] Assigning data engineer role to root ..."
+HTTP_CODE=$(curl -sS -o /tmp/resp.json -w "%{http_code}" \
+  -X PUT "http://$HOST:$PORT/api/management/v1/principals/root/principal-roles" \
+  -H "Authorization: Bearer $TOKEN" \
+  -H "Content-Type: application/json" \
+  -d '{"principalRole": {"name":"data_engineer"}}')
+
+if [ "$HTTP_CODE" != "200" ] && [ "$HTTP_CODE" != "201" ]; then
+  echo "[polaris-init] Warning: Failed to assign data engineer role to root 
(HTTP $HTTP_CODE)"
+  cat /tmp/resp.json || true
+fi
+
+echo "[polaris-init] Permissions setup completed."
+echo "[polaris-init] Done."
+
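For reference, once the containers are healthy the script's token flow can be
replayed from the host to confirm the catalog was created. A sketch, assuming
the default ports and credentials from polaris_settings.env below
(root/secret123, catalog doris_test, API on host port 20181):

    TOKEN=$(curl -sS -X POST "http://localhost:20181/api/catalog/v1/oauth/tokens" \
      -H 'Content-Type: application/x-www-form-urlencoded' \
      -d 'grant_type=client_credentials&client_id=root&client_secret=secret123&scope=PRINCIPAL_ROLE:ALL' \
      | sed -n 's/.*"access_token"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/p')
    curl -sS -H "Authorization: Bearer $TOKEN" \
      "http://localhost:20181/api/management/v1/catalogs/doris_test"
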
diff --git a/docker/thirdparties/docker-compose/polaris/polaris_settings.env b/docker/thirdparties/docker-compose/polaris/polaris_settings.env
new file mode 100644
index 00000000000..70ba1a96b93
--- /dev/null
+++ b/docker/thirdparties/docker-compose/polaris/polaris_settings.env
@@ -0,0 +1,37 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Polaris uses 20181-20182 for S3
+export POLARIS_S3_PORT=20181
+export POLARIS_S3_ADMIN_PORT=20182
+
+# Polaris authentication
+export POLARIS_BOOTSTRAP_PASSWORD="secret123"
+
+# MinIO (S3-compatible) configuration for local development
+export MINIO_API_PORT=20001
+export MINIO_CONSOLE_PORT=20002
+export MINIO_ACCESS_KEY="admin"
+export MINIO_SECRET_KEY="password"
+export MINIO_BUCKET="warehouse"
+export MINIO_BUCKET_PREFIX="wh"
+
+# Polaris catalog defaults (point to MinIO bucket/prefix)
+export AWSRegion="us-east-1"
+export POLARIS_CATALOG_NAME="doris_test"
+export CATALOG_BASE_LOCATION="s3://${MINIO_BUCKET}/${MINIO_BUCKET_PREFIX}/"
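
With these defaults, the published endpoints can be probed from the host once
the stack is up; the paths below match the container healthchecks in the
compose template above (a sketch, assuming the default ports):

    curl -sf http://localhost:20001/minio/health/ready && echo "MinIO ready"
    curl -sf http://localhost:20182/q/health && echo "Polaris ready"
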
diff --git a/docker/thirdparties/run-thirdparties-docker.sh b/docker/thirdparties/run-thirdparties-docker.sh
index 7a27d77adfe..950d0ef656a 100755
--- a/docker/thirdparties/run-thirdparties-docker.sh
+++ b/docker/thirdparties/run-thirdparties-docker.sh
@@ -39,12 +39,12 @@ Usage: $0 <options>
      --no-load-data     do not load data into the components
 
   All valid components:
-    mysql,pg,oracle,sqlserver,clickhouse,es,hive2,hive3,iceberg,iceberg-rest,hudi,trino,kafka,mariadb,db2,oceanbase,lakesoul,kerberos,ranger
+    mysql,pg,oracle,sqlserver,clickhouse,es,hive2,hive3,iceberg,iceberg-rest,hudi,trino,kafka,mariadb,db2,oceanbase,lakesoul,kerberos,ranger,polaris
   "
     exit 1
 }
 
 DEFAULT_COMPONENTS="mysql,es,hive2,hive3,pg,oracle,sqlserver,clickhouse,mariadb,iceberg,db2,oceanbase,kerberos,minio"
-ALL_COMPONENTS="${DEFAULT_COMPONENTS},hudi,trino,kafka,spark,lakesoul,ranger"
+ALL_COMPONENTS="${DEFAULT_COMPONENTS},hudi,trino,kafka,spark,lakesoul,ranger,polaris"
 COMPONENTS=$2
 HELP=0
 STOP=0
@@ -160,6 +160,7 @@ RUN_LAKESOUL=0
 RUN_KERBEROS=0
 RUN_MINIO=0
 RUN_RANGER=0
+RUN_POLARIS=0
 
 RESERVED_PORTS="65535"
 
@@ -207,6 +208,8 @@ for element in "${COMPONENTS_ARR[@]}"; do
         RUN_MINIO=1
     elif [[ "${element}"x == "ranger"x ]]; then
         RUN_RANGER=1
+    elif [[ "${element}"x == "polaris"x ]]; then
+        RUN_POLARIS=1
     else
         echo "Invalid component: ${element}"
         usage
@@ -664,6 +667,24 @@ start_minio() {
     fi
 }
 
+start_polaris() {
+    echo "RUN_POLARIS"
+    local POLARIS_DIR="${ROOT}/docker-compose/polaris"
+    # Render compose with envsubst since settings is a bash export file
+    export CONTAINER_UID=${CONTAINER_UID}
+    . "${POLARIS_DIR}/polaris_settings.env"
+    if command -v envsubst >/dev/null 2>&1; then
+        envsubst <"${POLARIS_DIR}/docker-compose.yaml.tpl" >"${POLARIS_DIR}/docker-compose.yaml"
+    else
+        # Fallback: let docker compose handle variable substitution from current shell env
+        cp "${POLARIS_DIR}/docker-compose.yaml.tpl" "${POLARIS_DIR}/docker-compose.yaml"
+    fi
+    sudo docker compose -f "${POLARIS_DIR}/docker-compose.yaml" down
+    if [[ "${STOP}" -ne 1 ]]; then
+        sudo docker compose -f "${POLARIS_DIR}/docker-compose.yaml" up -d --wait --remove-orphans
+    fi
+}
+
 start_ranger() {
     echo "RUN_RANGER"
     export CONTAINER_UID=${CONTAINER_UID}
@@ -794,6 +815,12 @@ if [[ "${RUN_MINIO}" -eq 1 ]]; then
     start_minio > start_minio.log 2>&1 &
     pids["minio"]=$!
 fi
+
+if [[ "${RUN_POLARIS}" -eq 1 ]]; then
+    start_polaris > start_polaris.log 2>&1 &
+    pids["polaris"]=$!
+fi
+
 if [[ "${RUN_KERBEROS}" -eq 1 ]]; then
     start_kerberos > start_kerberos.log 2>&1 &
     pids["kerberos"]=$!
diff --git a/regression-test/conf/regression-conf.groovy b/regression-test/conf/regression-conf.groovy
index e566ce8351b..a687a3c9fdf 100644
--- a/regression-test/conf/regression-conf.groovy
+++ b/regression-test/conf/regression-conf.groovy
@@ -235,6 +235,10 @@ iceberg_rest_uri_port_s3=19181
 iceberg_rest_uri_port_oss=19182
 iceberg_rest_uri_port_cos=19183
 
+// polaris rest catalog config
+polaris_rest_uri_port=20181
+polaris_minio_port=20001
+
 // If the failure suite num exceeds this config
 // all following suite will be skipped to fast quit the run.
 // <=0 means no limit.
diff --git a/regression-test/pipeline/external/conf/regression-conf.groovy b/regression-test/pipeline/external/conf/regression-conf.groovy
index 2f2031191ad..8b513458d0e 100644
--- a/regression-test/pipeline/external/conf/regression-conf.groovy
+++ b/regression-test/pipeline/external/conf/regression-conf.groovy
@@ -134,6 +134,10 @@ iceberg_rest_uri_port_cos=19183
 iceberg_minio_port=19001
 enableIcebergTest=true
 
+// polaris rest catalog config
+polaris_rest_uri_port=20181
+polaris_minio_port=20001
+
 enableEsTest=true
 es_5_port=59200
 es_6_port=19200
diff --git a/regression-test/pipeline/nonConcurrent/conf/regression-conf.groovy b/regression-test/pipeline/nonConcurrent/conf/regression-conf.groovy
index f0c690e4dbe..922fe374a25 100644
--- a/regression-test/pipeline/nonConcurrent/conf/regression-conf.groovy
+++ b/regression-test/pipeline/nonConcurrent/conf/regression-conf.groovy
@@ -123,6 +123,10 @@ iceberg_rest_uri_port_s3=19181
 iceberg_rest_uri_port_oss=19182
 iceberg_rest_uri_port_cos=19183
 
+// polaris rest catalog config
+polaris_rest_uri_port=20181
+polaris_minio_port=20001
+
 enableEsTest=false
 es_6_port=19200
 es_7_port=29200
diff --git a/regression-test/pipeline/p0/conf/regression-conf.groovy b/regression-test/pipeline/p0/conf/regression-conf.groovy
index 6531e7e6d3c..efe158f0887 100644
--- a/regression-test/pipeline/p0/conf/regression-conf.groovy
+++ b/regression-test/pipeline/p0/conf/regression-conf.groovy
@@ -140,6 +140,10 @@ iceberg_rest_uri_port_s3=19181
 iceberg_rest_uri_port_oss=19182
 iceberg_rest_uri_port_cos=19183
 
+// polaris rest catalog config
+polaris_rest_uri_port=20181
+polaris_minio_port=20001
+
 enableEsTest=false
 es_6_port=19200
 es_7_port=29200
diff --git a/regression-test/suites/external_table_p0/polaris/test_polaris.groovy b/regression-test/suites/external_table_p0/polaris/test_polaris.groovy
new file mode 100644
index 00000000000..6e49c80fb72
--- /dev/null
+++ b/regression-test/suites/external_table_p0/polaris/test_polaris.groovy
@@ -0,0 +1,330 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_polaris", 
"p0,external,iceberg,polaris,external_docker,external_docker_polaris,new_catalog_property")
 {
+
+    def testQueryAndInsert = { String catalogProperties, String prefix ->
+
+        // =======  BASIC CATALOG AND DATABASE SETUP  =======
+        def catalog_name = "${prefix}_catalog"
+        sql """
+            DROP CATALOG IF EXISTS ${catalog_name};
+        """
+        sql """
+            CREATE CATALOG IF NOT EXISTS ${catalog_name} PROPERTIES (
+                ${catalogProperties}
+            );
+        """
+        sql """
+            switch ${catalog_name};
+        """
+
+        def db_name = prefix + "_db"
+        sql """
+            DROP DATABASE IF EXISTS ${db_name} FORCE;
+        """
+        sql """
+            CREATE DATABASE IF NOT EXISTS ${db_name};
+        """
+
+        def dbResult = sql """
+            show databases  like "${db_name}";
+        """
+        assert dbResult.size() == 1
+
+        sql """
+            use ${db_name};
+        """
+        // =======  BASIC TABLE OPERATIONS TEST  =======
+        def table_name = prefix + "_table"
+        sql """
+            CREATE TABLE ${table_name} (
+            user_id            BIGINT       NOT NULL COMMENT "user id",
+            name               VARCHAR(20)           COMMENT "name",
+            age                INT                   COMMENT "age"
+        );
+        """
+        sql """
+            insert into ${table_name} values (1, 'a', 10);
+        """
+        // query
+        def queryResult = sql """
+            SELECT * FROM ${table_name};
+        """
+        assert queryResult.size() == 1
+
+        // =======  BRANCH/TAG TEST  =======
+        def branch_name = prefix + "_branch"
+        def tag_name = prefix + "_tag"
+        sql """
+            ALTER TABLE ${table_name} CREATE BRANCH ${branch_name};
+        """
+        sql """
+            ALTER TABLE ${table_name} CREATE TAG ${tag_name};
+        """
+        sql """
+            INSERT OVERWRITE TABLE ${table_name} VALUES (1, 'a', 10),(2, 'b', 20), (3, 'c', 30)
+        """
+        def originalQueryResult = sql """
+            SELECT * FROM ${table_name};
+        """
+        assert originalQueryResult.size() == 3
+        sql """
+            insert into ${table_name}@branch(${branch_name}) values (4, 'd', 40)
+        """
+        def branchQueryResult = sql """
+            SELECT * FROM ${table_name}@branch(${branch_name});
+        """
+        assert branchQueryResult.size() == 2
+
+
+        def tagQueryResult = sql """
+            SELECT * FROM ${table_name}@tag(${tag_name});
+        """
+        assert tagQueryResult.size() == 1
+
+        // Note: Tags are read-only in Iceberg, only branches support write operations
+
+        sql """
+            ALTER TABLE ${table_name} drop branch ${branch_name};
+        """
+        sql """
+            ALTER TABLE ${table_name} drop tag ${tag_name};
+        """
+        // =======  SYSTEM TABLES TEST  =======
+        // Test $files system table
+        def files_result = sql """
+        SELECT * FROM ${table_name}\$files;
+    """
+        println "Files system table: " + files_result
+
+        // Test $entries system table
+        def entries_result = sql """
+        SELECT * FROM ${table_name}\$entries;
+    """
+        println "Entries system table: " + entries_result
+
+        // Test $history system table
+        def history_result = sql """
+        SELECT * FROM ${table_name}\$history;
+    """
+        println "History system table: " + history_result
+
+        // Test $manifests system table
+        def manifests_result = sql """
+        SELECT * FROM ${table_name}\$manifests;
+    """
+        println "Manifests system table: " + manifests_result
+
+        // Test $metadata_log_entries system table
+        def metadata_log_result = sql """
+        SELECT * FROM ${table_name}\$metadata_log_entries;
+    """
+        println "Metadata log entries system table: " + metadata_log_result
+
+        // Test $partitions system table
+        def partitions_result = sql """
+        SELECT * FROM ${table_name}\$partitions;
+    """
+        println "Partitions system table: " + partitions_result
+
+        // Test $refs system table
+        def refs_result = sql """
+        SELECT * FROM ${table_name}\$refs;
+    """
+        println "Refs system table: " + refs_result
+
+        // Test $snapshots system table
+        def snapshots_result = sql """
+        SELECT * FROM ${table_name}\$snapshots;
+    """
+        println "Snapshots system table: " + snapshots_result
+
+        println "All system tables test SUCCESS " + catalog_name
+
+        // =======  TIME TRAVEL TEST  =======
+        def iceberg_meta_result = sql """
+        SELECT snapshot_id FROM iceberg_meta(
+                'table' = '${catalog_name}.${db_name}.${table_name}',
+                'query_type' = 'snapshots'
+        ) order by committed_at desc;
+        """
+        def first_snapshot_id = iceberg_meta_result.get(0).get(0);
+        def time_travel = sql """
+        SELECT * FROM ${table_name} FOR VERSION AS OF ${first_snapshot_id};
+        """
+        println time_travel
+        println "iceberg_time_travel_QUERY SUCCESS " + catalog_name
+
+
+        sql """
+            DROP TABLE ${table_name};
+        """
+
+        // =======  PARTITION TABLE TEST  =======
+        table_name = prefix + "_partition_table"
+        sql """
+            CREATE TABLE ${table_name} (
+              `ts` DATETIME COMMENT 'ts',
+              `col1` BOOLEAN COMMENT 'col1',
+              `col2` INT COMMENT 'col2',
+              `col3` BIGINT COMMENT 'col3',
+              `col4` FLOAT COMMENT 'col4',
+              `col5` DOUBLE COMMENT 'col5',
+              `col6` DECIMAL(9,4) COMMENT 'col6',
+              `col7` STRING COMMENT 'col7',
+              `col8` DATE COMMENT 'col8',
+              `col9` DATETIME COMMENT 'col9',
+              `pt1` STRING COMMENT 'pt1',
+              `pt2` STRING COMMENT 'pt2'
+            )
+            PARTITION BY LIST (day(ts), pt1, pt2) ()
+            PROPERTIES (
+              'write-format'='orc',
+              'compression-codec'='zlib'
+            );
+        """
+
+        sql """
+            INSERT OVERWRITE TABLE ${table_name} values
+            ('2023-01-01 00:00:00', true, 1, 1, 1.0, 1.0, 1.0000, '1', '2023-01-01', '2023-01-01 00:00:00', 'a', '1'),
+            ('2023-01-02 00:00:00', false, 2, 2, 2.0, 2.0, 2.0000, '2', '2023-01-02', '2023-01-02 00:00:00', 'b', '2'),
+            ('2023-01-03 00:00:00', true, 3, 3, 3.0, 3.0, 3.0000, '3', '2023-01-03', '2023-01-03 00:00:00', 'c', '3');
+        """
+        def partitionQueryResult = sql """
+            SELECT * FROM ${table_name} WHERE pt1='a' and pt2='1';
+        """
+        assert partitionQueryResult.size() == 1
+
+        // =======  PARTITION TABLE BRANCH/TAG TEST  =======
+        branch_name = prefix + "_partition_branch"
+        tag_name = prefix + "_partition_tag"
+
+        sql """
+            ALTER TABLE ${table_name} CREATE BRANCH ${branch_name};
+        """
+        sql """
+            ALTER TABLE ${table_name} CREATE TAG ${tag_name};
+        """
+
+        // Partition table branch write operation
+        sql """
+            insert into ${table_name}@branch(${branch_name}) values ('2023-01-04 00:00:00', false, 4, 4, 4.0, 4.0, 4.0000, '4', '2023-01-04', '2023-01-04 00:00:00', 'd', '4')
+        """
+
+        def partitionBranchResult = sql """
+            SELECT * FROM ${table_name}@branch(${branch_name}) ORDER BY col2;
+        """
+        println "Partition table branch query: " + partitionBranchResult
+
+        def partitionTagResult = sql """
+            SELECT * FROM ${table_name}@tag(${tag_name}) ORDER BY col2;
+        """
+        println "Partition table tag query: " + partitionTagResult
+
+        // Test partition table system tables
+        def partition_files_result = sql """
+            SELECT * FROM ${table_name}\$partitions;
+        """
+        println "Partitions system table: " + partition_files_result
+
+        sql """
+            ALTER TABLE ${table_name} drop branch ${branch_name};
+        """
+        sql """
+            ALTER TABLE ${table_name} drop tag ${tag_name};
+        """
+
+        sql """
+            DROP TABLE ${table_name};
+        """
+
+        sql """
+            DROP DATABASE ${db_name} FORCE;
+        """
+
+        def dropResult = sql """
+            show databases  like "${db_name}";
+        """
+        assert dropResult.size() == 0
+    }
+
+    String enabled = context.config.otherConfigs.get("enableIcebergTest")
+    if (enabled != null && enabled.equalsIgnoreCase("true")) {
+        /* REST catalog env and base properties */
+        String externalEnvIp = context.config.otherConfigs.get("externalEnvIp")
+
+        String polaris_port = context.config.otherConfigs.get("polaris_rest_uri_port")
+        String minio_port = context.config.otherConfigs.get("polaris_minio_port")
+
+        String polaris_credential = """
+            'type'='iceberg',
+            'iceberg.catalog.type'='rest',
+            'iceberg.rest.uri' = 'http://${externalEnvIp}:${polaris_port}/api/catalog',
+            'iceberg.rest.security.type' = 'oauth2',
+            'iceberg.rest.oauth2.credential' = 'root:secret123',
+            'iceberg.rest.oauth2.server-uri' = 'http://${externalEnvIp}:${polaris_port}/api/catalog/v1/oauth/tokens',
+            'iceberg.rest.oauth2.scope' = 'PRINCIPAL_ROLE:ALL',
+        """
+
+        String token = ["curl", "-s", 
"http://${externalEnvIp}:${polaris_port}/api/catalog/v1/oauth/tokens";,
+                        "--user", "root:secret123",
+                        "-d", "grant_type=client_credentials",
+                        "-d", "scope=PRINCIPAL_ROLE:ALL"]
+                .execute().text
+                .find(/"access_token":"([^"]*)"/) { match, token -> token }
+
+        String polaris_token = """
+            'type'='iceberg',
+            'iceberg.catalog.type'='rest',
+            'iceberg.rest.uri' = 'http://${externalEnvIp}:${polaris_port}/api/catalog',
+            'iceberg.rest.security.type' = 'oauth2',
+            'iceberg.rest.oauth2.token' = '${token}',
+        """
+
+        /*-----MinIO------*/
+        /****************AK SK*******************/
+        String storage_properties_aksk = """
+            's3.access_key' = 'admin',
+            's3.secret_key' = 'password',
+            's3.endpoint' = 'http://${externalEnvIp}:${minio_port}',
+            's3.region' = 'us-east-1'
+        """
+        /****************NO AK SK*******************/
+        String storage_properties_vended = """
+            'iceberg.rest.vended-credentials-enabled' = 'true',
+            's3.endpoint' = 'http://${externalEnvIp}:${minio_port}',
+            's3.region' = 'us-east-1'
+        """
+
+        String warehouse = """
+         'warehouse' = 'doris_test',
+        """
+
+        // -------- POLARIS CREDENTIAL AKSK--------
+        testQueryAndInsert(polaris_credential + warehouse + storage_properties_aksk, "polaris_credential_aksk")
+
+        // -------- POLARIS CREDENTIAL VENDED --------
+        testQueryAndInsert(polaris_credential + warehouse + storage_properties_vended, "storage_properties_vended")
+
+        // -------- POLARIS TOKEN AKSK--------
+        testQueryAndInsert(polaris_token + warehouse + storage_properties_aksk, "polaris_token_aksk")
+
+        // -------- POLARIS TOKEN VENDED--------
+        testQueryAndInsert(polaris_token + warehouse + storage_properties_vended, "polaris_token_vended")
+    }
+}
\ No newline at end of file
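
Once the containers and the config entries above are in place (and
enableIcebergTest=true), the suite can be run on its own. A sketch, assuming
the standard regression runner at the repository root:

    ./run-regression-test.sh --run -s test_polaris
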


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
