Copilot commented on code in PR #61706:
URL: https://github.com/apache/doris/pull/61706#discussion_r2986053456


##########
build.sh:
##########
@@ -756,6 +803,57 @@ function build_ui() {
     cp -r "${ui_dist}"/* "${DORIS_HOME}/fe/fe-core/src/main/resources/static"/
 }
 
+function build_fe_modules() {
+    local thread_count="${FE_MAVEN_THREADS:-1C}"
+    local retry_thread_count="${FE_MAVEN_RETRY_THREADS:-1}"
+    local log_file
+    local -a dependency_mvn_opts=()
+    local -a extra_mvn_opts=()
+    local -a user_settings_opts=()
+    local -a mvn_cmd=(
+        "${MVN_CMD}"
+        package
+        -pl
+        "${FE_MODULES}"
+        -am
+        -Dskip.doc=true
+        -DskipTests
+    )
+
+    if [[ "${DISABLE_JAVA_CHECK_STYLE}" = "ON" ]]; then
+        mvn_cmd+=("-Dcheckstyle.skip=true")
+    fi
+    if [[ -n "${MVN_OPT}" ]]; then
+        # shellcheck disable=SC2206
+        extra_mvn_opts=(${MVN_OPT})
+    fi
+    if [[ "${BUILD_OBS_DEPENDENCIES}" -eq 0 ]]; then
+        dependency_mvn_opts+=("-Dobs.dependency.scope=provided")
+    fi
+    if [[ "${BUILD_COS_DEPENDENCIES}" -eq 0 ]]; then
+        dependency_mvn_opts+=("-Dcos.dependency.scope=provided")
+    fi
+    if [[ -n "${USER_SETTINGS_MVN_REPO}" && -f "${USER_SETTINGS_MVN_REPO}" ]]; 
then
+        user_settings_opts=(-gs "${USER_SETTINGS_MVN_REPO}")
+    fi
+
+    mvn_cmd+=("${extra_mvn_opts[@]}" "${dependency_mvn_opts[@]}" 
"${user_settings_opts[@]}" -T "${thread_count}")
+    log_file="$(mktemp)"
+    if "${mvn_cmd[@]}" 2>&1 | tee "${log_file}"; then
+        rm -f "${log_file}"
+        return 0
+    fi
+    if [[ "${thread_count}" != "${retry_thread_count}" ]] && grep -Fq "Could 
not acquire lock(s)" "${log_file}"; then
+        echo "FE Maven build hit Maven resolver lock contention. Retrying with 
-T ${retry_thread_count}."
+        mvn_cmd=("${mvn_cmd[@]:0:${#mvn_cmd[@]}-2}" -T "${retry_thread_count}")
+        "${mvn_cmd[@]}"
+        rm -f "${log_file}"
+        return 0

Review Comment:
   In the Maven lock-contention retry path, the function returns success 
unconditionally: even if the retry Maven invocation fails, the script still 
returns 0, so the build may proceed with missing or partial FE artifacts. 
Capture and check the retry command's exit status, return non-zero on failure, 
and ideally print the saved log for diagnosis.
   ```suggestion
           if "${mvn_cmd[@]}"; then
               rm -f "${log_file}"
               return 0
           fi
           echo "FE Maven build retry with -T ${retry_thread_count} failed. 
Original lock-contention log follows:"
           cat "${log_file}"
           rm -f "${log_file}"
           return 1
   ```



##########
regression-test/suites/external_table_p0/refactor_storage_param/test_jfs_hms_catalog_read.groovy:
##########
@@ -0,0 +1,141 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_jfs_hms_catalog_read", "p0,external") {
+    String enableJfs = context.config.otherConfigs.get("enableJfsTest")
+    if (enableJfs == null || !enableJfs.equalsIgnoreCase("true")) {
+        logger.info("disable JFS test.")
+        return
+    }
+
+    String enableHive = context.config.otherConfigs.get("enableHiveTest")
+    if (enableHive == null || !enableHive.equalsIgnoreCase("true")) {
+        logger.info("disable Hive test.")
+        return
+    }
+
+    String jfsFs = context.config.otherConfigs.get("jfsFs")
+    if (jfsFs == null || jfsFs.trim().isEmpty()) {
+        logger.info("skip JFS test because jfsFs is empty.")
+        return
+    }
+
+    String jfsImpl = context.config.otherConfigs.get("jfsImpl")
+    if (jfsImpl == null || jfsImpl.trim().isEmpty()) {
+        jfsImpl = "io.juicefs.JuiceFileSystem"
+    }
+    String jfsMeta = context.config.otherConfigs.get("jfsMeta")
+    if (jfsMeta == null || jfsMeta.trim().isEmpty()) {
+        throw new IllegalStateException("jfsMeta must be configured for JFS 
data IO regression")
+    }
+    String jfsCluster = jfsFs.replaceFirst("^jfs://", "")
+    int slashPos = jfsCluster.indexOf("/")
+    if (slashPos > 0) {
+        jfsCluster = jfsCluster.substring(0, slashPos)
+    }
+    String jfsMetaProperty = ",\n                'juicefs.${jfsCluster}.meta' 
= '${jfsMeta}'"
+
+    String hdfsUser = context.config.otherConfigs.get("jfsHadoopUser")
+    if (hdfsUser == null || hdfsUser.trim().isEmpty()) {
+        hdfsUser = context.config.otherConfigs.get("hdfsUser")
+    }
+    if (hdfsUser == null || hdfsUser.trim().isEmpty()) {
+        hdfsUser = "root"
+    }
+
+    String hmsUris = context.config.otherConfigs.get("jfsHiveMetastoreUris")
+    if (hmsUris == null || hmsUris.trim().isEmpty()) {
+        String externalEnvIp = context.config.otherConfigs.get("externalEnvIp")
+        String hmsPort = context.config.otherConfigs.get("hive3HmsPort")
+        if (hmsPort == null || hmsPort.trim().isEmpty()) {
+            hmsPort = context.config.otherConfigs.get("hive2HmsPort")
+        }
+        if (externalEnvIp == null || externalEnvIp.trim().isEmpty()
+                || hmsPort == null || hmsPort.trim().isEmpty()) {
+            logger.info("skip JFS test because jfsHiveMetastoreUris is empty 
and fallback externalEnvIp/hmsPort is invalid.")
+            return
+        }
+        hmsUris = "thrift://${externalEnvIp}:${hmsPort}"
+    }
+    String catalogName = "test_jfs_hms_catalog_read"
+    String dbName = "test_jfs_hms_catalog_read_db"
+    String tableName = "test_jfs_hms_catalog_read_tbl"
+    String jfsDbBasePath = context.config.otherConfigs.get("jfsDbBasePath")
+    if (jfsDbBasePath == null || jfsDbBasePath.trim().isEmpty()) {
+        jfsDbBasePath = "${jfsFs}/doris_jfs/${hdfsUser}"
+    }
+    jfsDbBasePath = jfsDbBasePath.replaceAll('/+$', '')
+    String jfsStagingDir = context.config.otherConfigs.get("jfsStagingDir")
+    if (jfsStagingDir == null || jfsStagingDir.trim().isEmpty()) {
+        jfsStagingDir = "${jfsDbBasePath}/.doris_staging"
+    }
+    jfsStagingDir = jfsStagingDir.replaceAll('/+$', '')
+    String dbLocation = "${jfsDbBasePath}/${dbName}"
+
+    sql """drop catalog if exists ${catalogName}"""
+
+    try {
+        sql """
+            CREATE CATALOG ${catalogName} PROPERTIES (
+                'type' = 'hms',
+                'hive.metastore.uris' = '${hmsUris}',
+                'fs.defaultFS' = '${jfsFs}',
+                'fs.jfs.impl' = '${jfsImpl}',
+                'hadoop.username' = '${hdfsUser}',
+                'hive.staging_dir' = '${jfsStagingDir}'
+                ${jfsMetaProperty}
+            );
+        """
+
+        sql """switch ${catalogName}"""
+        def dbs = sql """show databases"""
+        assertTrue(dbs.size() > 0)
+
+        def hasDb = sql """show databases like '${dbName}'"""
+        if (hasDb.size() > 0) {
+            sql """drop table if exists `${dbName}`.`${tableName}`"""
+            sql """drop database if exists `${dbName}`"""
+        }
+        sql """
+            create database `${dbName}`
+            properties('location'='${dbLocation}')
+        """
+        sql """use `${dbName}`"""
+        sql """
+            CREATE TABLE `${tableName}` (
+              `id` INT,
+              `name` STRING
+            ) ENGINE=hive
+            PROPERTIES (
+              'file_format'='parquet'
+            )
+        """
+        sql """insert into `${tableName}` values (1, 'jfs_1'), (2, 'jfs_2')"""
+
+        def cnt = sql """select count(*) from `${tableName}`"""
+        assertEquals("2", cnt[0][0].toString())
+
+        def rows = sql """select * from `${tableName}` order by id"""
+        assertTrue(rows.size() == 2)
+        assertEquals("1", rows[0][0].toString())
+        assertEquals("jfs_1", rows[0][1].toString())
+        assertEquals("2", rows[1][0].toString())
+        assertEquals("jfs_2", rows[1][1].toString())
+    } finally {
+        sql """switch internal"""

Review Comment:
   The suite creates and switches to a new catalog, but the `finally` block only 
switches back to `internal` and never drops the created catalog. This can leave 
state behind for subsequent suites or reruns. Consider dropping the catalog in 
`finally` (e.g. switch to `internal`, then `drop catalog if exists ...`) so the 
test cleans up after itself even on failures.
   ```suggestion
           sql """switch internal"""
           sql """drop catalog if exists ${catalogName}"""
   ```



##########
docker/thirdparties/run-thirdparties-docker.sh:
##########
@@ -230,6 +230,167 @@ reserve_ports() {
     fi
 }
 
+JFS_META_FORMATTED=0
+DORIS_ROOT="$(cd "${ROOT}/../.." &>/dev/null && pwd)"
+. "${DORIS_ROOT}/thirdparty/juicefs-helpers.sh"
+JUICEFS_LOCAL_BIN="${DORIS_ROOT}/thirdparty/installed/juicefs_bin/juicefs"
+
+find_juicefs_hadoop_jar() {
+    local -a jar_globs=(
+        
"${DORIS_ROOT}/thirdparty/installed/juicefs_libs/juicefs-hadoop-[0-9]*.jar"
+        "${DORIS_ROOT}/output/fe/lib/juicefs/juicefs-hadoop-[0-9]*.jar"
+        
"${DORIS_ROOT}/output/be/lib/java_extensions/juicefs/juicefs-hadoop-[0-9]*.jar"
+        
"${DORIS_ROOT}/../../../clusterEnv/*/Cluster*/fe/lib/juicefs/juicefs-hadoop-[0-9]*.jar"
+        
"${DORIS_ROOT}/../../../clusterEnv/*/Cluster*/be/lib/java_extensions/juicefs/juicefs-hadoop-[0-9]*.jar"
+        
"/mnt/ssd01/pipline/OpenSourceDoris/clusterEnv/*/Cluster*/fe/lib/juicefs/juicefs-hadoop-[0-9]*.jar"
+        
"/mnt/ssd01/pipline/OpenSourceDoris/clusterEnv/*/Cluster*/be/lib/java_extensions/juicefs/juicefs-hadoop-[0-9]*.jar"
+    )

Review Comment:
   `find_juicefs_hadoop_jar()` includes hard-coded absolute paths under 
`/mnt/ssd01/pipline/...`, which are specific to one CI environment and will 
never match (while still adding unnecessary filesystem scans) on most 
developer/CI machines. Please remove these absolute paths or gate them behind 
an explicit environment variable (e.g. a configurable search root) so the 
script remains portable.
   ```suggestion
       )
       if [[ -n "${JUICEFS_CLUSTER_ENV_ROOT:-}" ]]; then
           jar_globs+=(
               
"${JUICEFS_CLUSTER_ENV_ROOT}/clusterEnv/*/Cluster*/fe/lib/juicefs/juicefs-hadoop-[0-9]*.jar"
               
"${JUICEFS_CLUSTER_ENV_ROOT}/clusterEnv/*/Cluster*/be/lib/java_extensions/juicefs/juicefs-hadoop-[0-9]*.jar"
           )
       fi
   ```



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to