github-actions[bot] commented on code in PR #61031:
URL: https://github.com/apache/doris/pull/61031#discussion_r2900566360


##########
regression-test/suites/external_table_p0/refactor_storage_param/test_jfs_hms_catalog_read.groovy:
##########
@@ -0,0 +1,137 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_jfs_hms_catalog_read", "p0,external") {
+    String enableJfs = context.config.otherConfigs.get("enableJfsTest")
+    if (enableJfs == null || !enableJfs.equalsIgnoreCase("true")) {
+        logger.info("disable JFS test.")
+        return
+    }
+
+    String enableHive = context.config.otherConfigs.get("enableHiveTest")
+    if (enableHive == null || !enableHive.equalsIgnoreCase("true")) {
+        logger.info("disable Hive test.")
+        return
+    }
+
+    String jfsFs = context.config.otherConfigs.get("jfsFs")
+    if (jfsFs == null || jfsFs.trim().isEmpty()) {
+        logger.info("skip JFS test because jfsFs is empty.")
+        return
+    }
+
+    String jfsImpl = context.config.otherConfigs.get("jfsImpl")
+    if (jfsImpl == null || jfsImpl.trim().isEmpty()) {
+        jfsImpl = "io.juicefs.JuiceFileSystem"
+    }
+    String jfsMeta = context.config.otherConfigs.get("jfsMeta")
+    if (jfsMeta == null || jfsMeta.trim().isEmpty()) {
+        throw new IllegalStateException("jfsMeta must be configured for JFS 
data IO regression")
+    }
+    String jfsCluster = jfsFs.replaceFirst("^jfs://", "")
+    int slashPos = jfsCluster.indexOf("/")
+    if (slashPos > 0) {
+        jfsCluster = jfsCluster.substring(0, slashPos)
+    }
+    String jfsMetaProperty = ",\n                'juicefs.${jfsCluster}.meta' 
= '${jfsMeta}'"
+
+    String hdfsUser = context.config.otherConfigs.get("jfsHadoopUser")
+    if (hdfsUser == null || hdfsUser.trim().isEmpty()) {
+        hdfsUser = context.config.otherConfigs.get("hdfsUser")
+    }
+    if (hdfsUser == null || hdfsUser.trim().isEmpty()) {
+        hdfsUser = "root"
+    }
+
+    String externalEnvIp = context.config.otherConfigs.get("externalEnvIp")
+    String hmsPort = context.config.otherConfigs.get("hive2HmsPort")
+    String hmsUris = context.config.otherConfigs.get("jfsHiveMetastoreUris")
+    if (hmsUris == null || hmsUris.trim().isEmpty()) {
+        hmsUris = "thrift://${externalEnvIp}:${hmsPort}"
+    }
+    String catalogName = "test_jfs_hms_catalog_read"
+    String dbName = "test_jfs_hms_catalog_read_db"
+    String tableName = "test_jfs_hms_catalog_read_tbl"
+    String jfsDbBasePath = context.config.otherConfigs.get("jfsDbBasePath")
+    if (jfsDbBasePath == null || jfsDbBasePath.trim().isEmpty()) {
+        jfsDbBasePath = "${jfsFs}/doris_jfs/${hdfsUser}"
+    }
+    jfsDbBasePath = jfsDbBasePath.replaceAll('/+$', '')
+    String jfsStagingDir = context.config.otherConfigs.get("jfsStagingDir")
+    if (jfsStagingDir == null || jfsStagingDir.trim().isEmpty()) {
+        jfsStagingDir = "${jfsDbBasePath}/.doris_staging"
+    }
+    jfsStagingDir = jfsStagingDir.replaceAll('/+$', '')
+    String dbLocation = "${jfsDbBasePath}/${dbName}"
+
+    try {
+        sql """drop catalog if exists ${catalogName}"""
+
+        sql """
+            CREATE CATALOG ${catalogName} PROPERTIES (
+                'type' = 'hms',
+                'hive.metastore.uris' = '${hmsUris}',
+                'fs.defaultFS' = '${jfsFs}',
+                'fs.jfs.impl' = '${jfsImpl}',
+                'hadoop.username' = '${hdfsUser}',
+                'hive.staging_dir' = '${jfsStagingDir}'
+                ${jfsMetaProperty}
+            );
+        """
+
+        sql """switch ${catalogName}"""
+        def dbs = sql """show databases"""
+        assertTrue(dbs.size() > 0)
+
+        sql """
+            create database if not exists `${dbName}`
+            properties('location'='${dbLocation}')

Review Comment:
   **[Low]** Per Doris regression test standards, the database should be 
dropped before creation instead of relying on `CREATE DATABASE IF NOT EXISTS`. 
A leftover database from a prior failed run may carry stale state (e.g., a 
different `location` property). Consider:
   ```groovy
   sql """drop database if exists `${dbName}`"""
   sql """
       create database `${dbName}`
       properties('location'='${dbLocation}')
   """
   ```



##########
regression-test/suites/external_table_p0/refactor_storage_param/test_jfs_hms_catalog_read.groovy:
##########
@@ -0,0 +1,137 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_jfs_hms_catalog_read", "p0,external") {
+    String enableJfs = context.config.otherConfigs.get("enableJfsTest")
+    if (enableJfs == null || !enableJfs.equalsIgnoreCase("true")) {
+        logger.info("disable JFS test.")
+        return
+    }
+
+    String enableHive = context.config.otherConfigs.get("enableHiveTest")
+    if (enableHive == null || !enableHive.equalsIgnoreCase("true")) {
+        logger.info("disable Hive test.")
+        return
+    }
+
+    String jfsFs = context.config.otherConfigs.get("jfsFs")
+    if (jfsFs == null || jfsFs.trim().isEmpty()) {
+        logger.info("skip JFS test because jfsFs is empty.")
+        return
+    }
+
+    String jfsImpl = context.config.otherConfigs.get("jfsImpl")
+    if (jfsImpl == null || jfsImpl.trim().isEmpty()) {
+        jfsImpl = "io.juicefs.JuiceFileSystem"
+    }
+    String jfsMeta = context.config.otherConfigs.get("jfsMeta")
+    if (jfsMeta == null || jfsMeta.trim().isEmpty()) {
+        throw new IllegalStateException("jfsMeta must be configured for JFS 
data IO regression")
+    }
+    String jfsCluster = jfsFs.replaceFirst("^jfs://", "")
+    int slashPos = jfsCluster.indexOf("/")
+    if (slashPos > 0) {
+        jfsCluster = jfsCluster.substring(0, slashPos)
+    }
+    String jfsMetaProperty = ",\n                'juicefs.${jfsCluster}.meta' 
= '${jfsMeta}'"
+
+    String hdfsUser = context.config.otherConfigs.get("jfsHadoopUser")
+    if (hdfsUser == null || hdfsUser.trim().isEmpty()) {
+        hdfsUser = context.config.otherConfigs.get("hdfsUser")
+    }
+    if (hdfsUser == null || hdfsUser.trim().isEmpty()) {
+        hdfsUser = "root"
+    }
+
+    String externalEnvIp = context.config.otherConfigs.get("externalEnvIp")
+    String hmsPort = context.config.otherConfigs.get("hive2HmsPort")
+    String hmsUris = context.config.otherConfigs.get("jfsHiveMetastoreUris")
+    if (hmsUris == null || hmsUris.trim().isEmpty()) {
+        hmsUris = "thrift://${externalEnvIp}:${hmsPort}"
+    }
+    String catalogName = "test_jfs_hms_catalog_read"
+    String dbName = "test_jfs_hms_catalog_read_db"
+    String tableName = "test_jfs_hms_catalog_read_tbl"
+    String jfsDbBasePath = context.config.otherConfigs.get("jfsDbBasePath")
+    if (jfsDbBasePath == null || jfsDbBasePath.trim().isEmpty()) {
+        jfsDbBasePath = "${jfsFs}/doris_jfs/${hdfsUser}"
+    }
+    jfsDbBasePath = jfsDbBasePath.replaceAll('/+$', '')
+    String jfsStagingDir = context.config.otherConfigs.get("jfsStagingDir")
+    if (jfsStagingDir == null || jfsStagingDir.trim().isEmpty()) {
+        jfsStagingDir = "${jfsDbBasePath}/.doris_staging"
+    }
+    jfsStagingDir = jfsStagingDir.replaceAll('/+$', '')
+    String dbLocation = "${jfsDbBasePath}/${dbName}"
+
+    try {
+        sql """drop catalog if exists ${catalogName}"""
+
+        sql """
+            CREATE CATALOG ${catalogName} PROPERTIES (
+                'type' = 'hms',
+                'hive.metastore.uris' = '${hmsUris}',
+                'fs.defaultFS' = '${jfsFs}',
+                'fs.jfs.impl' = '${jfsImpl}',
+                'hadoop.username' = '${hdfsUser}',
+                'hive.staging_dir' = '${jfsStagingDir}'
+                ${jfsMetaProperty}
+            );
+        """
+
+        sql """switch ${catalogName}"""
+        def dbs = sql """show databases"""
+        assertTrue(dbs.size() > 0)
+
+        sql """
+            create database if not exists `${dbName}`
+            properties('location'='${dbLocation}')
+        """
+        sql """use `${dbName}`"""
+        sql """drop table if exists `${tableName}`"""
+        sql """
+            CREATE TABLE `${tableName}` (
+              `id` INT,
+              `name` STRING
+            ) ENGINE=hive
+            PROPERTIES (
+              'file_format'='parquet'
+            )
+        """
+        sql """insert into `${tableName}` values (1, 'jfs_1'), (2, 'jfs_2')"""
+
+        def cnt = sql """select count(*) from `${tableName}`"""
+        assertEquals("2", cnt[0][0].toString())
+
+        def rows = sql """select * from `${tableName}` order by id"""
+        assertTrue(rows.size() == 2)
+        assertEquals("1", rows[0][0].toString())
+        assertEquals("jfs_1", rows[0][1].toString())
+        assertEquals("2", rows[1][0].toString())
+        assertEquals("jfs_2", rows[1][1].toString())
+    } finally {
+        try {
+            sql """switch ${catalogName}"""
+            sql """drop table if exists `${dbName}`.`${tableName}`"""
+            sql """drop database if exists `${dbName}`"""

Review Comment:
   **[Low]** Per Doris regression test standards: "After completing tests, do 
not drop tables; instead drop tables before using them in tests, to preserve 
the environment for debugging." The `finally` block dropping 
tables/database/catalog goes against this convention. The drop-before-create at 
lines 82 and 105 is sufficient for cleanup between runs.



##########
docker/thirdparties/run-thirdparties-docker.sh:
##########
@@ -230,6 +230,200 @@ reserve_ports() {
     fi
 }
 
+JFS_META_FORMATTED=0
+DORIS_ROOT="$(cd "${ROOT}/../.." &>/dev/null && pwd)"
+JUICEFS_DEFAULT_VERSION="1.3.1"
+JUICEFS_LOCAL_BIN="${DORIS_ROOT}/thirdparty/installed/juicefs_bin/juicefs"
+
+find_juicefs_hadoop_jar() {
+    local juicefs_jar=""
+    local -a jar_globs=(
+        
"${DORIS_ROOT}/thirdparty/installed/juicefs_libs/juicefs-hadoop-[0-9]*.jar"
+        "${DORIS_ROOT}/output/fe/lib/juicefs/juicefs-hadoop-[0-9]*.jar"
+        
"${DORIS_ROOT}/output/be/lib/java_extensions/juicefs/juicefs-hadoop-[0-9]*.jar"
+        
"${DORIS_ROOT}/../../../clusterEnv/*/Cluster*/fe/lib/juicefs/juicefs-hadoop-[0-9]*.jar"
+        
"${DORIS_ROOT}/../../../clusterEnv/*/Cluster*/be/lib/java_extensions/juicefs/juicefs-hadoop-[0-9]*.jar"
+        
"/mnt/ssd01/pipline/OpenSourceDoris/clusterEnv/*/Cluster*/fe/lib/juicefs/juicefs-hadoop-[0-9]*.jar"
+        
"/mnt/ssd01/pipline/OpenSourceDoris/clusterEnv/*/Cluster*/be/lib/java_extensions/juicefs/juicefs-hadoop-[0-9]*.jar"
+    )
+
+    for jar_glob in "${jar_globs[@]}"; do
+        juicefs_jar=$(compgen -G "${jar_glob}" | head -n 1 || true)
+        if [[ -n "${juicefs_jar}" ]]; then
+            echo "${juicefs_jar}"
+            return 0
+        fi
+    done
+
+    return 1
+}
+
+detect_juicefs_version() {
+    local juicefs_jar
+    juicefs_jar=$(find_juicefs_hadoop_jar || true)
+    if [[ -z "${juicefs_jar}" ]]; then
+        echo "${JUICEFS_DEFAULT_VERSION}"
+        return
+    fi
+
+    juicefs_jar=$(basename "${juicefs_jar}")
+    juicefs_jar=${juicefs_jar#juicefs-hadoop-}
+    echo "${juicefs_jar%.jar}"
+}
+
+download_juicefs_hadoop_jar() {
+    local juicefs_version="$1"
+    local cache_dir="${DORIS_ROOT}/thirdparty/installed/juicefs_libs"
+    local jar_name="juicefs-hadoop-${juicefs_version}.jar"
+    local target_jar="${cache_dir}/${jar_name}"
+    local 
download_url="https://github.com/juicedata/juicefs/releases/download/v${juicefs_version}/${jar_name}";
+

Review Comment:
   **[Medium]** Inconsistent download URL: This downloads the JuiceFS Hadoop 
JAR from **GitHub Releases** (`github.com/juicedata/juicefs/releases/...`), but 
`build.sh:download_juicefs_hadoop_jar()` downloads from **Maven Central** 
(`repo1.maven.org/maven2/io/juicefs/...`), and `thirdparty/vars.sh` also 
specifies Maven Central.
   
   While these may resolve to the same artifact, using different URLs across 
the codebase is fragile. Consider consolidating to a single source (Maven 
Central seems to be the canonical one used by `vars.sh`).



##########
docker/thirdparties/run-thirdparties-docker.sh:
##########
@@ -230,6 +230,200 @@ reserve_ports() {
     fi
 }
 
+JFS_META_FORMATTED=0
+DORIS_ROOT="$(cd "${ROOT}/../.." &>/dev/null && pwd)"
+JUICEFS_DEFAULT_VERSION="1.3.1"
+JUICEFS_LOCAL_BIN="${DORIS_ROOT}/thirdparty/installed/juicefs_bin/juicefs"
+
+find_juicefs_hadoop_jar() {
+    local juicefs_jar=""

Review Comment:
   **[Low]** Code duplication: `find_juicefs_hadoop_jar()`, 
`detect_juicefs_version()`, and `download_juicefs_hadoop_jar()` are nearly 
duplicated between this file and `build.sh` with slightly different search 
paths and download URLs. Consider extracting them into a shared helper script 
(e.g., `thirdparty/juicefs-helpers.sh`) to avoid divergence.



##########
regression-test/suites/external_table_p0/refactor_storage_param/test_jfs_hms_catalog_read.groovy:
##########
@@ -0,0 +1,137 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_jfs_hms_catalog_read", "p0,external") {
+    String enableJfs = context.config.otherConfigs.get("enableJfsTest")
+    if (enableJfs == null || !enableJfs.equalsIgnoreCase("true")) {
+        logger.info("disable JFS test.")
+        return
+    }
+
+    String enableHive = context.config.otherConfigs.get("enableHiveTest")
+    if (enableHive == null || !enableHive.equalsIgnoreCase("true")) {
+        logger.info("disable Hive test.")
+        return
+    }
+
+    String jfsFs = context.config.otherConfigs.get("jfsFs")
+    if (jfsFs == null || jfsFs.trim().isEmpty()) {
+        logger.info("skip JFS test because jfsFs is empty.")
+        return
+    }
+
+    String jfsImpl = context.config.otherConfigs.get("jfsImpl")
+    if (jfsImpl == null || jfsImpl.trim().isEmpty()) {
+        jfsImpl = "io.juicefs.JuiceFileSystem"
+    }
+    String jfsMeta = context.config.otherConfigs.get("jfsMeta")
+    if (jfsMeta == null || jfsMeta.trim().isEmpty()) {
+        throw new IllegalStateException("jfsMeta must be configured for JFS 
data IO regression")
+    }
+    String jfsCluster = jfsFs.replaceFirst("^jfs://", "")
+    int slashPos = jfsCluster.indexOf("/")
+    if (slashPos > 0) {
+        jfsCluster = jfsCluster.substring(0, slashPos)
+    }
+    String jfsMetaProperty = ",\n                'juicefs.${jfsCluster}.meta' 
= '${jfsMeta}'"
+
+    String hdfsUser = context.config.otherConfigs.get("jfsHadoopUser")
+    if (hdfsUser == null || hdfsUser.trim().isEmpty()) {
+        hdfsUser = context.config.otherConfigs.get("hdfsUser")
+    }
+    if (hdfsUser == null || hdfsUser.trim().isEmpty()) {
+        hdfsUser = "root"
+    }
+
+    String externalEnvIp = context.config.otherConfigs.get("externalEnvIp")
+    String hmsPort = context.config.otherConfigs.get("hive2HmsPort")
+    String hmsUris = context.config.otherConfigs.get("jfsHiveMetastoreUris")

Review Comment:
   **[Low]** The fallback HMS URI uses `hive2HmsPort` (typically port 9083), 
but the pipeline config at `regression-conf.groovy` sets `jfsHiveMetastoreUris 
= "thrift://127.0.0.1:9383"` (hive3). If `jfsHiveMetastoreUris` is unset in a 
non-pipeline environment, this fallback would point to the hive2 metastore 
rather than hive3, which may not have the JuiceFS filesystem implementation 
configured. Consider using `hive3HmsPort` as the fallback, or document the 
assumption.



##########
build.sh:
##########
@@ -822,6 +899,7 @@ if [[ "${BUILD_FE}" -eq 1 ]]; then
     if [[ "${BUILD_JINDOFS}" == "ON" ]]; then
         install -d "${DORIS_OUTPUT}/fe/lib/jindofs"
     fi
+    install -d "${DORIS_OUTPUT}/fe/lib/juicefs"
     cp -r -p "${DORIS_HOME}/fe/fe-core/target/lib"/* "${DORIS_OUTPUT}/fe/lib"/

Review Comment:
   **[Nit]** Unlike JinDoFS, which is gated by `BUILD_JINDOFS`, the JuiceFS jar 
directory is always created and the jar is copied unconditionally. This means 
every Doris build will attempt to download the JuiceFS jar from Maven Central 
if it is not cached locally. Consider adding a `BUILD_JUICEFS` flag similar to 
`BUILD_JINDOFS` to make this opt-in, or at minimum document why it should be 
unconditional.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to