This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new ce95da8dfb [improvement](multi-catalog) support specify hadoop username (#14734)
ce95da8dfb is described below

commit ce95da8dfbea86d03e0a3f9a6d02d70a2c9dc701
Author: Mingyu Chen <morning...@163.com>
AuthorDate: Sun Dec 4 21:09:39 2022 +0800

    [improvement](multi-catalog) support specify hadoop username (#14734)
    
    Support setting the "hadoop.username" property when creating an hms catalog.
---
 be/src/io/hdfs_builder.cpp                         |   3 +
 .../docs/ecosystem/external-table/multi-catalog.md | 151 +++++++++----------
 .../docs/ecosystem/external-table/multi-catalog.md | 163 +++++++++++----------
 fe/fe-core/pom.xml                                 |   1 +
 .../apache/doris/datasource/CatalogProperty.java   |   4 +-
 5 files changed, 165 insertions(+), 157 deletions(-)

diff --git a/be/src/io/hdfs_builder.cpp b/be/src/io/hdfs_builder.cpp
index 637e40c6a7..bb58b3c11e 100644
--- a/be/src/io/hdfs_builder.cpp
+++ b/be/src/io/hdfs_builder.cpp
@@ -72,6 +72,9 @@ THdfsParams parse_properties(const std::map<std::string, std::string>& propertie
             iter = prop.erase(iter);
         }
     }
+    if (!hdfsParams.__isset.user && std::getenv("HADOOP_USER_NAME") != nullptr) {
+        hdfsParams.__set_user(std::getenv("HADOOP_USER_NAME"));
+    }
     hdfsParams.__set_hdfs_conf(hdfs_configs);
     return hdfsParams;
 }
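
The hunk above gives the BE an environment-variable fallback: if no user was set via catalog properties, it picks up `HADOOP_USER_NAME`, mirroring the convention used by Hadoop's own clients. A minimal standalone sketch of that precedence, assuming illustrative names (`Params`, `resolve_user`) rather than Doris's actual types:

```cpp
// Sketch: an explicitly configured user wins, HADOOP_USER_NAME is the fallback.
#include <cstdlib>
#include <iostream>
#include <optional>
#include <string>

struct Params {
    std::optional<std::string> user;  // set when 'hadoop.username' was given
};

std::string resolve_user(const Params& p) {
    if (p.user) {
        return *p.user;  // explicitly configured user takes precedence
    }
    if (const char* env = std::getenv("HADOOP_USER_NAME")) {
        return env;      // environment-variable fallback, as in the hunk above
    }
    return "";           // unset: the HDFS client falls back to its own default
}

int main() {
    Params p;  // no explicit user configured
    std::cout << "resolved user: " << resolve_user(p) << "\n";
}
```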
diff --git a/docs/en/docs/ecosystem/external-table/multi-catalog.md b/docs/en/docs/ecosystem/external-table/multi-catalog.md
index ed43329608..dabfcf4fb9 100644
--- a/docs/en/docs/ecosystem/external-table/multi-catalog.md
+++ b/docs/en/docs/ecosystem/external-table/multi-catalog.md
@@ -43,32 +43,32 @@ This function will be used as a supplement and enhancement to the previous exter
 
 1. Internal Catalog
 
-       Doris's original Database and Table will belong to Internal Catalog. Internal Catalog is the built-in default Catalog, which cannot be modified or deleted by the user.
+    Doris's original Database and Table will belong to Internal Catalog. Internal Catalog is the built-in default Catalog, which cannot be modified or deleted by the user.
 
 2. External Catalog
 
-       An External Catalog can be created with the [CREATE CATALOG](../../sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-CATALOG.md) command. After creation, you can view the created catalog through the [SHOW CATALOGS](../../sql-manual/sql-reference/Show-Statements/SHOW-CATALOGS.md) command.
+    An External Catalog can be created with the [CREATE CATALOG](../../sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-CATALOG.md) command. After creation, you can view the created catalog through the [SHOW CATALOGS](../../sql-manual/sql-reference/Show-Statements/SHOW-CATALOGS.md) command.
 
 3. Switch Catalog
 
-       After users log in to Doris, they enter the Internal Catalog by default, so the default usage is the same as the previous version. You can directly use `SHOW DATABASES`, `USE DB` and other commands to view and switch databases.
+    After users log in to Doris, they enter the Internal Catalog by default, so the default usage is the same as the previous version. You can directly use `SHOW DATABASES`, `USE DB` and other commands to view and switch databases.
 
-       Users can switch the catalog through the [SWITCH](../../sql-manual/sql-reference/Utility-Statements/SWITCH.md) command. like:
+    Users can switch the catalog through the [SWITCH](../../sql-manual/sql-reference/Utility-Statements/SWITCH.md) command. For example:
 
-       ````
-       SWiTCH internal;
-       SWITCH hive_catalog;
-       ````
+    ````
+    SWITCH internal;
+    SWITCH hive_catalog;
+    ````
 
-       After switching, you can directly view and switch the Database in the corresponding Catalog through commands such as `SHOW DATABASES`, `USE DB`. Doris will automatically sync the Database and Table in the Catalog. Users can view and access data in the External Catalog as they would with the Internal Catalog.
+    After switching, you can directly view and switch the Database in the corresponding Catalog through commands such as `SHOW DATABASES`, `USE DB`. Doris will automatically sync the Database and Table in the Catalog. Users can view and access data in the External Catalog as they would with the Internal Catalog.
 
-       Currently, Doris only supports read-only access to data in the External Catalog.
-       
+    Currently, Doris only supports read-only access to data in the External Catalog.
+    
 4. Drop Catalog
 
-       Both Database and Table in External Catalog are read-only. However, the catalog can be deleted (Internal Catalog cannot be deleted). An External Catalog can be dropped via the [DROP CATALOG](../../../sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-CATALOG) command.
+    Both Database and Table in External Catalog are read-only. However, the catalog can be deleted (Internal Catalog cannot be deleted). An External Catalog can be dropped via the [DROP CATALOG](../../../sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-CATALOG) command.
 
-       This operation will only delete the mapping information of the catalog in Doris, and will not modify or change the contents of any external data source.
+    This operation will only delete the mapping information of the catalog in Doris, and will not modify or change the contents of any external data source.
 
 ## Samples
 
@@ -82,13 +82,14 @@ The following example is used to create a Catalog named hive to connect the spec
 
 ```
 CREATE CATALOG hive PROPERTIES (
-       "type"="hms",
-       'hive.metastore.uris' = 'thrift://172.21.0.1:7004',
-       'dfs.nameservices'='service1',
-       'dfs.ha.namenodes. service1'='nn1,nn2',
-       'dfs.namenode.rpc-address.HDFS8000871.nn1'='172.21.0.2:4007',
-       'dfs.namenode.rpc-address.HDFS8000871.nn2'='172.21.0.3:4007',
-       
'dfs.client.failover.proxy.provider.HDFS8000871'='org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider'
+    "type"="hms",
+    'hive.metastore.uris' = 'thrift://172.21.0.1:7004',
+    'hadoop.username' = 'hive'
+    'dfs.nameservices'='service1',
+    'dfs.ha.namenodes. service1'='nn1,nn2',
+    'dfs.namenode.rpc-address.HDFS8000871.nn1'='172.21.0.2:4007',
+    'dfs.namenode.rpc-address.HDFS8000871.nn2'='172.21.0.3:4007',
+    
'dfs.client.failover.proxy.provider.HDFS8000871'='org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider'
 );
 ```
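
If the target cluster does not use HDFS HA, the `dfs.*` keys can be omitted entirely; a minimal sketch (catalog name and endpoint here are illustrative) might look like:

```
CREATE CATALOG hive_simple PROPERTIES (
    "type" = "hms",
    "hive.metastore.uris" = "thrift://172.21.0.1:7004",
    "hadoop.username" = "hive"
);
```

With `hadoop.username` set, Doris should access HDFS as user `hive` rather than as the OS user running the Doris processes.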
 
@@ -241,9 +242,9 @@ The following example creates a Catalog connection named es to the specified ES
 
 ```
 CREATE CATALOG es PROPERTIES (
-       "type"="es",
-       "elasticsearch.hosts"="http://192.168.120.12:29200";,
-       "elasticsearch.nodes_discovery"="false"
+    "type"="es",
+    "elasticsearch.hosts"="http://192.168.120.12:29200";,
+    "elasticsearch.nodes_discovery"="false"
 );
 ```
 
@@ -312,60 +313,60 @@ Parameter | Description
 
 1. Create hive-site.xml
 
-       Create hive-site.xml and put it in `fe/conf` and `be/conf`.
-       
-       ```
-       <?xml version="1.0"?>
-       <configuration>
-           <!--Set to use dlf client-->
-           <property>
-               <name>hive.metastore.type</name>
-               <value>dlf</value>
-           </property>
-           <property>
-               <name>dlf.catalog.endpoint</name>
-               <value>dlf-vpc.cn-beijing.aliyuncs.com</value>
-           </property>
-           <property>
-               <name>dlf.catalog.region</name>
-               <value>cn-beijing</value>
-           </property>
-           <property>
-               <name>dlf.catalog.proxyMode</name>
-               <value>DLF_ONLY</value>
-           </property>
-           <property>
-               <name>dlf.catalog.uid</name>
-               <value>20000000000000000</value>
-           </property>
-           <property>
-               <name>dlf.catalog.accessKeyId</name>
-               <value>XXXXXXXXXXXXXXX</value>
-           </property>
-           <property>
-               <name>dlf.catalog.accessKeySecret</name>
-               <value>XXXXXXXXXXXXXXXXX</value>
-           </property>
-       </configuration>
-       ```
-
-       * `dlf.catalog.endpoint`: DLF Endpoint. See: [Regions and endpoints of DLF](https://www.alibabacloud.com/help/en/data-lake-formation/latest/regions-and-endpoints)
-       * `dlf.catalog.region`: DLF Regio. See: [Regions and endpoints of DLF](https://www.alibabacloud.com/help/en/data-lake-formation/latest/regions-and-endpoints)
-       * `dlf.catalog.uid`: Ali Cloud Account ID. That is, the "cloud account ID" of the personal information in the upper right corner of the Alibaba Cloud console.  * `dlf.catalog.accessKeyId`: AccessKey. See: [Ali Could Console](https://ram.console.aliyun.com/manage/ak).
-       * `dlf.catalog.accessKeySecret`: SecretKey. See: [Ali Could Console](https://ram.console.aliyun.com/manage/ak).
-
-       Other configuration items are fixed values and do not need to be changed.
+    Create hive-site.xml and put it in `fe/conf` and `be/conf`.
+    
+    ```
+    <?xml version="1.0"?>
+    <configuration>
+        <!--Set to use dlf client-->
+        <property>
+            <name>hive.metastore.type</name>
+            <value>dlf</value>
+        </property>
+        <property>
+            <name>dlf.catalog.endpoint</name>
+            <value>dlf-vpc.cn-beijing.aliyuncs.com</value>
+        </property>
+        <property>
+            <name>dlf.catalog.region</name>
+            <value>cn-beijing</value>
+        </property>
+        <property>
+            <name>dlf.catalog.proxyMode</name>
+            <value>DLF_ONLY</value>
+        </property>
+        <property>
+            <name>dlf.catalog.uid</name>
+            <value>20000000000000000</value>
+        </property>
+        <property>
+            <name>dlf.catalog.accessKeyId</name>
+            <value>XXXXXXXXXXXXXXX</value>
+        </property>
+        <property>
+            <name>dlf.catalog.accessKeySecret</name>
+            <value>XXXXXXXXXXXXXXXXX</value>
+        </property>
+    </configuration>
+    ```
+
+    * `dlf.catalog.endpoint`: DLF Endpoint. See: [Regions and endpoints of DLF](https://www.alibabacloud.com/help/en/data-lake-formation/latest/regions-and-endpoints)
+    * `dlf.catalog.region`: DLF Region. See: [Regions and endpoints of DLF](https://www.alibabacloud.com/help/en/data-lake-formation/latest/regions-and-endpoints)
+    * `dlf.catalog.uid`: Ali Cloud Account ID, i.e. the "cloud account ID" shown in the personal information panel in the upper right corner of the Alibaba Cloud console.
+    * `dlf.catalog.accessKeyId`: AccessKey. See: [Ali Cloud Console](https://ram.console.aliyun.com/manage/ak).
+    * `dlf.catalog.accessKeySecret`: SecretKey. See: [Ali Cloud Console](https://ram.console.aliyun.com/manage/ak).
+
+    Other configuration items are fixed values and do not need to be changed.
 
 2. Restart FE and create a catalog with the `CREATE CATALOG` statement.
 
-       ```
-       CREATE CATALOG dlf PROPERTIES (
-           "type"="hms",
-           "hive.metastore.uris" = "thrift://127.0.0.1:9083"
-       );
-       ```
-       
-       where `type` is fixed to `hms`. The value of `hive.metastore.uris` can be filled in at will, but it will not be used in practice. But it needs to be filled in the standard hive metastore thrift uri format.
+    ```
+    CREATE CATALOG dlf PROPERTIES (
+        "type"="hms",
+        "hive.metastore.uris" = "thrift://127.0.0.1:9083"
+    );
+    ```
+    
+    where `type` is fixed to `hms`. The value of `hive.metastore.uris` can be arbitrary, since it is not actually used, but it must follow the standard Hive Metastore thrift URI format.
 
 After that, the metadata under DLF can be accessed like a normal Hive MetaStore.
 
@@ -431,4 +432,4 @@ Currently, users need to manually refresh metadata via the [REFRESH CATALOG](../
 
 Automatic synchronization of metadata will be supported soon.
 
-</version>
\ No newline at end of file
+</version>
diff --git a/docs/zh-CN/docs/ecosystem/external-table/multi-catalog.md b/docs/zh-CN/docs/ecosystem/external-table/multi-catalog.md
index 6bf4b54a89..841c6b6766 100644
--- a/docs/zh-CN/docs/ecosystem/external-table/multi-catalog.md
+++ b/docs/zh-CN/docs/ecosystem/external-table/multi-catalog.md
@@ -43,32 +43,32 @@ under the License.
 
 1. Internal Catalog
 
-       Doris 原有的 Database 和 Table 都将归属于 Internal Catalog。Internal Catalog 是内置的默认 Catalog,用户不可修改或删除。
+    Doris 原有的 Database 和 Table 都将归属于 Internal Catalog。Internal Catalog 是内置的默认 Catalog,用户不可修改或删除。
 
 2. External Catalog
 
-       可以通过 [CREATE CATALOG](../../sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-CATALOG.md) 命令创建一个 External Catalog。创建后,可以通过 [SHOW CATALOGS](../../sql-manual/sql-reference/Show-Statements/SHOW-CATALOGS.md) 命令查看已创建的 Catalog。
+    可以通过 [CREATE CATALOG](../../sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-CATALOG.md) 命令创建一个 External Catalog。创建后,可以通过 [SHOW CATALOGS](../../sql-manual/sql-reference/Show-Statements/SHOW-CATALOGS.md) 命令查看已创建的 Catalog。
 
 3. 切换 Catalog
 
-       用户登录 Doris 后,默认进入 Internal Catalog,因此默认的使用和之前版本并无差别,可以直接使用 `SHOW DATABASES`,`USE DB` 等命令查看和切换数据库。
-       
-       用户可以通过 [SWITCH](../../sql-manual/sql-reference/Utility-Statements/SWITCH.md) 命令切换 Catalog。如:
-       
-       ```
-       SWiTCH internal;
-       SWITCH hive_catalog;
-       ```
-       
-       切换后,可以直接通过 `SHOW DATABASES`,`USE DB` 等命令查看和切换对应 Catalog 中的 Database。Doris 会自动通过 Catalog 中的 Database 和 Table。用户可以像使用 Internal Catalog 一样,对 External Catalog 中的数据进行查看和访问。
-       
-       当前,Doris 只支持对 External Catalog 中的数据进行只读访问。
-       
+    用户登录 Doris 后,默认进入 Internal Catalog,因此默认的使用和之前版本并无差别,可以直接使用 `SHOW DATABASES`,`USE DB` 等命令查看和切换数据库。
+    
+    用户可以通过 [SWITCH](../../sql-manual/sql-reference/Utility-Statements/SWITCH.md) 命令切换 Catalog。如:
+    
+    ```
+    SWITCH internal;
+    SWITCH hive_catalog;
+    ```
+    
+    切换后,可以直接通过 `SHOW DATABASES`,`USE DB` 等命令查看和切换对应 Catalog 中的 Database。Doris 会自动同步 Catalog 中的 Database 和 Table。用户可以像使用 Internal Catalog 一样,对 External Catalog 中的数据进行查看和访问。
+    
+    当前,Doris 只支持对 External Catalog 中的数据进行只读访问。
+    
 4. 删除 Catalog
 
-       External Catalog 中的 Database 和 Table 都是只读的。但是可以删除 Catalog(Internal Catalog无法删除)。可以通过 [DROP CATALOG](../../../sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-CATALOG) 命令删除一个 External Catalog。
-       
-       该操作仅会删除 Doris 中该 Catalog 的映射信息,并不会修改或变更任何外部数据目录的内容。
+    External Catalog 中的 Database 和 Table 都是只读的。但是可以删除 Catalog(Internal Catalog 无法删除)。可以通过 [DROP CATALOG](../../../sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-CATALOG) 命令删除一个 External Catalog。
+    
+    该操作仅会删除 Doris 中该 Catalog 的映射信息,并不会修改或变更任何外部数据目录的内容。
 
 ## 连接示例
 
@@ -82,13 +82,14 @@ under the License.
 
 ```
 CREATE CATALOG hive PROPERTIES (
-       "type"="hms",
-       'hive.metastore.uris' = 'thrift://172.21.0.1:7004',
-       'dfs.nameservices'='service1',
-       'dfs.ha.namenodes. service1'='nn1,nn2',
-       'dfs.namenode.rpc-address.HDFS8000871.nn1'='172.21.0.2:4007',
-       'dfs.namenode.rpc-address.HDFS8000871.nn2'='172.21.0.3:4007',
-       
'dfs.client.failover.proxy.provider.HDFS8000871'='org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider'
+    "type"="hms",
+    'hive.metastore.uris' = 'thrift://172.21.0.1:7004',
+    'hadoop.username' = 'hive'
+    'dfs.nameservices'='service1',
+    'dfs.ha.namenodes. service1'='nn1,nn2',
+    'dfs.namenode.rpc-address.HDFS8000871.nn1'='172.21.0.2:4007',
+    'dfs.namenode.rpc-address.HDFS8000871.nn2'='172.21.0.3:4007',
+    
'dfs.client.failover.proxy.provider.HDFS8000871'='org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider'
 );
 ```
 
@@ -241,9 +242,9 @@ Query OK, 1000 rows affected (0.28 sec)
 
 ```
 CREATE CATALOG es PROPERTIES (
-       "type"="es",
-       "elasticsearch.hosts"="http://192.168.120.12:29200";,
-       "elasticsearch.nodes_discovery"="false"
+    "type"="es",
+    "elasticsearch.hosts"="http://192.168.120.12:29200";,
+    "elasticsearch.nodes_discovery"="false"
 );
 ```
 
@@ -312,62 +313,62 @@ mysql> select * from test;
 
 1. 创建 hive-site.xml
 
-       创建 hive-site.xml 文件,并将其放置在 `fe/conf` 和 `be/conf` 目录下。
-       
-       ```
-       <?xml version="1.0"?>
-       <configuration>
-           <!--Set to use dlf client-->
-           <property>
-               <name>hive.metastore.type</name>
-               <value>dlf</value>
-           </property>
-           <property>
-               <name>dlf.catalog.endpoint</name>
-               <value>dlf-vpc.cn-beijing.aliyuncs.com</value>
-           </property>
-           <property>
-               <name>dlf.catalog.region</name>
-               <value>cn-beijing</value>
-           </property>
-           <property>
-               <name>dlf.catalog.proxyMode</name>
-               <value>DLF_ONLY</value>
-           </property>
-           <property>
-               <name>dlf.catalog.uid</name>
-               <value>20000000000000000</value>
-           </property>
-           <property>
-               <name>dlf.catalog.accessKeyId</name>
-               <value>XXXXXXXXXXXXXXX</value>
-           </property>
-           <property>
-               <name>dlf.catalog.accessKeySecret</name>
-               <value>XXXXXXXXXXXXXXXXX</value>
-           </property>
-       </configuration>
-       ```
-
-       * `dlf.catalog.endpoint`:DLF Endpoint,参阅:[DLF Region和Endpoint对照表](https://www.alibabacloud.com/help/zh/data-lake-formation/latest/regions-and-endpoints)
-       * `dlf.catalog.region`:DLF Region,参阅:[DLF Region和Endpoint对照表](https://www.alibabacloud.com/help/zh/data-lake-formation/latest/regions-and-endpoints)
-       * `dlf.catalog.uid`:阿里云账号。即阿里云控制台右上角个人信息的“云账号ID”。
-       * `dlf.catalog.accessKeyId`:AccessKey。可以在 [阿里云控制台](https://ram.console.aliyun.com/manage/ak) 中创建和管理。
-       * `dlf.catalog.accessKeySecret`:SecretKey。可以在 [阿里云控制台](https://ram.console.aliyun.com/manage/ak) 中创建和管理。
-
-       其他配置项为固定值,无需改动。
+    创建 hive-site.xml 文件,并将其放置在 `fe/conf` 和 `be/conf` 目录下。
+    
+    ```
+    <?xml version="1.0"?>
+    <configuration>
+        <!--Set to use dlf client-->
+        <property>
+            <name>hive.metastore.type</name>
+            <value>dlf</value>
+        </property>
+        <property>
+            <name>dlf.catalog.endpoint</name>
+            <value>dlf-vpc.cn-beijing.aliyuncs.com</value>
+        </property>
+        <property>
+            <name>dlf.catalog.region</name>
+            <value>cn-beijing</value>
+        </property>
+        <property>
+            <name>dlf.catalog.proxyMode</name>
+            <value>DLF_ONLY</value>
+        </property>
+        <property>
+            <name>dlf.catalog.uid</name>
+            <value>20000000000000000</value>
+        </property>
+        <property>
+            <name>dlf.catalog.accessKeyId</name>
+            <value>XXXXXXXXXXXXXXX</value>
+        </property>
+        <property>
+            <name>dlf.catalog.accessKeySecret</name>
+            <value>XXXXXXXXXXXXXXXXX</value>
+        </property>
+    </configuration>
+    ```
+
+    * `dlf.catalog.endpoint`:DLF Endpoint,参阅:[DLF Region和Endpoint对照表](https://www.alibabacloud.com/help/zh/data-lake-formation/latest/regions-and-endpoints)
+    * `dlf.catalog.region`:DLF Region,参阅:[DLF Region和Endpoint对照表](https://www.alibabacloud.com/help/zh/data-lake-formation/latest/regions-and-endpoints)
+    * `dlf.catalog.uid`:阿里云账号。即阿里云控制台右上角个人信息的“云账号ID”。
+    * `dlf.catalog.accessKeyId`:AccessKey。可以在 [阿里云控制台](https://ram.console.aliyun.com/manage/ak) 中创建和管理。
+    * `dlf.catalog.accessKeySecret`:SecretKey。可以在 [阿里云控制台](https://ram.console.aliyun.com/manage/ak) 中创建和管理。
+
+    其他配置项为固定值,无需改动。
 
 2. 重启 FE,并通过 `CREATE CATALOG` 语句创建 catalog。
 
-       ```
-       CREATE CATALOG dlf PROPERTIES (
-           "type"="hms",
-           "hive.metastore.uris" = "thrift://127.0.0.1:9083"
-       );
-       ```
-       
-       其中 `type` 固定为 `hms`。 `hive.metastore.uris` 的值随意填写即可,实际不会使用。但需要按照标准 hive metastore thrift uri 格式填写。
-       
+    ```
+    CREATE CATALOG dlf PROPERTIES (
+        "type"="hms",
+        "hive.metastore.uris" = "thrift://127.0.0.1:9083"
+    );
+    ```
+    
+    其中 `type` 固定为 `hms`。`hive.metastore.uris` 的值随意填写即可,实际不会使用。但需要按照标准 hive metastore thrift uri 格式填写。
+    
 之后,可以像正常的 Hive MetaStore 一样,访问 DLF 下的元数据。 
 
 
diff --git a/fe/fe-core/pom.xml b/fe/fe-core/pom.xml
index 8b202b255c..2b4016b026 100644
--- a/fe/fe-core/pom.xml
+++ b/fe/fe-core/pom.xml
@@ -799,6 +799,7 @@ under the License.
                     <forkCount>${fe_ut_parallel}</forkCount>
                     <!-->not reuse forked jvm, so that each unit test will run in separate jvm. to avoid singleton conflict<-->
                     <reuseForks>false</reuseForks>
+                    <useFile>false</useFile>
                     <argLine>
                         -javaagent:${settings.localRepository}/org/jmockit/jmockit/${jmockit.version}/jmockit-${jmockit.version}.jar
                     </argLine>
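
The added `<useFile>false</useFile>` switches the maven-surefire-plugin to print unit-test output to the console instead of per-test report files, which makes failures easier to read in CI logs; it appears incidental to the hadoop.username change itself.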
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogProperty.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogProperty.java
index 7f76a98d97..a2698d1363 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogProperty.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogProperty.java
@@ -20,6 +20,7 @@ package org.apache.doris.datasource;
 import org.apache.doris.catalog.HiveTable;
 import org.apache.doris.common.io.Text;
 import org.apache.doris.common.io.Writable;
+import org.apache.doris.common.util.BrokerUtil;
 import org.apache.doris.persist.gson.GsonUtils;
 
 import com.google.common.collect.Maps;
@@ -46,7 +47,8 @@ public class CatalogProperty implements Writable {
     public Map<String, String> getDfsProperties() {
         Map<String, String> dfsProperties = Maps.newHashMap();
         for (Map.Entry<String, String> entry : properties.entrySet()) {
-            if (entry.getKey().startsWith(HiveTable.HIVE_HDFS_PREFIX)) {
+            if (entry.getKey().startsWith(HiveTable.HIVE_HDFS_PREFIX)
+                    || entry.getKey().equals(BrokerUtil.HADOOP_USER_NAME)) {
                 dfsProperties.put(entry.getKey(), entry.getValue());
             }
         }
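
For reference, a self-contained sketch of the selection logic `getDfsProperties` now implements; the constant values below are assumptions inferred from the property names used elsewhere in this commit, not the actual Doris definitions:

```java
import java.util.HashMap;
import java.util.Map;

public class DfsPropertyFilter {
    // Assumed values; in Doris these come from HiveTable and BrokerUtil.
    static final String HIVE_HDFS_PREFIX = "dfs.";
    static final String HADOOP_USER_NAME = "hadoop.username";

    // Keep dfs.* keys plus the hadoop user key for the HDFS client.
    static Map<String, String> getDfsProperties(Map<String, String> properties) {
        Map<String, String> dfs = new HashMap<>();
        for (Map.Entry<String, String> e : properties.entrySet()) {
            if (e.getKey().startsWith(HIVE_HDFS_PREFIX)
                    || e.getKey().equals(HADOOP_USER_NAME)) {
                dfs.put(e.getKey(), e.getValue());
            }
        }
        return dfs;
    }

    public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        props.put("type", "hms");
        props.put("hadoop.username", "hive");
        props.put("dfs.nameservices", "service1");
        // Prints only the hadoop.username and dfs.* entries.
        System.out.println(getDfsProperties(props));
    }
}
```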


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org
