This is an automated email from the ASF dual-hosted git repository.

kxiao pushed a commit to branch branch-2.0
in repository https://gitbox.apache.org/repos/asf/doris.git

commit 3d21e1d74e7ceb0afbbdb62de9addb52d092fada
Author: Siyang Tang <82279870+tangsiyang2...@users.noreply.github.com>
AuthorDate: Thu Jul 13 10:15:55 2023 +0800

    [feature](TVF) support path partition keys for external file TVF (#21648)
---
 .../sql-functions/table-functions/hdfs.md          |   4 +
 .../sql-manual/sql-functions/table-functions/s3.md |   4 +
 .../sql-functions/table-functions/hdfs.md          |   5 +-
 .../sql-manual/sql-functions/table-functions/s3.md |   3 +
 .../org/apache/doris/analysis/S3TvfLoadStmt.java   |   6 ++
 .../apache/doris/planner/SingleNodePlanner.java    |   2 +-
 .../apache/doris/planner/external/TVFScanNode.java |   2 +-
 .../ExternalFileTableValuedFunction.java           |  37 ++++++--
 .../tablefunction/HdfsTableValuedFunction.java     |   9 +-
 .../doris/tablefunction/S3TableValuedFunction.java |   3 +-
 .../broker_load/test_tvf_based_broker_load.groovy  | 105 ++++++++++++++++++++-
 11 files changed, 163 insertions(+), 17 deletions(-)

diff --git a/docs/en/docs/sql-manual/sql-functions/table-functions/hdfs.md 
b/docs/en/docs/sql-manual/sql-functions/table-functions/hdfs.md
index d741fb7001..e1db81ec35 100644
--- a/docs/en/docs/sql-manual/sql-functions/table-functions/hdfs.md
+++ b/docs/en/docs/sql-manual/sql-functions/table-functions/hdfs.md
@@ -88,6 +88,10 @@ File format parameters:
 - `trim_double_quotes`: Boolean type (optional), the default value is `false`. 
True means that the outermost double quotes of each field in the csv file are 
trimmed.
 - `skip_lines`: Integer type (optional), the default value is 0. It will skip 
some lines in the head of csv file. It will be disabled when the format is 
`csv_with_names` or `csv_with_names_and_types`.
 
+Other parameters:
+
+- `path_partition_keys`: (optional) Specifies the partition column names carried in the file path. For example, if the file path is /path/to/city=beijing/date="2023-07-09", fill in `path_partition_keys="city,date"`; the corresponding column names and values will then be read from the path automatically during loading.
+
 ### Examples
 
 Read and access csv format files on hdfs storage.
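
As a usage illustration of the parameter added above, a minimal hedged sketch of an hdfs() call (host, port, username, and file layout are hypothetical):

```sql
SELECT * FROM hdfs(
    "uri" = "hdfs://127.0.0.1:8020/path/to/city=beijing/date=2023-07-09/data.csv",
    "fs.defaultFS" = "hdfs://127.0.0.1:8020",
    "hadoop.username" = "doris",
    "format" = "csv",
    "path_partition_keys" = "city,date");
-- In addition to the csv columns, `city` and `date` are filled
-- from the path segments beijing and 2023-07-09.
```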
diff --git a/docs/en/docs/sql-manual/sql-functions/table-functions/s3.md 
b/docs/en/docs/sql-manual/sql-functions/table-functions/s3.md
index a3c99102b5..e793daec79 100644
--- a/docs/en/docs/sql-manual/sql-functions/table-functions/s3.md
+++ b/docs/en/docs/sql-manual/sql-functions/table-functions/s3.md
@@ -87,6 +87,10 @@ file format parameter:
 - `trim_double_quotes`: Boolean type (optional), the default value is `false`. 
True means that the outermost double quotes of each field in the csv file are 
trimmed.
 - `skip_lines`: Integer type (optional), the default value is 0. It will skip 
some lines in the head of csv file. It will be disabled when the format is 
`csv_with_names` or `csv_with_names_and_types`.
 
+Other parameters:
+
+- `path_partition_keys`: (optional) Specifies the partition column names carried in the file path. For example, if the file path is /path/to/city=beijing/date="2023-07-09", fill in `path_partition_keys="city,date"`; the corresponding column names and values will then be read from the path automatically during loading.
+
 ### Example
 
 Read and access csv format files on S3-compatible object storage.
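
Likewise for s3(), a hedged sketch (endpoint, bucket, and credentials are placeholders; the key casing follows the lowercase form listed in this patch):

```sql
SELECT * FROM s3(
    "uri" = "https://s3.example.com/bucket/path/to/city=beijing/file.csv",
    "access_key" = "ak",
    "secret_key" = "sk",
    "region" = "ap-beijing",
    "format" = "csv",
    "path_partition_keys" = "city");
```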
diff --git a/docs/zh-CN/docs/sql-manual/sql-functions/table-functions/hdfs.md 
b/docs/zh-CN/docs/sql-manual/sql-functions/table-functions/hdfs.md
index f0f22831b0..6156060b29 100644
--- a/docs/zh-CN/docs/sql-manual/sql-functions/table-functions/hdfs.md
+++ b/docs/zh-CN/docs/sql-manual/sql-functions/table-functions/hdfs.md
@@ -87,7 +87,10 @@ hdfs(
    <version since="dev">The following 2 parameters are used for loading in csv format</version>
 
 - `trim_double_quotes`: Boolean type, optional, default value is `false`; `true` means the outermost double quotes of each field in the csv file are trimmed
-- `skip_lines`: Integer type, optional, default value is 0; it skips the first few lines of the csv file. This parameter is ignored when the format is set to `csv_with_names` or `csv_with_names_and_types` 
+- `skip_lines`: Integer type, optional, default value is 0; it skips the first few lines of the csv file. This parameter is ignored when the format is set to `csv_with_names` or `csv_with_names_and_types`
+
+Other parameters:
+- `path_partition_keys`: (optional) Specifies the partition column names carried in the file path. For example, if the file path is /path/to/city=beijing/date="2023-07-09", fill in `path_partition_keys="city,date"`; the corresponding column names and values will then be read from the path automatically during loading.
 
 ### Examples
 
diff --git a/docs/zh-CN/docs/sql-manual/sql-functions/table-functions/s3.md 
b/docs/zh-CN/docs/sql-manual/sql-functions/table-functions/s3.md
index 92d08dfc44..f5c65e3f41 100644
--- a/docs/zh-CN/docs/sql-manual/sql-functions/table-functions/s3.md
+++ b/docs/zh-CN/docs/sql-manual/sql-functions/table-functions/s3.md
@@ -89,6 +89,9 @@ Each parameter in the S3 tvf is a `"key"="value"` pair.
 - `trim_double_quotes`: Boolean type, optional, default value is `false`; `true` means the outermost double quotes of each field in the csv file are trimmed
 - `skip_lines`: Integer type, optional, default value is 0; it skips the first few lines of the csv file. This parameter is ignored when the format is set to `csv_with_names` or `csv_with_names_and_types`
 
+Other parameters:
+- `path_partition_keys`: (optional) Specifies the partition column names carried in the file path. For example, if the file path is /path/to/city=beijing/date="2023-07-09", fill in `path_partition_keys="city,date"`; the corresponding column names and values will then be read from the path automatically during loading.
+
 ### Example
 
 Read and access csv format files on S3-compatible object storage
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/analysis/S3TvfLoadStmt.java 
b/fe/fe-core/src/main/java/org/apache/doris/analysis/S3TvfLoadStmt.java
index 39d448a8da..a6f864a8c0 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/S3TvfLoadStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/S3TvfLoadStmt.java
@@ -159,6 +159,12 @@ public class S3TvfLoadStmt extends NativeInsertStmt {
             parseSeparator(dataDescription.getLineDelimiterObj(), params);
         }
 
+        List<String> columnsFromPath = dataDescription.getColumnsFromPath();
+        if (columnsFromPath != null) {
+            params.put(ExternalFileTableValuedFunction.PATH_PARTITION_KEYS,
+                    String.join(",", columnsFromPath));
+        }
+
         Preconditions.checkState(!brokerDesc.isMultiLoadBroker(), "do not 
support multi broker load currently");
         Preconditions.checkState(brokerDesc.getStorageType() == 
StorageType.S3, "only support S3 load");
 
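The change above forwards a broker load's `COLUMNS FROM PATH AS (...)` clause to the generated TVF as `path_partition_keys`. A hedged sketch of a load that would exercise it (label, table, bucket, and credentials are placeholders; the endpoint mirrors the one used in the regression test below):

```sql
LOAD LABEL example_db.label1
(
    DATA INFILE("s3://bucket/path/to/city=beijing/file.csv")
    INTO TABLE example_tbl
    COLUMNS FROM PATH AS (city)
)
WITH S3
(
    "AWS_ENDPOINT" = "cos.ap-beijing.myqcloud.com",
    "AWS_ACCESS_KEY" = "ak",
    "AWS_SECRET_KEY" = "sk",
    "AWS_REGION" = "ap-beijing"
);
-- With this patch, the rewritten insert's s3() call would carry
-- path_partition_keys="city".
```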
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/planner/SingleNodePlanner.java 
b/fe/fe-core/src/main/java/org/apache/doris/planner/SingleNodePlanner.java
index efaf52820e..1707549cf5 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/SingleNodePlanner.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/SingleNodePlanner.java
@@ -2196,7 +2196,7 @@ public class SingleNodePlanner {
 
     /**
      * Create a tree of PlanNodes for the given tblRef, which can be a 
BaseTableRef,
-     * CollectionTableRef or an InlineViewRef.
+     * TableValuedFunctionRef, CollectionTableRef or an InlineViewRef.
      * <p>
      * 'fastPartitionKeyScans' indicates whether to try to produce the slots 
with
      * metadata instead of table scans. Only applicable to BaseTableRef which 
is also
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/planner/external/TVFScanNode.java 
b/fe/fe-core/src/main/java/org/apache/doris/planner/external/TVFScanNode.java
index d895856401..c069aa43c3 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/planner/external/TVFScanNode.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/planner/external/TVFScanNode.java
@@ -118,7 +118,7 @@ public class TVFScanNode extends FileQueryScanNode {
 
     @Override
     public List<String> getPathPartitionKeys() {
-        return Lists.newArrayList();
+        return tableValuedFunction.getPathPartitionKeys();
     }
 
     @Override
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/ExternalFileTableValuedFunction.java
 
b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/ExternalFileTableValuedFunction.java
index 035d54c0d1..55c86ce92a 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/ExternalFileTableValuedFunction.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/ExternalFileTableValuedFunction.java
@@ -72,12 +72,15 @@ import org.apache.thrift.TException;
 import org.apache.thrift.TSerializer;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
+import java.util.stream.Collectors;
 
 /**
  * ExternalFileTableValuedFunction is used for S3/HDFS/LOCAL 
table-valued-function
@@ -99,6 +102,7 @@ public abstract class ExternalFileTableValuedFunction 
extends TableValuedFunctio
     protected static final String SKIP_LINES = "skip_lines";
     protected static final String CSV_SCHEMA = "csv_schema";
     protected static final String COMPRESS_TYPE = "compress_type";
+    public static final String PATH_PARTITION_KEYS = "path_partition_keys";
     // decimal(p,s)
     private static final Pattern DECIMAL_TYPE_PATTERN = 
Pattern.compile("decimal\\((\\d+),(\\d+)\\)");
     // datetime(p)
@@ -119,10 +123,13 @@ public abstract class ExternalFileTableValuedFunction 
extends TableValuedFunctio
             .add(CSV_SCHEMA)
             .build();
 
-    // Columns got from file
+    // Columns got from the file and from the path (if present)
     protected List<Column> columns = null;
     // User specified csv columns, it will override columns got from file
-    private List<Column> csvSchema = Lists.newArrayList();
+    private final List<Column> csvSchema = Lists.newArrayList();
+
+    // Partition columns from path, e.g. /path/to/columnName=columnValue.
+    private List<String> pathPartitionKeys;
 
     protected List<TBrokerFileStatus> fileStatuses = Lists.newArrayList();
     protected Map<String, String> locationProperties;
@@ -174,6 +181,10 @@ public abstract class ExternalFileTableValuedFunction 
extends TableValuedFunctio
         return "";
     }
 
+    public List<String> getPathPartitionKeys() {
+        return pathPartitionKeys;
+    }
+
     protected void parseFile() throws AnalysisException {
         String path = getFilePath();
         BrokerDesc brokerDesc = getBrokerDesc();
@@ -233,6 +244,12 @@ public abstract class ExternalFileTableValuedFunction 
extends TableValuedFunctio
                 || formatString.equals("csv_with_names_and_types")) {
             parseCsvSchema(csvSchema, validParams);
         }
+        pathPartitionKeys = 
Optional.ofNullable(validParams.get(PATH_PARTITION_KEYS))
+                .map(str ->
+                        Arrays.stream(str.split(","))
+                                .map(String::trim)
+                                .collect(Collectors.toList()))
+                .orElse(Lists.newArrayList());
     }
 
     // public for unit test
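
Note that the parsing above splits on commas and trims each element, so whitespace in the list should be tolerated; a hypothetical call:

```sql
-- "city, date" parses to the same keys as "city,date"
-- because each element goes through String::trim.
SELECT * FROM hdfs(
    "uri" = "hdfs://127.0.0.1:8020/path/to/city=beijing/date=2023-07-09/data.csv",
    "fs.defaultFS" = "hdfs://127.0.0.1:8020",
    "format" = "csv",
    "path_partition_keys" = "city, date");
```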
@@ -367,8 +384,8 @@ public abstract class ExternalFileTableValuedFunction 
extends TableValuedFunctio
                 if (!result.getStatus().getErrorMsgsList().isEmpty()) {
                     errMsg = result.getStatus().getErrorMsgsList().get(0);
                 } else {
-                    errMsg =  "fetchTableStructureAsync failed. backend 
address: "
-                                + address.getHostname() + ":" + 
address.getPort();
+                    errMsg = "fetchTableStructureAsync failed. backend 
address: "
+                            + address.getHostname() + ":" + address.getPort();
                 }
                 throw new AnalysisException(errMsg);
             }
@@ -397,6 +414,7 @@ public abstract class ExternalFileTableValuedFunction 
extends TableValuedFunctio
 
     /**
      * Convert PTypeDesc into doris column type
+     *
      * @param typeNodes list PTypeNodes in PTypeDesc
      * @param start the start index of typeNode to parse
      * @return column type and the number of parsed PTypeNodes
@@ -408,7 +426,7 @@ public abstract class ExternalFileTableValuedFunction 
extends TableValuedFunctio
         int parsedNodes;
         if (tPrimitiveType == TPrimitiveType.ARRAY) {
             Pair<Type, Integer> itemType = getColumnType(typeNodes, start + 1);
-            type =  ArrayType.create(itemType.key(), true);
+            type = ArrayType.create(itemType.key(), true);
             parsedNodes = 1 + itemType.value();
         } else if (tPrimitiveType == TPrimitiveType.MAP) {
             Pair<Type, Integer> keyType = getColumnType(typeNodes, start + 1);
@@ -433,15 +451,22 @@ public abstract class ExternalFileTableValuedFunction 
extends TableValuedFunctio
     }
 
     private void fillColumns(InternalService.PFetchTableSchemaResult result)
-                            throws AnalysisException {
+            throws AnalysisException {
         if (result.getColumnNums() == 0) {
             throw new AnalysisException("The amount of column is 0");
         }
+        // add fetched file columns
         for (int idx = 0; idx < result.getColumnNums(); ++idx) {
             PTypeDesc type = result.getColumnTypes(idx);
             String colName = result.getColumnNames(idx);
             columns.add(new Column(colName, getColumnType(type.getTypesList(), 
0).key(), true));
         }
+        // add path columns
+        // HACK(tsy): path columns are all treated as STRING type for now; once BE supports reading
+        //  all column types from file meta via every format reader, path column types may be read from BE instead.
+        for (String colName : pathPartitionKeys) {
+            columns.add(new Column(colName, Type.STRING, false));
+        }
     }
 
     private PFetchTableSchemaRequest getFetchTableStructureRequest() throws 
AnalysisException, TException {
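
Since fillColumns() appends each path partition key as a STRING column after the fetched file columns, the reported TVF schema should show them as such. A hedged illustration (the call shape and expected output are assumptions, not part of this patch):

```sql
DESC FUNCTION hdfs(
    "uri" = "hdfs://127.0.0.1:8020/path/to/city=beijing/data.csv",
    "fs.defaultFS" = "hdfs://127.0.0.1:8020",
    "format" = "csv",
    "path_partition_keys" = "city");
-- Expected: the csv columns first, then `city` as a string column,
-- since path columns are currently all mapped to Type.STRING.
```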
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/HdfsTableValuedFunction.java
 
b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/HdfsTableValuedFunction.java
index ea37ec410f..6543d5e105 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/HdfsTableValuedFunction.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/HdfsTableValuedFunction.java
@@ -59,11 +59,11 @@ public class HdfsTableValuedFunction extends 
ExternalFileTableValuedFunction {
     private String filePath;
 
     public HdfsTableValuedFunction(Map<String, String> params) throws 
AnalysisException {
-        Map<String, String> fileFormatParams = new CaseInsensitiveMap();
+        Map<String, String> fileParams = new CaseInsensitiveMap();
         locationProperties = Maps.newHashMap();
         for (String key : params.keySet()) {
             if (FILE_FORMAT_PROPERTIES.contains(key.toLowerCase())) {
-                fileFormatParams.put(key, params.get(key));
+                fileParams.put(key, params.get(key));
             } else {
                 // because HADOOP_FS_NAME contains upper and lower case
                 if (HdfsResource.HADOOP_FS_NAME.equalsIgnoreCase(key)) {
@@ -73,6 +73,9 @@ public class HdfsTableValuedFunction extends 
ExternalFileTableValuedFunction {
                 }
             }
         }
+        if (params.containsKey(PATH_PARTITION_KEYS)) {
+            fileParams.put(PATH_PARTITION_KEYS, 
params.get(PATH_PARTITION_KEYS));
+        }
 
         if (!locationProperties.containsKey(HDFS_URI)) {
             throw new AnalysisException(String.format("Configuration '%s' is 
required.", HDFS_URI));
@@ -81,7 +84,7 @@ public class HdfsTableValuedFunction extends 
ExternalFileTableValuedFunction {
         hdfsUri = URI.create(locationProperties.get(HDFS_URI));
         filePath = locationProperties.get(HdfsResource.HADOOP_FS_NAME) + 
hdfsUri.getPath();
 
-        parseProperties(fileFormatParams);
+        parseProperties(fileParams);
         parseFile();
     }
 
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/S3TableValuedFunction.java
 
b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/S3TableValuedFunction.java
index a31f3e52de..300c51c7ad 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/S3TableValuedFunction.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/S3TableValuedFunction.java
@@ -54,7 +54,8 @@ public class S3TableValuedFunction extends 
ExternalFileTableValuedFunction {
             ImmutableSet.of("access_key", "secret_key", "session_token", 
"region");
 
     private static final ImmutableSet<String> OPTIONAL_KEYS =
-            ImmutableSet.of(S3Properties.SESSION_TOKEN, 
PropertyConverter.USE_PATH_STYLE, S3Properties.REGION);
+            ImmutableSet.of(S3Properties.SESSION_TOKEN, 
PropertyConverter.USE_PATH_STYLE, S3Properties.REGION,
+                    PATH_PARTITION_KEYS);
 
     private static final ImmutableSet<String> PROPERTIES_SET = 
ImmutableSet.<String>builder()
             .add(S3_URI)
diff --git 
a/regression-test/suites/load_p2/broker_load/test_tvf_based_broker_load.groovy 
b/regression-test/suites/load_p2/broker_load/test_tvf_based_broker_load.groovy
index 665730d3d5..bba1fb239e 100644
--- 
a/regression-test/suites/load_p2/broker_load/test_tvf_based_broker_load.groovy
+++ 
b/regression-test/suites/load_p2/broker_load/test_tvf_based_broker_load.groovy
@@ -126,8 +126,8 @@ suite("test_tvf_based_broker_load_p2", "p2") {
                         //    
"""WATCHID,JAVAENABLE,TITLE,GOODEVENT,EVENTTIME,EVENTDATE,COUNTERID,CLIENTIP,REGIONID,USERID,COUNTERCLASS,OS,USERAGENT,URL,REFERER,ISREFRESH,REFERERCATEGORYID,REFERERREGIONID,URLCATEGORYID,URLREGIONID,RESOLUTIONWIDTH,RESOLUTIONHEIGHT,RESOLUTIONDEPTH,FLASHMAJOR,FLASHMINOR,FLASHMINOR2,NETMAJOR,NETMINOR,USERAGENTMAJOR,USERAGENTMINOR,COOKIEENABLE,JAVASCRIPTENABLE,ISMOBILE,MOBILEPHONE,MOBILEPHONEMODEL,PARAMS,IPNETWORKID,TRAFICSOURCEID,SEARCHENGINEID,SEARCHPHRASE,
 [...]
                         //    
"""watchid,javaenable,title,goodevent,eventtime,eventdate,counterid,clientip,regionid,userid,counterclass,os,useragent,url,referer,isrefresh,referercategoryid,refererregionid,urlcategoryid,urlregionid,resolutionwidth,resolutionheight,resolutiondepth,flashmajor,flashminor,flashminor2,netmajor,netminor,useragentmajor,useragentminor,cookieenable,javascriptenable,ismobile,mobilephone,mobilephonemodel,params,ipnetworkid,traficsourceid,searchengineid,searchphrase,
 [...]
     ]
-    def column_in_paths = ["", "", "", "", "", "", "", "", "", "", "", "", 
"COLUMNS FROM PATH AS (city)", "", "", "", "", "", "", "", "", "", "", "", "", 
"", "", "", "", "", "", ""]
-    def preceding_filters = ["", "", "", "", "", "", "", "", "", "", "", 
"preceding filter p_size < 10", "", "", "", "", "", "", "", "", "", "", "", "", 
"", "", "", "", "", "", "", ""]
+    def column_in_paths = ["", "", "", "", "", "", "", "", "", "", "", "", 
"COLUMNS FROM PATH AS (city)", "", "", "", "", "", "", "", "", "", "", "", "", 
"", "", "", "", "", ""]
+    def preceding_filters = ["", "", "", "", "", "", "", "", "", "", "", 
"preceding filter p_size < 10", "", "", "", "", "", "", "", "", "", "", "", "", 
"", "", "", "", "", "", ""]
     def set_values = ["",
                       "",
                       "SET(comment=p_comment, retailprice=p_retailprice, 
container=p_container, size=p_size, type=p_type, brand=p_brand, mfgr=p_mfgr, 
name=p_name, partkey=p_partkey)",
@@ -161,8 +161,105 @@ suite("test_tvf_based_broker_load_p2", "p2") {
                       "",
                       ""
     ]
-    def where_exprs = ["", "", "", "", "", "", "", "", "", "", "", "where 
p_partkey>10", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", 
"", "", "",""]
-    def line_delimiters = ["", "", "", "", "", "", "", "", "", "", "", "", "", 
"", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", 
"\u0007"]
+    def where_exprs = ["", "", "", "", "", "", "", "", "", "", "", "where 
p_partkey>10", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", 
"", "", ""]
+
+    def etl_info = ["unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=200000",
+                    "unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=200000",
+                    "unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=200000",
+                    "unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=200000",
+                    "unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=200000",
+                    "unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=200000",
+                    "unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=200000",
+                    "unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=200000",
+                    "unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=200000",
+                    "unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=200000",
+                    "unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=200000",
+                    "unselected.rows=163706; dpp.abnorm.ALL=0; 
dpp.norm.ALL=36294",
+                    "unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=200000",
+                    "unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=200000",
+                    "\\N",
+                    "unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=200000",
+                    "unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=200000",
+                    "\\N",
+                    "unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=200000",
+                    "unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=200000",
+                    "unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=200000",
+                    "unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=4096",
+                    "unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=100000",
+                    "unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=10000",
+                    "unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=10000",
+                    "unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=10000",
+                    "unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=10000",
+                    "unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=10000",
+                    "unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=10000",
+                    "unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=10000",
+                    "unselected.rows=0; dpp.abnorm.ALL=0; dpp.norm.ALL=10000",
+    ]
+
+    def task_info = ["cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+                     "cluster:cos.ap-beijing.myqcloud.com; timeout(s):14400; 
max_filter_ratio:0.0",
+    ]
+
+    def error_msg = ["",
+                     "",
+                     "",
+                     "",
+                     "",
+                     "",
+                     "",
+                     "",
+                     "",
+                     "",
+                     "",
+                     "",
+                     "",
+                     "",
+                     "[INTERNAL_ERROR]failed to find default value expr for 
slot: x1",
+                     "",
+                     "",
+                     "[INTERNAL_ERROR]failed to find default value expr for 
slot: x1",
+                     "",
+                     "",
+                     "",
+                     "",
+                     "",
+                     "",
+                     "",
+                     "",
+                     "",
+                     "",
+                     "",
+                     "",
+    ]
 
     String ak = getS3AK()
     String sk = getS3SK()

