(doris) branch master updated (89bbad9db79 -> d8dd2027994)

2024-06-28 Thread gabriellee
This is an automated email from the ASF dual-hosted git repository.

gabriellee pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


from 89bbad9db79 [improvement](statistics)Disable fetch stats for iceberg 
table through Iceberg api by default. (#36931)
 add d8dd2027994 [refactor](pipeline) Delete unused functions (#36972)

No new revisions were added by this update.

Summary of changes:
 be/src/pipeline/exec/scan_operator.cpp|  5 -
 be/src/pipeline/exec/scan_operator.h  |  4 
 be/src/vec/runtime/vdata_stream_recvr.cpp | 19 ---
 be/src/vec/runtime/vdata_stream_recvr.h   | 11 ---
 4 files changed, 39 deletions(-)


-
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org



(doris) branch master updated (d8dd2027994 -> 95594d6cab4)

2024-06-28 Thread dataroaring
This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


from d8dd2027994 [refactor](pipeline) Delete unused functions (#36972)
 add 95594d6cab4 [Fix](partial) Fix partial update delete case (#36985)

No new revisions were added by this update.

Summary of changes:
 .../test_new_partial_update_delete.out | 24 --
 .../test_new_partial_update_delete.groovy  | 12 +++
 2 files changed, 8 insertions(+), 28 deletions(-)


-
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org



(doris) branch master updated: [opt](function) Optimize the trim function for single-char inputs (#36497)

2024-06-28 Thread gabriellee
This is an automated email from the ASF dual-hosted git repository.

gabriellee pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
 new f8361470bbd [opt](function) Optimize the trim function for single-char 
inputs (#36497)
f8361470bbd is described below

commit f8361470bbd87148a46cdafc3b4834dae50df71b
Author: Mryange <59914473+mrya...@users.noreply.github.com>
AuthorDate: Fri Jun 28 15:57:54 2024 +0800

[opt](function) Optimize the trim function for single-char inputs (#36497)

before
```
mysql [test]>select count(ltrim(str,"1")) from stringDb2;
+------------------------+
| count(ltrim(str, '1')) |
+------------------------+
|                   6400 |
+------------------------+
1 row in set (7.79 sec)
```

now
```
mysql [test]>select count(ltrim(str,"1")) from stringDb2;
+------------------------+
| count(ltrim(str, '1')) |
+------------------------+
|                   6400 |
+------------------------+
1 row in set (0.73 sec)
```
---
 be/src/util/simd/vstring_function.h| 196 ++---
 be/src/vec/functions/function_string.cpp   |  54 +++---
 .../correctness/test_trim_new_parameters.groovy|   3 +
 3 files changed, 92 insertions(+), 161 deletions(-)

diff --git a/be/src/util/simd/vstring_function.h 
b/be/src/util/simd/vstring_function.h
index dac964b1b94..4fff59a01df 100644
--- a/be/src/util/simd/vstring_function.h
+++ b/be/src/util/simd/vstring_function.h
@@ -17,6 +17,7 @@
 
 #pragma once
 
+#include 
 #include 
 
 #include 
@@ -100,169 +101,86 @@ public:
 /// n equals to 16 chars length
 static constexpr auto REGISTER_SIZE = sizeof(__m128i);
 #endif
-public:
-static StringRef rtrim(const StringRef& str) {
-if (str.size == 0) {
-return str;
-}
-auto begin = 0;
-int64_t end = str.size - 1;
-#if defined(__SSE2__) || defined(__aarch64__)
-char blank = ' ';
-const auto pattern = _mm_set1_epi8(blank);
-while (end - begin + 1 >= REGISTER_SIZE) {
-const auto v_haystack = _mm_loadu_si128(
-reinterpret_cast(str.data + end + 1 - 
REGISTER_SIZE));
-const auto v_against_pattern = _mm_cmpeq_epi8(v_haystack, pattern);
-const auto mask = _mm_movemask_epi8(v_against_pattern);
-int offset = __builtin_clz(~(mask << REGISTER_SIZE));
-/// means not found
-if (offset == 0) {
-return StringRef(str.data + begin, end - begin + 1);
-} else {
-end -= offset;
-}
-}
-#endif
-while (end >= begin && str.data[end] == ' ') {
---end;
-}
-if (end < 0) {
-return StringRef("");
-}
-return StringRef(str.data + begin, end - begin + 1);
-}
-
-static StringRef ltrim(const StringRef& str) {
-if (str.size == 0) {
-return str;
-}
-auto begin = 0;
-auto end = str.size - 1;
-#if defined(__SSE2__) || defined(__aarch64__)
-char blank = ' ';
-const auto pattern = _mm_set1_epi8(blank);
-while (end - begin + 1 >= REGISTER_SIZE) {
-const auto v_haystack =
-_mm_loadu_si128(reinterpret_cast(str.data 
+ begin));
-const auto v_against_pattern = _mm_cmpeq_epi8(v_haystack, pattern);
-const auto mask = _mm_movemask_epi8(v_against_pattern) ^ 0x;
-/// zero means not found
-if (mask == 0) {
-begin += REGISTER_SIZE;
-} else {
-const auto offset = __builtin_ctz(mask);
-begin += offset;
-return StringRef(str.data + begin, end - begin + 1);
-}
-}
-#endif
-while (begin <= end && str.data[begin] == ' ') {
-++begin;
-}
-return StringRef(str.data + begin, end - begin + 1);
-}
 
-static StringRef trim(const StringRef& str) {
-if (str.size == 0) {
-return str;
+template 
+static inline const unsigned char* rtrim(const unsigned char* begin, const 
unsigned char* end,
+ const StringRef& remove_str) {
+if (remove_str.size == 0) {
+return end;
 }
-return rtrim(ltrim(str));
-}
+const auto* p = end;
 
-static StringRef rtrim(const StringRef& str, const StringRef& rhs) {
-if (str.size == 0 || rhs.size == 0) {
-return str;
-}
-if (rhs.size == 1) {
-auto begin = 0;
-int64_t end = str.size - 1;
-const char blank = rhs.data[0];
-#if defined(__SSE2__) || defined(__aarch64__)
-const auto pattern = _mm_set1_epi8(blank);
-while (end - begin

(doris) branch branch-2.1 updated: [improvement](statistics)Use real base index id to fetch stats cache. (#36914) (#36992)

2024-06-28 Thread lijibing
This is an automated email from the ASF dual-hosted git repository.

lijibing pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
 new 816899df414 [improvement](statistics)Use real base index id to fetch 
stats cache. (#36914) (#36992)
816899df414 is described below

commit 816899df4140aae749359a6abf5ab2367d1a7c3e
Author: Jibing-Li <64681310+jibing...@users.noreply.github.com>
AuthorDate: Fri Jun 28 16:22:20 2024 +0800

[improvement](statistics)Use real base index id to fetch stats cache. 
(#36914) (#36992)

For historical reason, statistics tables use -1 for OlapTable base index
id. This brings many if/else branch for stats calculate. This pr is to
screen the -1 for Nereids. The stats user could use the real base index
id to fetch stats cache. Will do the id translation inside the get cache
api.

backport: https://github.com/apache/doris/pull/36914
---
 .../apache/doris/statistics/StatisticsCache.java   | 23 ++
 1 file changed, 23 insertions(+)

diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsCache.java 
b/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsCache.java
index e8ef250e674..d86e073d9e4 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsCache.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsCache.java
@@ -18,6 +18,8 @@
 package org.apache.doris.statistics;
 
 import org.apache.doris.catalog.Env;
+import org.apache.doris.catalog.OlapTable;
+import org.apache.doris.catalog.TableIf;
 import org.apache.doris.common.ClientPool;
 import org.apache.doris.common.Config;
 import org.apache.doris.common.ThreadPoolManager;
@@ -79,6 +81,12 @@ public class StatisticsCache {
 if (ctx != null && ctx.getSessionVariable().internalSession) {
 return ColumnStatistic.UNKNOWN;
 }
+// Need to change base index id to -1 for OlapTable.
+try {
+idxId = changeBaseIndexId(catalogId, dbId, tblId, idxId);
+} catch (Exception e) {
+return ColumnStatistic.UNKNOWN;
+}
 StatisticsCacheKey k = new StatisticsCacheKey(catalogId, dbId, tblId, 
idxId, colName);
 try {
 CompletableFuture> f = 
columnStatisticsCache.get(k);
@@ -91,6 +99,21 @@ public class StatisticsCache {
 return ColumnStatistic.UNKNOWN;
 }
 
+// Base index id should be set to -1 for OlapTable. Because statistics 
tables use -1 for base index.
+// TODO: Need to use the real index id in statistics table in later 
version.
+private long changeBaseIndexId(long catalogId, long dbId, long tblId, long 
idxId) {
+if (idxId != -1) {
+TableIf table = StatisticsUtil.findTable(catalogId, dbId, tblId);
+if (table instanceof OlapTable) {
+OlapTable olapTable = (OlapTable) table;
+if (idxId == olapTable.getBaseIndexId()) {
+idxId = -1;
+}
+}
+}
+return idxId;
+}
+
 public Histogram getHistogram(long ctlId, long dbId, long tblId, String 
colName) {
 return getHistogram(ctlId, dbId, tblId, -1, colName).orElse(null);
 }


-
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org



(doris) branch branch-2.1 updated: [improvement](statistics)Disable fetch stats for iceberg table through Iceberg api by default. (#36931) (#36999)

2024-06-28 Thread lijibing
This is an automated email from the ASF dual-hosted git repository.

lijibing pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
 new 65e8bbf0f2c [improvement](statistics)Disable fetch stats for iceberg 
table through Iceberg api by default. (#36931) (#36999)
65e8bbf0f2c is described below

commit 65e8bbf0f2c8ec50118bcfd54889c1943421d86f
Author: Jibing-Li <64681310+jibing...@users.noreply.github.com>
AuthorDate: Fri Jun 28 16:23:25 2024 +0800

[improvement](statistics)Disable fetch stats for iceberg table through 
Iceberg api by default. (#36931) (#36999)

backport https://github.com/apache/doris/pull/36931
---
 .../java/org/apache/doris/datasource/hive/HMSExternalTable.java  | 8 ++--
 fe/fe-core/src/main/java/org/apache/doris/qe/GlobalVariable.java | 9 +
 2 files changed, 15 insertions(+), 2 deletions(-)

diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSExternalTable.java
 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSExternalTable.java
index 37042edde62..03eac33ab53 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSExternalTable.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSExternalTable.java
@@ -573,10 +573,14 @@ public class HMSExternalTable extends ExternalTable 
implements MTMVRelatedTableI
 case HIVE:
 return getHiveColumnStats(colName);
 case ICEBERG:
-return StatisticsUtil.getIcebergColumnStats(colName,
+if (GlobalVariable.enableFetchIcebergStats) {
+return StatisticsUtil.getIcebergColumnStats(colName,
 
Env.getCurrentEnv().getExtMetaCacheMgr().getIcebergMetadataCache().getIcebergTable(
-catalog, dbName, name
+catalog, dbName, name
 ));
+} else {
+break;
+}
 default:
 LOG.warn("get column stats for dlaType {} is not supported.", 
dlaType);
 }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/GlobalVariable.java 
b/fe/fe-core/src/main/java/org/apache/doris/qe/GlobalVariable.java
index 14d8c35ff72..6eac0c2b815 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/GlobalVariable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/GlobalVariable.java
@@ -63,6 +63,9 @@ public final class GlobalVariable {
 public static final String DEFAULT_USING_META_CACHE_FOR_EXTERNAL_CATALOG
 = "default_using_meta_cache_for_external_catalog";
 
+public static final String ENABLE_FETCH_ICEBERG_STATS = 
"enable_fetch_iceberg_stats";
+
+
 @VariableMgr.VarAttr(name = VERSION_COMMENT, flag = VariableMgr.READ_ONLY)
 public static String versionComment = "Doris version "
 + Version.DORIS_BUILD_VERSION + "-" + 
Version.DORIS_BUILD_SHORT_HASH;
@@ -155,6 +158,12 @@ public final class GlobalVariable {
 "Only for compatibility with MySQL ecosystem, no practical 
meaning"})
 public static boolean super_read_only = true;
 
+@VariableMgr.VarAttr(name = ENABLE_FETCH_ICEBERG_STATS, flag = 
VariableMgr.GLOBAL,
+description = {
+"当HMS catalog中的Iceberg表没有统计信息时,是否通过Iceberg Api获取统计信息",
+"Enable fetch stats for HMS Iceberg table when it's not 
analyzed."})
+public static boolean enableFetchIcebergStats = false;
+
 // Don't allow creating instance.
 private GlobalVariable() {
 }


-
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org



(doris) branch branch-2.0 updated: [fix](fe ut) fix unstable SystemInfoServiceTest #36893 (#36975)

2024-06-28 Thread dataroaring
This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch branch-2.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.0 by this push:
 new f4a617a5927 [fix](fe ut) fix unstable SystemInfoServiceTest #36893 
(#36975)
f4a617a5927 is described below

commit f4a617a5927f777195ac9352e49c2f2345910175
Author: yujun 
AuthorDate: Fri Jun 28 16:30:03 2024 +0800

[fix](fe ut) fix unstable SystemInfoServiceTest #36893 (#36975)

cherry pick from #36893
---
 .../org/apache/doris/system/SystemInfoServiceTest.java| 15 +++
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git 
a/fe/fe-core/src/test/java/org/apache/doris/system/SystemInfoServiceTest.java 
b/fe/fe-core/src/test/java/org/apache/doris/system/SystemInfoServiceTest.java
index 9578ed1c7ff..6092a50330b 100644
--- 
a/fe/fe-core/src/test/java/org/apache/doris/system/SystemInfoServiceTest.java
+++ 
b/fe/fe-core/src/test/java/org/apache/doris/system/SystemInfoServiceTest.java
@@ -46,6 +46,8 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
 
 public class SystemInfoServiceTest {
 private SystemInfoService infoService;
@@ -403,7 +405,7 @@ public class SystemInfoServiceTest {
 ReplicaAllocation replicaAlloc = ReplicaAllocation.DEFAULT_ALLOCATION;
 // also check if the random selection logic can evenly distribute the 
replica.
 Map beCounterMap = Maps.newHashMap();
-for (int i = 0; i < 1; ++i) {
+for (int i = 0; i < 3; ++i) {
 Pair>, TStorageMedium> ret = 
infoService.selectBackendIdsForReplicaCreation(replicaAlloc,
 Maps.newHashMap(), TStorageMedium.HDD, false, false);
 Map> res = ret.first;
@@ -412,11 +414,16 @@ public class SystemInfoServiceTest {
 beCounterMap.put(beId, beCounterMap.getOrDefault(beId, 0) + 1);
 }
 }
+Set expectBackendIds = infoService.getMixBackends().stream()
+.filter(Backend::isAlive).map(Backend::getId)
+.collect(Collectors.toSet());
+Assert.assertEquals(expectBackendIds, 
beCounterMap.keySet().stream().collect(Collectors.toSet()));
 List list = Lists.newArrayList(beCounterMap.values());
 Collections.sort(list);
-int diff = list.get(list.size() - 1) - list.get(0);
-// The max replica num and min replica num's diff is less than 5%.
-Assert.assertTrue((diff * 1.0 / list.get(0)) < 0.05);
+int max = list.get(list.size() - 1);
+int diff =  max - list.get(0);
+// The max replica num and min replica num's diff is less than 30%.
+Assert.assertTrue((diff * 1.0 / max) < 0.3);
 }
 
 private void addDisk(Backend be, String path, TStorageMedium medium, long 
totalB, long availB) {


-
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org



(doris) branch branch-2.1 updated: [fix](fe ut) fix unstable SystemInfoServiceTest #36893 (#36974)

2024-06-28 Thread dataroaring
This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
 new 47e56cc3260 [fix](fe ut) fix unstable SystemInfoServiceTest #36893 
(#36974)
47e56cc3260 is described below

commit 47e56cc326086981227115e13e4c29d07febda41
Author: yujun 
AuthorDate: Fri Jun 28 16:30:47 2024 +0800

[fix](fe ut) fix unstable SystemInfoServiceTest #36893 (#36974)

cherry pick from #36893
---
 .../org/apache/doris/system/SystemInfoServiceTest.java| 15 +++
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git 
a/fe/fe-core/src/test/java/org/apache/doris/system/SystemInfoServiceTest.java 
b/fe/fe-core/src/test/java/org/apache/doris/system/SystemInfoServiceTest.java
index e933c0df17c..eb1f33c2889 100644
--- 
a/fe/fe-core/src/test/java/org/apache/doris/system/SystemInfoServiceTest.java
+++ 
b/fe/fe-core/src/test/java/org/apache/doris/system/SystemInfoServiceTest.java
@@ -46,6 +46,8 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
 
 public class SystemInfoServiceTest {
 private SystemInfoService infoService;
@@ -403,7 +405,7 @@ public class SystemInfoServiceTest {
 ReplicaAllocation replicaAlloc = ReplicaAllocation.DEFAULT_ALLOCATION;
 // also check if the random selection logic can evenly distribute the 
replica.
 Map beCounterMap = Maps.newHashMap();
-for (int i = 0; i < 1; ++i) {
+for (int i = 0; i < 3; ++i) {
 Pair>, TStorageMedium> ret = 
infoService.selectBackendIdsForReplicaCreation(replicaAlloc,
 Maps.newHashMap(), TStorageMedium.HDD, false, false);
 Map> res = ret.first;
@@ -412,11 +414,16 @@ public class SystemInfoServiceTest {
 beCounterMap.put(beId, beCounterMap.getOrDefault(beId, 0) + 1);
 }
 }
+Set expectBackendIds = infoService.getMixBackends().stream()
+.filter(Backend::isAlive).map(Backend::getId)
+.collect(Collectors.toSet());
+Assert.assertEquals(expectBackendIds, 
beCounterMap.keySet().stream().collect(Collectors.toSet()));
 List list = Lists.newArrayList(beCounterMap.values());
 Collections.sort(list);
-int diff = list.get(list.size() - 1) - list.get(0);
-// The max replica num and min replica num's diff is less than 5%.
-Assert.assertTrue((diff * 1.0 / list.get(0)) < 0.05);
+int max = list.get(list.size() - 1);
+int diff =  max - list.get(0);
+// The max replica num and min replica num's diff is less than 30%.
+Assert.assertTrue((diff * 1.0 / max) < 0.3);
 }
 
 private void addDisk(Backend be, String path, TStorageMedium medium, long 
totalB, long availB) {


-
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org



(doris) branch master updated (f8361470bbd -> 5765ffac430)

2024-06-28 Thread eldenmoon
This is an automated email from the ASF dual-hosted git repository.

eldenmoon pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


from f8361470bbd [opt](function) Optimize the trim function for single-char 
inputs (#36497)
 add 5765ffac430 [Fix](Variant) fix variant partial update with row store 
enabled (#36793)

No new revisions were added by this update.

Summary of changes:
 be/src/vec/columns/column_object.h |   8 --
 be/src/vec/common/schema_util.cpp  | 135 +
 be/src/vec/common/schema_util.h|   8 --
 .../data_types/serde/data_type_object_serde.cpp|  10 +-
 regression-test/data/variant_p0/delete_update.out  |   4 +-
 .../suites/variant_p0/delete_update.groovy |   8 +-
 6 files changed, 17 insertions(+), 156 deletions(-)


-
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org



(doris) branch branch-2.1 updated: [improvement](partition rebalance) improve partition rebalance choose candidate speed #36509 (#36976)

2024-06-28 Thread dataroaring
This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
 new b46c86f352b [improvement](partition rebalance) improve partition 
rebalance choose candidate speed #36509 (#36976)
b46c86f352b is described below

commit b46c86f352b1dc9b0e8cd607be616722c7f642fd
Author: yujun 
AuthorDate: Fri Jun 28 16:31:50 2024 +0800

[improvement](partition rebalance) improve partition rebalance choose 
candidate speed #36509 (#36976)

cherry pick from #36509
---
 .../apache/doris/clone/PartitionRebalancer.java| 79 +++---
 .../java/org/apache/doris/clone/Rebalancer.java|  3 +
 .../org/apache/doris/clone/TabletScheduler.java| 13 ++--
 .../java/org/apache/doris/clone/PathSlotTest.java  |  5 +-
 .../doris/cluster/DecommissionBackendTest.java |  1 -
 5 files changed, 66 insertions(+), 35 deletions(-)

diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/clone/PartitionRebalancer.java 
b/fe/fe-core/src/main/java/org/apache/doris/clone/PartitionRebalancer.java
index 23e13e9161b..7095ad8dc54 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/clone/PartitionRebalancer.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/clone/PartitionRebalancer.java
@@ -30,8 +30,8 @@ import org.apache.doris.thrift.TStorageMedium;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
 import com.google.common.collect.Ordering;
+import com.google.common.collect.Sets;
 import com.google.common.collect.TreeMultimap;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
@@ -41,7 +41,9 @@ import java.util.List;
 import java.util.Map;
 import java.util.NavigableSet;
 import java.util.Random;
+import java.util.Set;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.BiPredicate;
 import java.util.stream.Collectors;
 
 /*
@@ -121,44 +123,64 @@ public class PartitionRebalancer extends Rebalancer {
 = algo.getNextMoves(clusterBalanceInfo, 
Config.partition_rebalance_max_moves_num_per_selection);
 
 List alternativeTablets = Lists.newArrayList();
-List inProgressIds = movesInProgressList.stream().map(m -> 
m.tabletId).collect(Collectors.toList());
+Set inProgressIds = movesInProgressList.stream().map(m -> 
m.tabletId).collect(Collectors.toSet());
+Random rand = new SecureRandom();
 for (TwoDimensionalGreedyRebalanceAlgo.PartitionMove move : moves) {
 // Find all tablets of the specified partition that would have a 
replica at the source be,
 // but would not have a replica at the destination be. That is to 
satisfy the restriction
 // of having no more than one replica of the same tablet per be.
 List tabletIds = 
invertedIndex.getTabletIdsByBackendIdAndStorageMedium(move.fromBe, medium);
-List invalidIds = 
invertedIndex.getTabletIdsByBackendIdAndStorageMedium(move.toBe, medium);
-tabletIds.removeAll(invalidIds);
-// In-progress tablets can't be the candidate too.
-tabletIds.removeAll(inProgressIds);
+if (tabletIds.isEmpty()) {
+continue;
+}
+
+Set invalidIds = Sets.newHashSet(
+
invertedIndex.getTabletIdsByBackendIdAndStorageMedium(move.toBe, medium));
 
-Map tabletCandidates = Maps.newHashMap();
-for (long tabletId : tabletIds) {
+BiPredicate canMoveTablet = (Long tabletId, 
TabletMeta tabletMeta) -> {
+return tabletMeta != null
+&& tabletMeta.getPartitionId() == move.partitionId
+&& tabletMeta.getIndexId() == move.indexId
+&& !invalidIds.contains(tabletId)
+&& !inProgressIds.contains(tabletId);
+};
+
+// Random pick one candidate to create tabletSchedCtx
+int startIdx = rand.nextInt(tabletIds.size());
+long pickedTabletId = -1L;
+TabletMeta pickedTabletMeta = null;
+for (int i = startIdx; i < tabletIds.size(); i++) {
+long tabletId = tabletIds.get(i);
 TabletMeta tabletMeta = invertedIndex.getTabletMeta(tabletId);
-if (tabletMeta != null && tabletMeta.getPartitionId() == 
move.partitionId
-&& tabletMeta.getIndexId() == move.indexId) {
-tabletCandidates.put(tabletId, tabletMeta);
+if (canMoveTablet.test(tabletId, tabletMeta)) {
+pickedTabletId = tabletId;
+pickedTabletMeta = tabletMeta;
+break;
 }
 }
-if (LOG.isDebugEnabled()) {
-  

(doris) branch branch-2.0 updated: [improvement](partition rebalance) improve partition rebalance choose candidate speed #36509 (#36978)

2024-06-28 Thread dataroaring
This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch branch-2.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.0 by this push:
 new 77765b23b99 [improvement](partition rebalance) improve partition 
rebalance choose candidate speed #36509 (#36978)
77765b23b99 is described below

commit 77765b23b9967924b25f68851916332b9e0d1e28
Author: yujun 
AuthorDate: Fri Jun 28 16:31:16 2024 +0800

[improvement](partition rebalance) improve partition rebalance choose 
candidate speed #36509 (#36978)

cherry pick from #36509
---
 .../apache/doris/clone/PartitionRebalancer.java| 78 +++---
 .../java/org/apache/doris/clone/Rebalancer.java|  3 +
 .../org/apache/doris/clone/TabletScheduler.java| 13 ++--
 .../java/org/apache/doris/clone/PathSlotTest.java  |  5 +-
 .../doris/cluster/DecommissionBackendTest.java |  1 -
 5 files changed, 68 insertions(+), 32 deletions(-)

diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/clone/PartitionRebalancer.java 
b/fe/fe-core/src/main/java/org/apache/doris/clone/PartitionRebalancer.java
index fd70e5116f2..25d85822edb 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/clone/PartitionRebalancer.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/clone/PartitionRebalancer.java
@@ -29,17 +29,20 @@ import org.apache.doris.thrift.TStorageMedium;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
 import com.google.common.collect.Ordering;
+import com.google.common.collect.Sets;
 import com.google.common.collect.TreeMultimap;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 
+import java.security.SecureRandom;
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableSet;
 import java.util.Random;
+import java.util.Set;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.BiPredicate;
 import java.util.stream.Collectors;
 
 /*
@@ -115,40 +118,64 @@ public class PartitionRebalancer extends Rebalancer {
 = algo.getNextMoves(clusterBalanceInfo, 
Config.partition_rebalance_max_moves_num_per_selection);
 
 List alternativeTablets = Lists.newArrayList();
-List inProgressIds = movesInProgressList.stream().map(m -> 
m.tabletId).collect(Collectors.toList());
+Set inProgressIds = movesInProgressList.stream().map(m -> 
m.tabletId).collect(Collectors.toSet());
+Random rand = new SecureRandom();
 for (TwoDimensionalGreedyRebalanceAlgo.PartitionMove move : moves) {
 // Find all tablets of the specified partition that would have a 
replica at the source be,
 // but would not have a replica at the destination be. That is to 
satisfy the restriction
 // of having no more than one replica of the same tablet per be.
 List tabletIds = 
invertedIndex.getTabletIdsByBackendIdAndStorageMedium(move.fromBe, medium);
-List invalidIds = 
invertedIndex.getTabletIdsByBackendIdAndStorageMedium(move.toBe, medium);
-tabletIds.removeAll(invalidIds);
-// In-progress tablets can't be the candidate too.
-tabletIds.removeAll(inProgressIds);
+if (tabletIds.isEmpty()) {
+continue;
+}
+
+Set invalidIds = Sets.newHashSet(
+
invertedIndex.getTabletIdsByBackendIdAndStorageMedium(move.toBe, medium));
+
+BiPredicate canMoveTablet = (Long tabletId, 
TabletMeta tabletMeta) -> {
+return tabletMeta != null
+&& tabletMeta.getPartitionId() == move.partitionId
+&& tabletMeta.getIndexId() == move.indexId
+&& !invalidIds.contains(tabletId)
+&& !inProgressIds.contains(tabletId);
+};
 
-Map tabletCandidates = Maps.newHashMap();
-for (long tabletId : tabletIds) {
+// Random pick one candidate to create tabletSchedCtx
+int startIdx = rand.nextInt(tabletIds.size());
+long pickedTabletId = -1L;
+TabletMeta pickedTabletMeta = null;
+for (int i = startIdx; i < tabletIds.size(); i++) {
+long tabletId = tabletIds.get(i);
 TabletMeta tabletMeta = invertedIndex.getTabletMeta(tabletId);
-if (tabletMeta != null && tabletMeta.getPartitionId() == 
move.partitionId
-&& tabletMeta.getIndexId() == move.indexId) {
-tabletCandidates.put(tabletId, tabletMeta);
+if (canMoveTablet.test(tabletId, tabletMeta)) {
+pickedTabletId = tabletId;
+pickedTabletMeta = tabletMeta;
+break;
 }
 }
-LOG.debug("Find {} c

(doris) branch branch-2.1 updated: [case](udf) Only one backend, skip scp udf file (#36810) (#36964)

2024-06-28 Thread hellostephen
This is an automated email from the ASF dual-hosted git repository.

hellostephen pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
 new fe75cbc9271 [case](udf) Only one backend, skip scp udf file (#36810) 
(#36964)
fe75cbc9271 is described below

commit fe75cbc92716d097d58c23599d8144db7f2a2923
Author: Dongyang Li 
AuthorDate: Fri Jun 28 16:31:30 2024 +0800

[case](udf) Only one backend, skip scp udf file (#36810) (#36964)

backport #36810
---
 .../groovy/org/apache/doris/regression/suite/Suite.groovy | 15 +++
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git 
a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy
 
b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy
index e94331346d7..c67fcd88616 100644
--- 
a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy
+++ 
b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy
@@ -732,7 +732,7 @@ class Suite implements GroovyInterceptable {
 Assert.assertEquals(0, code)
 }
 
-void sshExec(String username, String host, String cmd) {
+void sshExec(String username, String host, String cmd, boolean alert=true) 
{
 String command = "ssh ${username}@${host} '${cmd}'"
 def cmds = ["/bin/bash", "-c", command]
 logger.info("Execute: ${cmds}".toString())
@@ -740,8 +740,10 @@ class Suite implements GroovyInterceptable {
 def errMsg = new StringBuilder()
 def msg = new StringBuilder()
 p.waitForProcessOutput(msg, errMsg)
-assert errMsg.length() == 0: "error occurred!" + errMsg
-assert p.exitValue() == 0
+if (alert) {
+assert errMsg.length() == 0: "error occurred!\n" + errMsg
+assert p.exitValue() == 0
+}
 }
 
 List getFrontendIpHttpPort() {
@@ -1285,10 +1287,15 @@ class Suite implements GroovyInterceptable {
 def backendId_to_backendIP = [:]
 def backendId_to_backendHttpPort = [:]
 getBackendIpHttpPort(backendId_to_backendIP, 
backendId_to_backendHttpPort)
+if(backendId_to_backendIP.size() == 1) {
+logger.info("Only one backend, skip scp udf file")
+return
+}
 
 def udf_file_dir = new File(udf_file_path).parent
 backendId_to_backendIP.values().each { be_ip ->
-sshExec ("root", be_ip, "ssh -o StrictHostKeyChecking=no 
root@${be_ip} \"mkdir -p ${udf_file_dir}\"")
+sshExec("root", be_ip, "ssh-keygen -f '/root/.ssh/known_hosts' -R 
\"${be_ip}\"", false)
+sshExec("root", be_ip, "ssh -o StrictHostKeyChecking=no 
root@${be_ip} \"mkdir -p ${udf_file_dir}\"", false)
 scpFiles("root", be_ip, udf_file_path, udf_file_path, false)
 }
 }


-
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org



(doris) branch branch-3.0 updated (b240a393747 -> fef6c7df5b8)

2024-06-28 Thread dataroaring
This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a change to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/doris.git


from b240a393747 [Bug](agg-state) set agg_state's be_exec_version on cloud 
mode (#36822)
 new d9589864ad7 [improvement](mysql catalog) disable mysql 
AbandonedConnectionCleanup Thread (#36655)
 new d66b29242aa [Feature](json_functions) support json-keys  (#36411)
 new 836274e4522 [chore](client) Reopen client if exception is catched 
(#36808)
 new 2a274af831c [Bug](materialized-view) fix result is nulable have wrong 
value on multi_distinct_sum (#36766)
 new 202a2cee858 [fix](statistics)Use ConcurrentHashMap to avoid 
ConcurrentModificationException (#36452)
 new 96fda76d0d9 [test](mtmv)Add predicate filter case for various join 
methods (#32888)
 new f3200c44334 [minor](compile) Fix compiling in debug mode (#36838)
 new d6576d55d1f [Fix](inverted index) fix wrong segment file path when 
doing segcompaction (#36825)
 new 3632df71233 [fix](cloud) Disable stream load and http stream 2PC for 
MoW table (#36816)
 new 5dbe997b321 [fix](statistics)Fix select mv with specified partitions 
bug. (#36817)
 new fc8d749a4c9 [Chore](execution) remove unused function 
get_least_supertype (#36743)
 new be5f699e1b3 [chore](Azure) Print Azure request failed message (#36794)
 new 2125f70f1f5 [fix](fe) Skip building MTMV cache in the checking 
compatibility mode (#36844)
 new 5087f7d7650 [fix](arrow-flight-sql) Fix arrow flight result sink 
(#36827)
 new 64f1c90c0ff [fix](protocol) only return multi result when 
CLIENT_MULTI_STATEMENTS been set (#36759)
 new 3060f4f3890 [improve](udf) support java-udf static load (#34980)
 new 5519241e02d [ci](cloud) adjust regression conf (#36846)
 new 34839a347cb [test](mtmv)add join infer and derive test case (#32860)
 new 940cbbe235f  [fix](generated column) static variables should not be 
used in ExpressionToExpr (#36824)
 new 61da3a07f65 [regression-test](prepared statement) fix unstable 
prepared_stmt_p0 (#36833)
 new b0f65ae8e35 [fix](schemachange) Avoid drop index in checking 
compatibility mode and checkpoint thread (#36820)
 new 7d09230ed6b [Chore](GA)Use github's codeowner to implement maintainer 
review (#36852)
 new c1fe915f93f [Fix](delete command) Mark delete sign when do delete 
command in MoW table (#35917)
 new f21c845687d [case](udf) Only one backend, skip scp udf file (#36810)
 new 91768833b69 [test] fix workload policy test failed (#36837)
 new 1efbb90b052 [fix](array)fix array with empty arg in be behavior 
(#36845)
 new d269560e588 [test](mtmv)Add group by aggregate negative case (#36562)
 new bb59b4df273 [opt](Nereids) Optimize findValidItems method to handle 
circular dependencies (#36839)
 new 0c5ce6e0546 [enhance](Azure) Check delete operation's response on 
Azure Blob Storage (#36800)
 new f44dcd2f425 [fix](spill) fix memory orphan check failure of 
partitioned hash join (#36806)
 new 5821f0f93bb [refactor](inverted index) Refactor the idx storage format 
to avoid cyclic references in Thrift files. (#36757)
 new 470030e4ce5 [fix](pipeline) fix exception safety issue in 
MultiCastDataStreamer (#36748)
 new 6731e94231f [test]add check for query release when p0 finish (#36660)
 new 8d4e6e23108 [bug](meta) fix can't deserialize meta from gson about 
polymorphic function class (#36847)
 new d22f4b14e45 [enhance](mtmv)support partition tvf (#36479)
 new 978aba32bca [improvement](meta) Switch meta serialization to gson 4 
(#36568)
 new e945a1b5815 [fix](load) fix no error url if no partition can be found 
(#36831)
 new 49df2694238 [improvement](clone) dead be will abort sched task (#36795)
 new 2589dd5bdf5 [improvement](balance) partition rebalance chose disk by 
rr (#36826)
 new f0951e172e3 [chore](rpc) Throw exception when use RPC in ckpt thread 
or the compatiblility mode (#36856)
 new afde3109137 [fix](load) Fix wrong results for high-concurrent loading 
(#36841)
 new 9a9fb7d8052 [feat](Nereids) after partition prune, output rows of scan 
node only contains rows from selected partitions  (#36760)
 new 3934e21c2f2 [chore](query) print query id when killed by timeout 
checker (#36868)
 new 6f924c7fdf3 [fix](regression test) Disable the case in cloud mode 
(#36769)
 new fd6ecbc9b42 [Featrue](default value) add pi as default value (#36280)
 new 51ab613b275 [Enhance](Routine Load) enhance routine load get topic 
metadata (#35651)
 new af68f527e5d [fix](test)fix regression test case failure (#36391)
 new 8c5eddf7d2e [enhancement](compaction) adjust compaction concurrency 
based on compaction score and workload (#36672)
 new 230d7ff94e1 [regression](kerberos)add hive kerberos docker regression 
env (#36430)
 new 2bc30552971 [test](auth)add upgrade and downgrade compatibility test 
case (#34489)

(doris) branch branch-2.1 updated: [fix](autoinc) avoid duplicated auto inc when role of fe changes (#36961)

2024-06-28 Thread dataroaring
This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
 new 72de9c1b8fa [fix](autoinc) avoid duplicated auto inc when role of fe 
changes (#36961)
72de9c1b8fa is described below

commit 72de9c1b8fa930043d2dfb50d293447fac85a40c
Author: Yongqiang YANG <98214048+dataroar...@users.noreply.github.com>
AuthorDate: Fri Jun 28 16:54:48 2024 +0800

[fix](autoinc) avoid duplicated auto inc when role of fe changes (#36961)

## Proposed changes

pick #36960

Issue Number: close #xxx


---
 .../java/org/apache/doris/catalog/AutoIncrementGenerator.java| 9 -
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/catalog/AutoIncrementGenerator.java 
b/fe/fe-core/src/main/java/org/apache/doris/catalog/AutoIncrementGenerator.java
index 9528f07e0ca..be110360850 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/catalog/AutoIncrementGenerator.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/catalog/AutoIncrementGenerator.java
@@ -23,6 +23,7 @@ import org.apache.doris.common.io.Text;
 import org.apache.doris.common.io.Writable;
 import org.apache.doris.persist.AutoIncrementIdUpdateLog;
 import org.apache.doris.persist.EditLog;
+import org.apache.doris.persist.gson.GsonPostProcessable;
 import org.apache.doris.persist.gson.GsonUtils;
 
 import com.google.common.base.Preconditions;
@@ -34,7 +35,7 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
-public class AutoIncrementGenerator implements Writable {
+public class AutoIncrementGenerator implements Writable, GsonPostProcessable {
 private static final Logger LOG = 
LogManager.getLogger(AutoIncrementGenerator.class);
 
 public static final long NEXT_ID_INIT_VALUE = 1;
@@ -102,4 +103,10 @@ public class AutoIncrementGenerator implements Writable {
 public static AutoIncrementGenerator read(DataInput in) throws IOException 
{
 return GsonUtils.GSON.fromJson(Text.readString(in), 
AutoIncrementGenerator.class);
 }
+
+@Override
+public void gsonPostProcess() throws IOException {
+nextId = batchEndId;
+}
+
 }


-
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org



(doris) branch branch-2.1 updated: [fix](inverted index)Make build index operation only affect base index (#36869) (#36988)

2024-06-28 Thread kxiao
This is an automated email from the ASF dual-hosted git repository.

kxiao pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
 new 4bec08c7e89 [fix](inverted index)Make build index operation only 
affect base index (#36869) (#36988)
4bec08c7e89 is described below

commit 4bec08c7e89cfa185f86c6d17f3f2ad7541fb274
Author: qiye 
AuthorDate: Fri Jun 28 16:57:07 2024 +0800

[fix](inverted index)Make build index operation only affect base index 
(#36869) (#36988)

backport #36869
---
 .../apache/doris/alter/SchemaChangeHandler.java|   2 +
 .../test_add_drop_index_on_table_with_mv.groovy| 135 +
 2 files changed, 137 insertions(+)

diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java 
b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
index a3572caad3e..e9cf9ddbc80 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
@@ -2932,6 +2932,8 @@ public class SchemaChangeHandler extends AlterHandler {
 // for now table's state can only be NORMAL
 Preconditions.checkState(olapTable.getState() == 
OlapTableState.NORMAL, olapTable.getState().name());
 
+// remove the index which is not the base index, only base index can 
be built inverted index
+indexSchemaMap.entrySet().removeIf(entry -> 
!entry.getKey().equals(olapTable.getBaseIndexId()));
 // begin checking each table
 Map> changedIndexIdToSchema = Maps.newHashMap();
 try {
diff --git 
a/regression-test/suites/inverted_index_p0/index_change/test_add_drop_index_on_table_with_mv.groovy
 
b/regression-test/suites/inverted_index_p0/index_change/test_add_drop_index_on_table_with_mv.groovy
new file mode 100644
index 000..7b63521178e
--- /dev/null
+++ 
b/regression-test/suites/inverted_index_p0/index_change/test_add_drop_index_on_table_with_mv.groovy
@@ -0,0 +1,135 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_add_drop_index_on_table_with_mv") {
+def tableName = "test_add_drop_index_on_table_with_mv"
+
+def timeout = 6
+def delta_time = 1000
+def alter_res = "null"
+def useTime = 0
+def wait_for_latest_op_on_table_finish = { table_name, OpTimeout ->
+for(int t = delta_time; t <= OpTimeout; t += delta_time){
+alter_res = sql """SHOW ALTER TABLE COLUMN WHERE TableName = 
"${table_name}" ORDER BY CreateTime DESC LIMIT 1;"""
+alter_res = alter_res.toString()
+if(alter_res.contains("FINISHED")) {
+sleep(1) // wait change table state to normal
+logger.info(table_name + " latest alter job finished, detail: 
" + alter_res)
+break
+}
+useTime = t
+sleep(delta_time)
+}
+assertTrue(useTime <= OpTimeout, "wait_for_latest_op_on_table_finish 
timeout")
+}
+
+def wait_for_build_index_on_partition_finish = { table_name, OpTimeout ->
+for(int t = delta_time; t <= OpTimeout; t += delta_time){
+alter_res = sql """SHOW BUILD INDEX WHERE TableName = 
"${table_name}";"""
+def expected_finished_num = alter_res.size();
+logger.info("expected_finished_num: " + expected_finished_num)
+// check only base table build index job
+assertEquals(1, expected_finished_num)
+def finished_num = 0;
+for (int i = 0; i < expected_finished_num; i++) {
+logger.info(table_name + " build index job state: " + 
alter_res[i][7] + i)
+if (alter_res[i][7] == "FINISHED") {
+++finished_num;
+}
+}
+if (finished_num == expected_finished_num) {
+logger.info(table_name + " all build index jobs finished, 
detail: " + alter_res)
+break
+}
+useTime = t
+sleep(delta_time)
+}
+assertTrue(useTime <= OpTimeout, 
"wait_for_latest_b

(doris) branch branch-2.0 updated: [fix](inverted index)Make build index operation only affect base index (#36869) (#36991)

2024-06-28 Thread kxiao
This is an automated email from the ASF dual-hosted git repository.

kxiao pushed a commit to branch branch-2.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.0 by this push:
 new 6b3cb56c35d [fix](inverted index)Make build index operation only 
affect base index (#36869) (#36991)
6b3cb56c35d is described below

commit 6b3cb56c35d3cfd270c01003b74c1e7493473727
Author: qiye 
AuthorDate: Fri Jun 28 16:57:23 2024 +0800

[fix](inverted index)Make build index operation only affect base index 
(#36869) (#36991)
---
 .../apache/doris/alter/SchemaChangeHandler.java|   2 +
 .../test_add_drop_index_on_table_with_mv.groovy| 135 +
 2 files changed, 137 insertions(+)

diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java 
b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
index 976f837a311..dae99f40a36 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
@@ -2877,6 +2877,8 @@ public class SchemaChangeHandler extends AlterHandler {
 // for now table's state can only be NORMAL
 Preconditions.checkState(olapTable.getState() == 
OlapTableState.NORMAL, olapTable.getState().name());
 
+// remove the index which is not the base index, only base index can 
be built inverted index
+indexSchemaMap.entrySet().removeIf(entry -> 
!entry.getKey().equals(olapTable.getBaseIndexId()));
 // begin checking each table
 Map> changedIndexIdToSchema = Maps.newHashMap();
 try {
diff --git 
a/regression-test/suites/inverted_index_p0/index_change/test_add_drop_index_on_table_with_mv.groovy
 
b/regression-test/suites/inverted_index_p0/index_change/test_add_drop_index_on_table_with_mv.groovy
new file mode 100644
index 000..7b63521178e
--- /dev/null
+++ 
b/regression-test/suites/inverted_index_p0/index_change/test_add_drop_index_on_table_with_mv.groovy
@@ -0,0 +1,135 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_add_drop_index_on_table_with_mv") {
+def tableName = "test_add_drop_index_on_table_with_mv"
+
+def timeout = 6
+def delta_time = 1000
+def alter_res = "null"
+def useTime = 0
+def wait_for_latest_op_on_table_finish = { table_name, OpTimeout ->
+for(int t = delta_time; t <= OpTimeout; t += delta_time){
+alter_res = sql """SHOW ALTER TABLE COLUMN WHERE TableName = 
"${table_name}" ORDER BY CreateTime DESC LIMIT 1;"""
+alter_res = alter_res.toString()
+if(alter_res.contains("FINISHED")) {
+sleep(1) // wait change table state to normal
+logger.info(table_name + " latest alter job finished, detail: 
" + alter_res)
+break
+}
+useTime = t
+sleep(delta_time)
+}
+assertTrue(useTime <= OpTimeout, "wait_for_latest_op_on_table_finish 
timeout")
+}
+
+def wait_for_build_index_on_partition_finish = { table_name, OpTimeout ->
+for(int t = delta_time; t <= OpTimeout; t += delta_time){
+alter_res = sql """SHOW BUILD INDEX WHERE TableName = 
"${table_name}";"""
+def expected_finished_num = alter_res.size();
+logger.info("expected_finished_num: " + expected_finished_num)
+// check only base table build index job
+assertEquals(1, expected_finished_num)
+def finished_num = 0;
+for (int i = 0; i < expected_finished_num; i++) {
+logger.info(table_name + " build index job state: " + 
alter_res[i][7] + i)
+if (alter_res[i][7] == "FINISHED") {
+++finished_num;
+}
+}
+if (finished_num == expected_finished_num) {
+logger.info(table_name + " all build index jobs finished, 
detail: " + alter_res)
+break
+}
+useTime = t
+sleep(delta_time)
+}
+assertTrue(useTime <= OpTimeout, 
"wait_for_latest_build_index_on_partition_f

(doris) branch branch-2.0 updated: [test](ES Catalog) Add test cases for ES 5.x (#34441) (#36996)

2024-06-28 Thread kxiao
This is an automated email from the ASF dual-hosted git repository.

kxiao pushed a commit to branch branch-2.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.0 by this push:
 new 12e53653eae [test](ES Catalog) Add test cases for ES 5.x (#34441) 
(#36996)
12e53653eae is described below

commit 12e53653eae515afa5a2810ae3f8bfb089e27e1e
Author: qiye 
AuthorDate: Fri Jun 28 16:58:21 2024 +0800

[test](ES Catalog) Add test cases for ES 5.x (#34441) (#36996)
---
 .../docker-compose/elasticsearch/es.env|  1 +
 .../docker-compose/elasticsearch/es.yaml.tpl   | 26 +
 .../elasticsearch/scripts/data/data3_es5.json  | 28 ++
 .../elasticsearch/scripts/es_init.sh   | 21 
 regression-test/conf/regression-conf.groovy|  1 +
 .../data/external_table_p0/es/test_es_query.out| 62 ++
 .../pipeline/external/conf/regression-conf.groovy  |  1 +
 .../external_table_p0/es/test_es_query.groovy  | 30 +++
 8 files changed, 170 insertions(+)

diff --git a/docker/thirdparties/docker-compose/elasticsearch/es.env 
b/docker/thirdparties/docker-compose/elasticsearch/es.env
index 0b8138fb340..a98cc1c3663 100644
--- a/docker/thirdparties/docker-compose/elasticsearch/es.env
+++ b/docker/thirdparties/docker-compose/elasticsearch/es.env
@@ -19,3 +19,4 @@
 DOCKER_ES_6_EXTERNAL_PORT=19200
 DOCKER_ES_7_EXTERNAL_PORT=29200
 DOCKER_ES_8_EXTERNAL_PORT=39200
+DOCKER_ES_5_EXTERNAL_PORT=59200
diff --git a/docker/thirdparties/docker-compose/elasticsearch/es.yaml.tpl 
b/docker/thirdparties/docker-compose/elasticsearch/es.yaml.tpl
index cee7c2748a8..fc1d3245432 100644
--- a/docker/thirdparties/docker-compose/elasticsearch/es.yaml.tpl
+++ b/docker/thirdparties/docker-compose/elasticsearch/es.yaml.tpl
@@ -18,6 +18,31 @@
 version: "3.9"
 
 services:
+  doris--es_5:
+image: elasticsearch:5.6.16
+ports:
+  - ${DOCKER_ES_5_EXTERNAL_PORT}:9200
+environment:
+  cluster.name: "elasticsearch5"
+  ES_JAVA_OPTS: "-Xms256m -Xmx256m"
+  discovery.type: "single-node"
+  xpack.security.enabled: "false"
+  cluster.routing.allocation.disk.threshold_enabled: true 
+  cluster.routing.allocation.disk.watermark.low: 500mb 
+  cluster.routing.allocation.disk.watermark.high: 300mb
+  cluster.routing.allocation.disk.watermark.flood_stage: 200mb
+  ES_LOG_STYLE: "file"
+volumes:
+  - ./data/es5/:/usr/share/elasticsearch/data
+  - ./logs/es5/:/usr/share/elasticsearch/logs
+  - 
./config/es5/log4j2.properties:/usr/share/elasticsearch/log4j2.properties
+networks:
+  - doris--es
+healthcheck:
+  test: [ "CMD", "curl", 
"localhost:9200/_cluster/health?wait_for_status=green" ]
+  interval: 30s
+  timeout: 10s
+  retries: 100
   doris--es_6:
 # es official not provide 6.x image for arm/v8, use compatible image.
 # https://github.com/dockhippie/elasticsearch/tree/master/v6.8
@@ -96,6 +121,7 @@ services:
 volumes:
   - ./scripts/:/mnt/scripts
 environment:
+  ES_5_HOST: "doris--es_5"
   ES_6_HOST: "doris--es_6"
   ES_7_HOST: "doris--es_7"
   ES_8_HOST: "doris--es_8"
diff --git 
a/docker/thirdparties/docker-compose/elasticsearch/scripts/data/data3_es5.json 
b/docker/thirdparties/docker-compose/elasticsearch/scripts/data/data3_es5.json
new file mode 100755
index 000..f4cc19ff9ec
--- /dev/null
+++ 
b/docker/thirdparties/docker-compose/elasticsearch/scripts/data/data3_es5.json
@@ -0,0 +1,28 @@
+{
+  "test1": "string3",
+  "test2": "text3_4*5",
+  "test3": 5.0,
+  "test4": "2022-08-08",
+  "test5": .22,
+  "test6": "2022-08-08T12:10:10.151",
+  "c_bool": [true, false, true, true],
+  "c_byte": [1, -2, -3, 4],
+  "c_short": [128, 129, -129, -130],
+  "c_integer": [32768, 32769, -32769, -32770],
+  "c_long": [-1, 0, 1, 2],
+  "c_unsigned_long": [0, 1, 2, 3],
+  "c_float": [1.0, 1.1, 1.2, 1.3],
+  "c_half_float": [1, 2, 3, 4],
+  "c_double": [1, 2, 3, 4],
+  "c_scaled_float": [1, 2, 3, 4],
+  "c_date": ["2020-01-01", "2020-01-02"],
+  "c_datetime": ["2020-01-01 12:00:00", "2020-01-02 13:01:01"],
+  "c_keyword": ["a", "b", "c"],
+  "c_text": ["d", "e", "f"],
+  "c_ip": ["192.168.0.1", "127.0.0.1"],
+  "c_person": [
+{"name": "Andy", "age": 18},
+{"name": "Tim", "age": 28}
+  ],
+  "message": "I'm not null or empty"
+}
diff --git 
a/docker/thirdparties/docker-compose/elasticsearch/scripts/es_init.sh 
b/docker/thirdparties/docker-compose/elasticsearch/scripts/es_init.sh
index 51364bbdf82..5c865e660ad 100755
--- a/docker/thirdparties/docker-compose/elasticsearch/scripts/es_init.sh
+++ b/docker/thirdparties/docker-compose/elasticsearch/scripts/es_init.sh
@@ -16,6 +16,27 @@
 # specific language governing permissions and limitations
 # under the License.
 
+# es 5
+# create index test1
+# shellcheck disable=SC2154
+curl "http://${ES_5_HOST}:9200/test1"; -H "Content-Type:application

(doris) branch branch-2.1 updated: [test](ES Catalog) Add test cases for ES 5.x (#34441) (#36993)

2024-06-28 Thread kxiao
This is an automated email from the ASF dual-hosted git repository.

kxiao pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
 new 4dcceaefea2 [test](ES Catalog) Add test cases for ES 5.x (#34441) 
(#36993)
4dcceaefea2 is described below

commit 4dcceaefea24d24ba5d30bf30e7365f8a5c32fbb
Author: qiye 
AuthorDate: Fri Jun 28 16:58:07 2024 +0800

[test](ES Catalog) Add test cases for ES 5.x (#34441) (#36993)

backport #34441
---
 .../docker-compose/elasticsearch/es.env|  1 +
 .../docker-compose/elasticsearch/es.yaml.tpl   | 26 +
 .../elasticsearch/scripts/data/data3_es5.json  | 28 ++
 .../elasticsearch/scripts/es_init.sh   | 21 
 regression-test/conf/regression-conf.groovy|  1 +
 .../data/external_table_p0/es/test_es_query.out| 62 ++
 .../pipeline/external/conf/regression-conf.groovy  |  1 +
 .../external_table_p0/es/test_es_query.groovy  | 30 +++
 8 files changed, 170 insertions(+)

diff --git a/docker/thirdparties/docker-compose/elasticsearch/es.env 
b/docker/thirdparties/docker-compose/elasticsearch/es.env
index 0b8138fb340..a98cc1c3663 100644
--- a/docker/thirdparties/docker-compose/elasticsearch/es.env
+++ b/docker/thirdparties/docker-compose/elasticsearch/es.env
@@ -19,3 +19,4 @@
 DOCKER_ES_6_EXTERNAL_PORT=19200
 DOCKER_ES_7_EXTERNAL_PORT=29200
 DOCKER_ES_8_EXTERNAL_PORT=39200
+DOCKER_ES_5_EXTERNAL_PORT=59200
diff --git a/docker/thirdparties/docker-compose/elasticsearch/es.yaml.tpl 
b/docker/thirdparties/docker-compose/elasticsearch/es.yaml.tpl
index 25415380906..5acbec13465 100644
--- a/docker/thirdparties/docker-compose/elasticsearch/es.yaml.tpl
+++ b/docker/thirdparties/docker-compose/elasticsearch/es.yaml.tpl
@@ -18,6 +18,31 @@
 version: "3.9"
 
 services:
+  doris--es_5:
+image: elasticsearch:5.6.16
+ports:
+  - ${DOCKER_ES_5_EXTERNAL_PORT}:9200
+environment:
+  cluster.name: "elasticsearch5"
+  ES_JAVA_OPTS: "-Xms256m -Xmx256m"
+  discovery.type: "single-node"
+  xpack.security.enabled: "false"
+  cluster.routing.allocation.disk.threshold_enabled: true 
+  cluster.routing.allocation.disk.watermark.low: 500mb 
+  cluster.routing.allocation.disk.watermark.high: 300mb
+  cluster.routing.allocation.disk.watermark.flood_stage: 200mb
+  ES_LOG_STYLE: "file"
+volumes:
+  - ./data/es5/:/usr/share/elasticsearch/data
+  - ./logs/es5/:/usr/share/elasticsearch/logs
+  - 
./config/es5/log4j2.properties:/usr/share/elasticsearch/log4j2.properties
+networks:
+  - doris--es
+healthcheck:
+  test: [ "CMD", "curl", 
"localhost:9200/_cluster/health?wait_for_status=green" ]
+  interval: 30s
+  timeout: 10s
+  retries: 100
   doris--es_6:
 # es official not provide 6.x image for arm/v8, use compatible image.
 # https://github.com/dockhippie/elasticsearch/tree/master/v6.8
@@ -96,6 +121,7 @@ services:
 volumes:
   - ./scripts/:/mnt/scripts
 environment:
+  ES_5_HOST: "doris--es_5"
   ES_6_HOST: "doris--es_6"
   ES_7_HOST: "doris--es_7"
   ES_8_HOST: "doris--es_8"
diff --git 
a/docker/thirdparties/docker-compose/elasticsearch/scripts/data/data3_es5.json 
b/docker/thirdparties/docker-compose/elasticsearch/scripts/data/data3_es5.json
new file mode 100755
index 000..f4cc19ff9ec
--- /dev/null
+++ 
b/docker/thirdparties/docker-compose/elasticsearch/scripts/data/data3_es5.json
@@ -0,0 +1,28 @@
+{
+  "test1": "string3",
+  "test2": "text3_4*5",
+  "test3": 5.0,
+  "test4": "2022-08-08",
+  "test5": .22,
+  "test6": "2022-08-08T12:10:10.151",
+  "c_bool": [true, false, true, true],
+  "c_byte": [1, -2, -3, 4],
+  "c_short": [128, 129, -129, -130],
+  "c_integer": [32768, 32769, -32769, -32770],
+  "c_long": [-1, 0, 1, 2],
+  "c_unsigned_long": [0, 1, 2, 3],
+  "c_float": [1.0, 1.1, 1.2, 1.3],
+  "c_half_float": [1, 2, 3, 4],
+  "c_double": [1, 2, 3, 4],
+  "c_scaled_float": [1, 2, 3, 4],
+  "c_date": ["2020-01-01", "2020-01-02"],
+  "c_datetime": ["2020-01-01 12:00:00", "2020-01-02 13:01:01"],
+  "c_keyword": ["a", "b", "c"],
+  "c_text": ["d", "e", "f"],
+  "c_ip": ["192.168.0.1", "127.0.0.1"],
+  "c_person": [
+{"name": "Andy", "age": 18},
+{"name": "Tim", "age": 28}
+  ],
+  "message": "I'm not null or empty"
+}
diff --git 
a/docker/thirdparties/docker-compose/elasticsearch/scripts/es_init.sh 
b/docker/thirdparties/docker-compose/elasticsearch/scripts/es_init.sh
index 51364bbdf82..5c865e660ad 100755
--- a/docker/thirdparties/docker-compose/elasticsearch/scripts/es_init.sh
+++ b/docker/thirdparties/docker-compose/elasticsearch/scripts/es_init.sh
@@ -16,6 +16,27 @@
 # specific language governing permissions and limitations
 # under the License.
 
+# es 5
+# create index test1
+# shellcheck disable=SC2154
+curl "http://${ES_5_HOST}:9200/test1"; -H 

(doris) branch branch-2.1 updated: [Bug](runtime-filter) disable sync filter when pipeline engine is off (#36994)

2024-06-28 Thread lihaopeng
This is an automated email from the ASF dual-hosted git repository.

lihaopeng pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
 new cb80ae906f2 [Bug](runtime-filter) disable sync filter when pipeline 
engine is off (#36994)
cb80ae906f2 is described below

commit cb80ae906f2a0e7e4a57b77ba645a81e238b3fcf
Author: Pxl 
AuthorDate: Fri Jun 28 16:59:26 2024 +0800

[Bug](runtime-filter) disable sync filter when pipeline engine is off 
(#36994)

## Proposed changes
1. disable sync filter when pipeline engine is off
2. reduce some warning log
---
 be/src/exprs/runtime_filter.cpp | 2 +-
 be/src/runtime/runtime_state.cpp| 6 --
 be/src/vec/runtime/vdata_stream_mgr.cpp | 4 ++--
 3 files changed, 3 insertions(+), 9 deletions(-)

diff --git a/be/src/exprs/runtime_filter.cpp b/be/src/exprs/runtime_filter.cpp
index 39eb814bbea..1271ec39156 100644
--- a/be/src/exprs/runtime_filter.cpp
+++ b/be/src/exprs/runtime_filter.cpp
@@ -1852,7 +1852,7 @@ RuntimeFilterType IRuntimeFilter::get_real_type() {
 bool IRuntimeFilter::need_sync_filter_size() {
 return (type() == RuntimeFilterType::IN_OR_BLOOM_FILTER ||
 type() == RuntimeFilterType::BLOOM_FILTER) &&
-   _wrapper->get_build_bf_cardinality() && !_is_broadcast_join;
+   _wrapper->get_build_bf_cardinality() && !_is_broadcast_join && 
_enable_pipeline_exec;
 }
 
 Status IRuntimeFilter::update_filter(const UpdateRuntimeFilterParams* param) {
diff --git a/be/src/runtime/runtime_state.cpp b/be/src/runtime/runtime_state.cpp
index 75d06adc561..2713ee441dd 100644
--- a/be/src/runtime/runtime_state.cpp
+++ b/be/src/runtime/runtime_state.cpp
@@ -544,15 +544,9 @@ Status 
RuntimeState::register_consumer_runtime_filter(const doris::TRuntimeFilte
   bool need_local_merge, 
int node_id,
   doris::IRuntimeFilter** 
consumer_filter) {
 if (desc.has_remote_targets || need_local_merge) {
-LOG(WARNING) << "registe global ins:" << _profile.name()
- << " ,mgr: " << global_runtime_filter_mgr()
- << " ,filter id:" << desc.filter_id;
 return global_runtime_filter_mgr()->register_consumer_filter(desc, 
query_options(), node_id,
  
consumer_filter, false, true);
 } else {
-LOG(WARNING) << "registe local ins:" << _profile.name()
- << " ,mgr: " << global_runtime_filter_mgr()
- << " ,filter id:" << desc.filter_id;
 return local_runtime_filter_mgr()->register_consumer_filter(desc, 
query_options(), node_id,
 
consumer_filter, false, false);
 }
diff --git a/be/src/vec/runtime/vdata_stream_mgr.cpp 
b/be/src/vec/runtime/vdata_stream_mgr.cpp
index 46d335fbf00..4e48effb566 100644
--- a/be/src/vec/runtime/vdata_stream_mgr.cpp
+++ b/be/src/vec/runtime/vdata_stream_mgr.cpp
@@ -97,8 +97,8 @@ Status VDataStreamMgr::find_recvr(const TUniqueId& 
fragment_instance_id, PlanNod
 }
 ++range.first;
 }
-return Status::InternalError("Could not find local receiver for node {} 
with instance {}",
- node_id, print_id(fragment_instance_id));
+return Status::InvalidArgument("Could not find local receiver for node {} 
with instance {}",
+   node_id, print_id(fragment_instance_id));
 }
 
 Status VDataStreamMgr::transmit_block(const PTransmitDataParams* request,


-
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org



(doris) branch master updated: [Refactor](meta) Delete useless classes and write methods (#36894)

2024-06-28 Thread dataroaring
This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
 new 4e142a303d4 [Refactor](meta) Delete useless classes and write methods 
(#36894)
4e142a303d4 is described below

commit 4e142a303d4fcaba0ae5ea5489778f59b3cb1848
Author: Peyz <30936555+iszhang...@users.noreply.github.com>
AuthorDate: Fri Jun 28 17:08:24 2024 +0800

[Refactor](meta) Delete useless classes and write methods (#36894)


Co-authored-by: zhangpeicheng 
---
 .../java/org/apache/doris/common/io/DeepCopy.java  |   1 +
 .../java/org/apache/doris/backup/BackupMeta.java   |   1 +
 .../apache/doris/catalog/CatalogRecycleBin.java|   2 +
 .../main/java/org/apache/doris/catalog/Column.java |  30 +-
 .../java/org/apache/doris/catalog/ColumnStats.java |  26 +
 .../org/apache/doris/catalog/DistributionInfo.java |   1 +
 .../apache/doris/catalog/HashDistributionInfo.java |   1 +
 .../apache/doris/catalog/ListPartitionInfo.java|   1 +
 .../apache/doris/catalog/MaterializedIndex.java|  11 +-
 .../java/org/apache/doris/catalog/OlapTable.java   |   3 +-
 .../java/org/apache/doris/catalog/Partition.java   |   2 +
 .../org/apache/doris/catalog/PartitionInfo.java|  20 +---
 .../doris/catalog/RandomDistributionInfo.java  |   1 +
 .../apache/doris/catalog/RangePartitionInfo.java   |   1 +
 .../java/org/apache/doris/catalog/Replica.java |  22 +---
 .../apache/doris/catalog/SinglePartitionInfo.java  |   1 +
 .../main/java/org/apache/doris/catalog/Tablet.java |  12 +--
 .../org/apache/doris/catalog/TempPartitions.java   |  24 +
 .../apache/doris/cloud/catalog/CloudPartition.java |   9 +-
 .../apache/doris/cloud/catalog/CloudReplica.java   |  17 +--
 .../apache/doris/persist/BackendIdsUpdateInfo.java |  63 
 .../java/org/apache/doris/persist/ClusterInfo.java | 114 -
 .../apache/doris/transaction/TabletCommitInfo.java |  11 +-
 .../org/apache/doris/catalog/ColumnStatTest.java   |  21 ++--
 .../java/org/apache/doris/catalog/ColumnTest.java  |  18 ++--
 .../doris/catalog/MaterializedIndexTest.java   |   6 +-
 .../doris/catalog/RangePartitionInfoTest.java  |   6 +-
 .../java/org/apache/doris/catalog/ReplicaTest.java |  12 +--
 .../java/org/apache/doris/catalog/TabletTest.java  |   6 +-
 .../apache/doris/catalog/TempPartitionTest.java|   4 +-
 30 files changed, 74 insertions(+), 373 deletions(-)

diff --git 
a/fe/fe-common/src/main/java/org/apache/doris/common/io/DeepCopy.java 
b/fe/fe-common/src/main/java/org/apache/doris/common/io/DeepCopy.java
index 106205f2256..282e6a0e16a 100644
--- a/fe/fe-common/src/main/java/org/apache/doris/common/io/DeepCopy.java
+++ b/fe/fe-common/src/main/java/org/apache/doris/common/io/DeepCopy.java
@@ -39,6 +39,7 @@ public class DeepCopy {
 // deep copy orig to dest.
 // the param "c" is the implementation class of "dest".
 // And the "dest" class must has method "readFields(DataInput)"
+@Deprecated
 public static boolean copy(Writable orig, Writable dest, Class c, int 
metaVersion) {
 MetaContext metaContext = new MetaContext();
 metaContext.setMetaVersion(metaVersion);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupMeta.java 
b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupMeta.java
index 45fdd1261ce..e27c8d19a84 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupMeta.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupMeta.java
@@ -140,6 +140,7 @@ public class BackupMeta implements Writable, 
GsonPostProcessable {
 Text.writeString(out, GsonUtils.GSON.toJson(this));
 }
 
+@Deprecated
 public void readFields(DataInput in) throws IOException {
 int size = in.readInt();
 for (int i = 0; i < size; i++) {
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/catalog/CatalogRecycleBin.java 
b/fe/fe-core/src/main/java/org/apache/doris/catalog/CatalogRecycleBin.java
index 6b6fe5284a4..17cc5cd148b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/CatalogRecycleBin.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/CatalogRecycleBin.java
@@ -1305,6 +1305,7 @@ public class CatalogRecycleBin extends MasterDaemon 
implements Writable, GsonPos
 updateDbInfoForLowerVersion();
 }
 
+@Deprecated
 public void readFields(DataInput in) throws IOException {
 int count = in.readInt();
 for (int i = 0; i < count; i++) {
@@ -1439,6 +1440,7 @@ public class CatalogRecycleBin extends MasterDaemon 
implements Writable, GsonPos
 return table;
 }
 
+@Deprecated
 public void readFields(DataInput in) throws IOException {
 dbId = in.readLong();
 table = Table.read(in);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Col

(doris) branch master updated (4e142a303d4 -> 1789ec61c40)

2024-06-28 Thread huajianlan
This is an automated email from the ASF dual-hosted git repository.

huajianlan pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


from 4e142a303d4 [Refactor](meta) Delete useless classes and write methods 
(#36894)
 add 1789ec61c40 [refactor](nereids) New distribute planner (#36531)

No new revisions were added by this update.

Summary of changes:
 .../antlr4/org/apache/doris/nereids/DorisParser.g4 |   1 +
 .../src/main/java/org/apache/doris/common/Id.java  |   7 +-
 .../java/org/apache/doris/common/TreeNode.java |  25 ++
 .../org/apache/doris/common/profile/Profile.java   |  10 +
 .../doris/common/profile/SummaryProfile.java   |  13 +
 .../org/apache/doris/nereids/NereidsPlanner.java   |  60 +++-
 .../doris/nereids/parser/LogicalPlanBuilder.java   |   3 +
 .../properties/ChildrenPropertiesRegulator.java|  52 ++--
 .../functions/table/TableValuedFunction.java   |   4 +
 .../doris/nereids/trees/plans/PlaceholderId.java   |   7 +-
 .../trees/plans/commands/ExplainCommand.java   |   1 +
 .../plans/distribute/BucketSpecifyInstances.java   |  30 ++
 .../plans/distribute/DefaultSpecifyInstances.java  |  30 ++
 .../trees/plans/distribute/DistributePlanner.java  |  66 
 .../trees/plans/distribute/DistributedPlan.java|  57 
 .../trees/plans/distribute/FragmentIdMapping.java  |  71 +
 .../plans/distribute/NereidsSpecifyInstances.java  |  59 
 .../plans/distribute/PipelineDistributedPlan.java  |  72 +
 .../BackendDistributedPlanWorkerManager.java   |  63 
 .../plans/distribute/worker/BackendWorker.java |  74 +
 .../distribute/worker/DistributedPlanWorker.java   |  40 +++
 .../worker/DistributedPlanWorkerManager.java   |  25 ++
 .../worker/LoadBalanceScanWorkerSelector.java  | 336 +
 .../distribute/worker/ScanWorkerSelector.java  |  62 
 .../plans/distribute/worker/WorkerScanRanges.java  |  33 ++
 .../trees/plans/distribute/worker/Workload.java|  22 ++
 .../worker/job/AbstractUnassignedJob.java  |  74 +
 .../worker/job/AbstractUnassignedScanJob.java  | 202 +
 .../plans/distribute/worker/job/AssignedJob.java   |  39 +++
 .../distribute/worker/job/AssignedJobBuilder.java  |  64 
 .../distribute/worker/job/BucketScanSource.java| 148 +
 .../distribute/worker/job/CustomAssignmentJob.java |  29 ++
 .../distribute/worker/job/DefaultScanSource.java   | 111 +++
 .../worker/job/LocalShuffleAssignedJob.java|  43 +++
 .../plans/distribute/worker/job/ScanRange.java |  23 ++
 .../plans/distribute/worker/job/ScanRanges.java| 109 +++
 .../plans/distribute/worker/job/ScanSource.java|  41 +++
 .../plans/distribute/worker/job/Splittable.java|  69 +
 .../distribute/worker/job/StaticAssignedJob.java   | 106 +++
 .../UnassignedGatherScanMultiRemoteTablesJob.java  |  87 ++
 .../plans/distribute/worker/job/UnassignedJob.java |  52 
 .../worker/job/UnassignedJobBuilder.java   | 266 
 .../worker/job/UnassignedQueryConstantJob.java |  50 +++
 .../job/UnassignedScanBucketOlapTableJob.java  | 310 +++
 .../job/UnassignedScanSingleOlapTableJob.java  |  82 +
 .../job/UnassignedScanSingleRemoteTableJob.java|  53 
 .../worker/job/UnassignedShuffleJob.java   | 124 
 .../worker/job/UnassignedSpecifyInstancesJob.java  |  49 +++
 .../worker/job/UninstancedScanSource.java  |  36 +++
 .../distribute/worker/job/WorkerScanSource.java|  31 ++
 .../org/apache/doris/nereids/util/JoinUtils.java   |  22 +-
 .../java/org/apache/doris/nereids/util/Utils.java  |  14 +
 .../org/apache/doris/planner/DataGenScanNode.java  |   4 +
 .../org/apache/doris/planner/OlapScanNode.java |  37 ++-
 .../org/apache/doris/planner/PlanFragment.java |  25 ++
 .../java/org/apache/doris/planner/PlanNode.java|  25 ++
 .../java/org/apache/doris/planner/ScanNode.java|   9 +-
 .../java/org/apache/doris/qe/ConnectContext.java   |   6 +
 .../main/java/org/apache/doris/qe/Coordinator.java |  51 ++--
 .../org/apache/doris/qe/NereidsCoordinator.java| 194 
 .../java/org/apache/doris/qe/SessionVariable.java  |  54 +++-
 .../java/org/apache/doris/qe/StmtExecutor.java |  12 +-
 .../org/apache/doris/qe/HmsQueryCacheTest.java |  14 +-
 .../distribute/colocate_union_numbers.out  |  10 +
 .../nereids_syntax_p0/distribute/local_shuffle.out |  36 +++
 .../prune_bucket_with_bucket_shuffle_join.out  |   5 +
 .../distribute/query_constant.out  |  12 +
 .../data/nereids_syntax_p0/distribute/shuffle.out  |   8 +
 .../distribute/shuffle_left_join.out   |   9 +
 .../test_forbid_unknown_col_stats.groovy   |   2 +-
 .../distribute/colocate_union_numbers.groovy   |  70 +
 .../distribute/local_shuffle.groovy| 187 
 .../prune_bucket_with_bucket_shuffle_join.groovy  

(doris) branch master updated (1789ec61c40 -> f4a6434da7a)

2024-06-28 Thread morningman
This is an automated email from the ASF dual-hosted git repository.

morningman pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


from 1789ec61c40 [refactor](nereids) New distribute planner (#36531)
 add f4a6434da7a [feature](insert)support external hive truncate table DDL 
(#36801)

No new revisions were added by this update.

Summary of changes:
 .../apache/doris/analysis/TruncateTableStmt.java   |   3 -
 .../main/java/org/apache/doris/catalog/Env.java|   6 +-
 .../org/apache/doris/datasource/CatalogIf.java |   3 +
 .../apache/doris/datasource/ExternalCatalog.java   |  23 +
 .../doris/datasource/hive/HMSCachedClient.java |   2 +
 .../doris/datasource/hive/HiveMetadataOps.java |  16 
 .../hive/PostgreSQLJdbcHMSCachedClient.java|   4 +
 .../datasource/hive/ThriftHMSCachedClient.java |  19 +++-
 .../datasource/iceberg/IcebergMetadataOps.java |   5 ++
 .../datasource/operations/ExternalMetadataOps.java |   8 ++
 .../doris/datasource/TestHMSCachedClient.java  |   3 +
 .../hive/ddl/test_hive_truncate_table.out  |  24 +
 .../hive/ddl/test_hive_truncate_table.groovy   | 100 +
 13 files changed, 210 insertions(+), 6 deletions(-)
 create mode 100644 
regression-test/data/external_table_p0/hive/ddl/test_hive_truncate_table.out
 create mode 100644 
regression-test/suites/external_table_p0/hive/ddl/test_hive_truncate_table.groovy


-
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org



(doris) branch master updated: [Fix](multi-catalog) Fix the transaction is not removed in abnormal situations by removing transaction in finally block. (#36705)

2024-06-28 Thread morningman
This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
 new a2ee53ad76f [Fix](multi-catalog) Fix the transaction is not removed in 
abnormal situations by removing transaction in finally block. (#36705)
a2ee53ad76f is described below

commit a2ee53ad76ff651d8059b954f4fde48eaf52b9f6
Author: Qi Chen 
AuthorDate: Fri Jun 28 17:41:20 2024 +0800

[Fix](multi-catalog) Fix the transaction is not removed in abnormal 
situations by removing transaction in finally block. (#36705)

[Fix] (multi-catalog) Fix the transaction is not removed in abnormal
situations by removing transaction in `finally` block.
---
 .../java/org/apache/doris/transaction/HiveTransactionManager.java  | 7 +--
 .../org/apache/doris/transaction/IcebergTransactionManager.java| 7 +--
 2 files changed, 10 insertions(+), 4 deletions(-)

diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/transaction/HiveTransactionManager.java
 
b/fe/fe-core/src/main/java/org/apache/doris/transaction/HiveTransactionManager.java
index 838d135fa45..c48210ad452 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/transaction/HiveTransactionManager.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/transaction/HiveTransactionManager.java
@@ -59,8 +59,11 @@ public class HiveTransactionManager implements 
TransactionManager {
 
 @Override
 public void rollback(long id) {
-getTransactionWithException(id).rollback();
-transactions.remove(id);
+try {
+getTransactionWithException(id).rollback();
+} finally {
+transactions.remove(id);
+}
 }
 
 @Override
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/transaction/IcebergTransactionManager.java
 
b/fe/fe-core/src/main/java/org/apache/doris/transaction/IcebergTransactionManager.java
index f4b802aaa99..f373c133685 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/transaction/IcebergTransactionManager.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/transaction/IcebergTransactionManager.java
@@ -51,8 +51,11 @@ public class IcebergTransactionManager implements 
TransactionManager {
 
 @Override
 public void rollback(long id) {
-getTransactionWithException(id).rollback();
-transactions.remove(id);
+try {
+getTransactionWithException(id).rollback();
+} finally {
+transactions.remove(id);
+}
 }
 
 @Override


-
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org



(doris) branch master updated: [Fix](hive-writer) Fixed the issue where `uncompletedMpuPendingUploads` did not remove objects correctly. (#36905)

2024-06-28 Thread morningman
This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
 new c127ed1f658 [Fix](hive-writer) Fixed the issue where 
`uncompletedMpuPendingUploads` did not remove objects correctly. (#36905)
c127ed1f658 is described below

commit c127ed1f6588a2c22a0a78ece06a1087116f9bec
Author: Qi Chen 
AuthorDate: Fri Jun 28 17:41:02 2024 +0800

[Fix](hive-writer) Fixed the issue where `uncompletedMpuPendingUploads` did 
not remove objects correctly. (#36905)

[Fix](hive-writer) Fixed the issue where `uncompletedMpuPendingUploads`
did not remove objects correctly.
---
 .../apache/doris/datasource/hive/HMSTransaction.java   | 18 ++
 1 file changed, 18 insertions(+)

diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSTransaction.java 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSTransaction.java
index 824af6996a9..d883b9dc786 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSTransaction.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSTransaction.java
@@ -103,6 +103,24 @@ public class HMSTransaction implements Transaction {
 this.s3MPUPendingUpload = s3MPUPendingUpload;
 this.path = path;
 }
+
+@Override
+public boolean equals(Object o) {
+if (this == o) {
+return true;
+}
+if (o == null || getClass() != o.getClass()) {
+return false;
+}
+UncompletedMpuPendingUpload that = (UncompletedMpuPendingUpload) o;
+return Objects.equals(s3MPUPendingUpload, that.s3MPUPendingUpload) 
&& Objects.equals(path,
+that.path);
+}
+
+@Override
+public int hashCode() {
+return Objects.hash(s3MPUPendingUpload, path);
+}
 }
 
 private Set uncompletedMpuPendingUploads = 
new HashSet<>();


-
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org



(doris) branch master updated (a2ee53ad76f -> b478800da7f)

2024-06-28 Thread eldenmoon
This is an automated email from the ASF dual-hosted git repository.

eldenmoon pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


from a2ee53ad76f [Fix](multi-catalog) Fix the transaction is not removed in 
abnormal situations by removing transaction in finally block. (#36705)
 add b478800da7f [Fix](variant) ignore serialization of nothing type 
(#36997)

No new revisions were added by this update.

Summary of changes:
 be/src/vec/data_types/data_type_object.cpp   | 14 +++---
 regression-test/data/variant_p0/load.out | 12 ++--
 .../data/variant_p0/test_sub_path_pruning.out| 10 +-
 regression-test/suites/variant_p0/load.groovy|  8 
 .../suites/variant_p0/test_sub_path_pruning.groovy   | 20 +++-
 5 files changed, 33 insertions(+), 31 deletions(-)


-
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org



(doris) branch branch-2.1 updated: [Test](manager_interface)append manager interface test. (#35889) (#36912)

2024-06-28 Thread morningman
This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
 new 7c6d280def4 [Test](manager_interface)append manager interface test. 
(#35889) (#36912)
7c6d280def4 is described below

commit 7c6d280def40459a9d20a2fbdcd2d66acb3263e2
Author: Mingyu Chen 
AuthorDate: Fri Jun 28 18:38:33 2024 +0800

[Test](manager_interface)append manager interface test. (#35889) (#36912)

bp #35889

-

Co-authored-by: daidai <2017501...@qq.com>
---
 .../data/manager/test_manager_interface_1.out  |  23 +
 .../suites/manager/test_manager_interface_1.groovy | 798 +
 .../suites/manager/test_manager_interface_2.groovy | 317 
 .../suites/manager/test_manager_interface_3.groovy | 613 
 4 files changed, 1751 insertions(+)

diff --git a/regression-test/data/manager/test_manager_interface_1.out 
b/regression-test/data/manager/test_manager_interface_1.out
new file mode 100644
index 000..0f50524d648
--- /dev/null
+++ b/regression-test/data/manager/test_manager_interface_1.out
@@ -0,0 +1,23 @@
+-- This file is automatically generated. You should know what you did if you 
want to edit this
+-- !metadata_1 --
+internal   test_manager_metadata_name_ids  \N
+
+-- !metadata_2 --
+internal   test_manager_metadata_name_ids  test_metadata_name_ids
+
+-- !metadata_2 --
+
+-- !tables_1 --
+k1 TINYINT Yes true\N  
+k2 DECIMAL(10, 2)  Yes true10.05   
+k3 CHAR(10)Yes true\N  BLOOM_FILTER
+k4 INT No  false   1   NONE
+k5 TEXTYes false   \N  NONE,BLOOM_FILTER
+
+-- !tables_2 --
+test_manager_tb_1  DUP_KEYSk1  TINYINT TINYINT Yes true
\N  true
+   k2  DECIMAL(10, 2)  DECIMALV3(10, 2)Yes true
10.05   true
+   k3  CHAR(10)CHAR(10)Yes true\N  
BLOOM_FILTERtrue
+   k4  INT INT No  false   1   NONEtrue

+   k5  TEXTTEXTYes false   \N  
NONE,BLOOM_FILTER   true
+
diff --git a/regression-test/suites/manager/test_manager_interface_1.groovy 
b/regression-test/suites/manager/test_manager_interface_1.groovy
new file mode 100644
index 000..55d9f655453
--- /dev/null
+++ b/regression-test/suites/manager/test_manager_interface_1.groovy
@@ -0,0 +1,798 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.apache.doris.regression.suite.ClusterOptions
+import org.apache.doris.regression.util.NodeType
+
+import java.time.LocalDateTime
+import java.time.Duration
+import java.time.format.DateTimeFormatter
+
+
+
+suite('test_manager_interface_1',"p0") {
+
+
+
+logger.info("test_manager_interface_1 start")
+
+sql """ switch internal """
+
+
+String jdbcUrl = context.config.jdbcUrl
+def tokens = context.config.jdbcUrl.split('/')
+jdbcUrl=tokens[0] + "//" + tokens[2] + "/" + "?"
+String jdbcUser = context.config.jdbcUser
+String jdbcPassword = context.config.jdbcPassword
+String s3_endpoint = getS3Endpoint()
+String bucket = getS3BucketName()
+String driver_url = 
"https://${bucket}.${s3_endpoint}/regression/jdbc_driver/mysql-connector-java-8.0.25.jar";
+
+
+
+//select * from internal.information_schema.schemata
+def test_schemata = {
+logger.info("TEST select * from internal.information_schema.schemata");
+
+List>  schemata = sql   """select * from 
internal.information_schema.schemata"""
+for (int i = 0; i < schemata.size(); i++) {
+assertTrue(!schemata[i][0].isEmpty()) // CATALOG_NAME
+assertTrue(schemata[i][0].toLowerCase() != "null") 
+
+assertTrue(!schemata[i][1].isEmpty()) // SCHEMA_NAME
+assertTrue(schemata[i][1].toLowerCase() != "null") 
+}
+List>  schemata2 = sql   """select * from 
internal.information_schema.schemata where CATALOG_NAME = "i

(doris) branch branch-2.1 updated: [Fix](variant) ignore serialization of nothing type (#37006)

2024-06-28 Thread morningman
This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
 new d0c0a7b9ae4 [Fix](variant) ignore serialization of nothing type 
(#37006)
d0c0a7b9ae4 is described below

commit d0c0a7b9ae4d4f7efff5e0fc64c3b26004d3b51f
Author: lihangyu <15605149...@163.com>
AuthorDate: Fri Jun 28 18:41:40 2024 +0800

[Fix](variant) ignore serialization of nothing type (#37006)

picked from #36997
---
 be/src/vec/data_types/data_type_object.cpp   | 14 +++---
 regression-test/data/variant_p0/load.out | 12 ++--
 .../data/variant_p0/test_sub_path_pruning.out| 10 +-
 regression-test/suites/variant_p0/load.groovy|  8 
 .../suites/variant_p0/test_sub_path_pruning.groovy   | 20 +++-
 5 files changed, 33 insertions(+), 31 deletions(-)

diff --git a/be/src/vec/data_types/data_type_object.cpp 
b/be/src/vec/data_types/data_type_object.cpp
index 7adc4c17f56..7a75583cd7b 100644
--- a/be/src/vec/data_types/data_type_object.cpp
+++ b/be/src/vec/data_types/data_type_object.cpp
@@ -63,6 +63,9 @@ int64_t 
DataTypeObject::get_uncompressed_serialized_bytes(const IColumn& column,
 size += sizeof(uint32_t);
 for (const auto& entry : subcolumns) {
 auto type = entry->data.get_least_common_type();
+if (is_nothing(type)) {
+continue;
+}
 
 PColumnMeta column_meta_pb;
 column_meta_pb.set_name(entry->path.get_path());
@@ -91,15 +94,18 @@ char* DataTypeObject::serialize(const IColumn& column, 
char* buf, int be_exec_ve
 
 const auto& subcolumns = column_object.get_subcolumns();
 
-// 1. serialize num of subcolumns
-*reinterpret_cast(buf) = subcolumns.size();
+char* size_pos = buf;
 buf += sizeof(uint32_t);
 
+size_t num_of_columns = 0;
 // 2. serialize each subcolumn in a loop
 for (const auto& entry : subcolumns) {
 // 2.1 serialize subcolumn column meta pb (path and type)
 auto type = entry->data.get_least_common_type();
-
+if (is_nothing(type)) {
+continue;
+}
+++num_of_columns;
 PColumnMeta column_meta_pb;
 column_meta_pb.set_name(entry->path.get_path());
 type->to_pb_column_meta(&column_meta_pb);
@@ -113,6 +119,8 @@ char* DataTypeObject::serialize(const IColumn& column, 
char* buf, int be_exec_ve
 // 2.2 serialize subcolumn
 buf = type->serialize(entry->data.get_finalized_column(), buf, 
be_exec_version);
 }
+// serialize num of subcolumns
+*reinterpret_cast(size_pos) = num_of_columns;
 
 return buf;
 }
diff --git a/regression-test/data/variant_p0/load.out 
b/regression-test/data/variant_p0/load.out
index 3cbbd432b0e..8dce41f8e6a 100644
--- a/regression-test/data/variant_p0/load.out
+++ b/regression-test/data/variant_p0/load.out
@@ -79,11 +79,11 @@
 -- !sql --
 {"c":"123"}
 {"c":123}
-{"cc":[123.0]}
+{"cc":[123.2]}
 {"cc":[123.1]}
 {"ccc":123}
 {"ccc":123321}
-{"":123.0}
+{"":123.22}
 {"":123.11}
 {"c":[123]}
 {"c":[123456789]}
@@ -123,7 +123,7 @@
 1.101111800
 1. 17211
 \N 123456
-123191191
+123.22 191191
 \N 123456789101112
 
 -- !sql_7 --
@@ -155,7 +155,7 @@
 123{"A":123}
 123456 {"A":123456}
 123456789101112{"A":123456789101112}
-191191 {"A":191191,"a":123.0,"c":123}
+191191 {"A":191191,"a":123.22,"c":123}
 1800   {"A":1800,"a":1.10111,"c":[12345]}
 17211  {"A":17211,"a":1.,"c":11}
 
@@ -175,7 +175,7 @@
 \N 123456789101112 {"A":123456789101112}   \N
 \N \N  {"AA":[123456]} \N
 \N \N  {"AA":[123456789101112]}\N
-123191191  {"A":191191,"a":123.0,"c":123}  \N
+123.22 191191  {"A":191191,"a":123.22,"c":123} \N
 123\N  {"a":"123","c":123456}  \N
 1.101111800{"A":1800,"a":1.10111,"c":[12345]}  \N
 1. 17211   {"A":17211,"a":1.,"c":11}   \N
@@ -212,7 +212,7 @@
 [123]
 
 -- !sql_25 --
-5  54999.684   615
+5  54999.9935  615
 
 -- !sql_26 --
 5000
diff --git a/regression-test/data/variant_p0/test_sub_path_pruning.out 
b/regression-test/data/variant_p0/test_sub_path_pruning.out
index a48bc550d00..16328739167 100644
--- a/regression-test/data/variant_p0/test_sub_path_pruning.out
+++ b/regression-test/data/variant_p0/test_sub_path_pruning.out
@@ -233,11 +233,7 @@
 {"b":{"c":{"d":{"e":11}}},"c":{"d":{"e":12}},"d":{"e":13},"e":14}
 
 -- !sql --
-
-{"c":{"d":{"e":11}}}
-
--- !sql --
-""
+1
 {"c":{"d":{"e":11}}}
 
 -- !sql --
@@ -252,10 +248,6 @@
 \N
 
 
--- !sql --
-""
-{"e":11}
-
 -- !sql --
 1  1
 2  1
diff --git a/regression-test/suites/variant_p0/load.groovy 
b/regression-test/suites/variant_p0/load.groovy
index 572f7ce8ffc..f6e14f64c59 100644
--- a/regression-test/suites/variant_p0/load.gr

(doris) branch master updated: [chore](profile) rm useless code of profile (#36915)

2024-06-28 Thread morningman
This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
 new 8902df51ced [chore](profile) rm useless code of profile (#36915)
8902df51ced is described below

commit 8902df51ced892e2d08ddfc355e18242320a0339
Author: zhiqiang 
AuthorDate: Fri Jun 28 18:44:06 2024 +0800

[chore](profile) rm useless code of profile (#36915)
---
 .../doris/common/profile/ExecutionProfile.java | 23 --
 1 file changed, 23 deletions(-)

diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/common/profile/ExecutionProfile.java
 
b/fe/fe-core/src/main/java/org/apache/doris/common/profile/ExecutionProfile.java
index 3c683fced9d..ebe41c1146f 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/common/profile/ExecutionProfile.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/common/profile/ExecutionProfile.java
@@ -76,8 +76,6 @@ public class ExecutionProfile {
 private Map fragmentProfiles;
 // Profile for load channels. Only for load job.
 private RuntimeProfile loadChannelProfile;
-// FragmentId -> InstanceId -> RuntimeProfile
-private Map> 
fragmentInstancesProfiles;
 
 // use to merge profile from multi be
 private Map>> 
multiBeProfile = null;
@@ -85,8 +83,6 @@ public class ExecutionProfile {
 // Not serialize this property, it is only used to get profile id.
 private SummaryProfile summaryProfile;
 
-// BE only has instance id, does not have fragmentid, so should use this 
map to find fragmentid.
-private Map instanceIdToFragmentId;
 private Map fragmentIdBeNum;
 private Map seqNoToFragmentId;
 
@@ -112,8 +108,6 @@ public class ExecutionProfile {
 }
 loadChannelProfile = new RuntimeProfile("LoadChannels");
 root.addChild(loadChannelProfile);
-fragmentInstancesProfiles = Maps.newHashMap();
-instanceIdToFragmentId = Maps.newHashMap();
 }
 
 private List> getMultiBeProfile(int fragmentId) {
@@ -299,23 +293,6 @@ public class ExecutionProfile {
 
multiBeProfile.get(params.fragment_id).put(backend.getHeartbeatAddress(), 
taskProfile);
 }
 
-// MultiInstances may update the profile concurrently
-public synchronized void addInstanceProfile(PlanFragmentId fragmentId, 
TUniqueId instanceId,
-RuntimeProfile instanceProfile) {
-Map instanceProfiles = 
fragmentInstancesProfiles.get(fragmentId);
-if (instanceProfiles == null) {
-instanceProfiles = Maps.newHashMap();
-fragmentInstancesProfiles.put(fragmentId, instanceProfiles);
-}
-RuntimeProfile existingInstanceProfile = 
instanceProfiles.get(instanceId);
-if (existingInstanceProfile == null) {
-instanceProfiles.put(instanceId, instanceProfile);
-instanceIdToFragmentId.put(instanceId, fragmentId);
-fragmentProfiles.get(fragmentId.asInt()).addChild(instanceProfile);
-return;
-}
-}
-
 public synchronized void addFragmentBackend(PlanFragmentId fragmentId, 
Long backendId) {
 fragmentIdBeNum.put(fragmentId.asInt(), 
fragmentIdBeNum.get(fragmentId.asInt()) + 1);
 }


-
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org



(doris) branch branch-2.0 updated: [fix](multicatalog) fix npe issue when alter property for a non-exist catalog (#36951)

2024-06-28 Thread lide
This is an automated email from the ASF dual-hosted git repository.

lide pushed a commit to branch branch-2.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.0 by this push:
 new 54592d048a0 [fix](multicatalog) fix npe issue when alter property for 
a non-exist catalog (#36951)
54592d048a0 is described below

commit 54592d048a0c3e0eff698d0c8a062f9e382063db
Author: Yulei-Yang 
AuthorDate: Fri Jun 28 18:52:43 2024 +0800

[fix](multicatalog) fix npe issue when alter property for a non-exist 
catalog (#36951)
---
 fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java  | 2 +-
 .../suites/external_table_p2/hive/test_external_catalog_hive.groovy   | 4 
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java
index d0c8f91800d..ac809812260 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java
@@ -348,10 +348,10 @@ public class CatalogMgr implements Writable, 
GsonPostProcessable {
 writeLock();
 try {
 CatalogIf catalog = nameToCatalog.get(stmt.getCatalogName());
-Map oldProperties = catalog.getProperties();
 if (catalog == null) {
 throw new DdlException("No catalog found with name: " + 
stmt.getCatalogName());
 }
+Map oldProperties = catalog.getProperties();
 if (stmt.getNewProperties().containsKey("type") && 
!catalog.getType()
 .equalsIgnoreCase(stmt.getNewProperties().get("type"))) {
 throw new DdlException("Can't modify the type of catalog 
property with name: " + stmt.getCatalogName());
diff --git 
a/regression-test/suites/external_table_p2/hive/test_external_catalog_hive.groovy
 
b/regression-test/suites/external_table_p2/hive/test_external_catalog_hive.groovy
index ef2b5a32e42..45b0b7dc9ca 100644
--- 
a/regression-test/suites/external_table_p2/hive/test_external_catalog_hive.groovy
+++ 
b/regression-test/suites/external_table_p2/hive/test_external_catalog_hive.groovy
@@ -152,5 +152,9 @@ suite("test_external_catalog_hive", "p2") {
 """
 exception "Failed to init access controller: bound must be 
positive"
 }
+
+   sql """DROP CATALOG if exists ctl_not_exist_not_exist"""
+def res4 = sql """ALTER CATALOG ctl_not_exist_not_exist SET PROPERTIES 
('s3.access_key' = '')"""
+assertTrue(res4.contains("No catalog found with name"))
 }
 }


-
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org



(doris) branch master updated (8902df51ced -> e348d797d00)

2024-06-28 Thread morrysnow
This is an automated email from the ASF dual-hosted git repository.

morrysnow pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


from 8902df51ced [chore](profile) rm useless code of profile (#36915)
 add e348d797d00 [chore](upgrade) turn off fallback_to_original_planner 
when upgrade (#37005)

No new revisions were added by this update.

Summary of changes:
 fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java | 11 +--
 1 file changed, 9 insertions(+), 2 deletions(-)


-
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org



(doris) branch master updated (e348d797d00 -> 70b816b3060)

2024-06-28 Thread gavinchou
This is an automated email from the ASF dual-hosted git repository.

gavinchou pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


from e348d797d00 [chore](upgrade) turn off fallback_to_original_planner 
when upgrade (#37005)
 add 70b816b3060 [enhancement](cloud) batching get visible version from 
MetaService (#34615)

No new revisions were added by this update.

Summary of changes:
 cloud/src/meta-service/meta_service.cpp|  3 +++
 cloud/src/meta-service/meta_service_txn.cpp|  6 +-
 .../java/org/apache/doris/catalog/OlapTable.java   | 15 ++
 .../java/org/apache/doris/catalog/Partition.java   |  7 +++
 .../apache/doris/cloud/catalog/CloudPartition.java | 24 --
 .../cloud/datasource/CloudInternalCatalog.java |  4 
 .../transaction/CloudGlobalTransactionMgr.java |  4 +++-
 .../doris/common/NereidsSqlCacheManager.java   |  9 
 .../org/apache/doris/nereids/SqlCacheContext.java  |  1 -
 .../org/apache/doris/qe/cache/CacheAnalyzer.java   | 11 ++
 gensrc/proto/cloud.proto   |  3 +++
 11 files changed, 66 insertions(+), 21 deletions(-)


-
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org



(doris) branch master updated (70b816b3060 -> 584ef7d36b1)

2024-06-28 Thread kxiao
This is an automated email from the ASF dual-hosted git repository.

kxiao pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


from 70b816b3060 [enhancement](cloud) batching get visible version from 
MetaService (#34615)
 add 584ef7d36b1 [fix](regression test) fix unstable single compaction test 
p2 (#36881)

No new revisions were added by this update.

Summary of changes:
 .../test_single_compaction_fault_injection.groovy  | 68 
 .../test_single_replica_compaction.groovy  | 72 --
 2 files changed, 83 insertions(+), 57 deletions(-)


-
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org



(doris) branch master updated: [improvement](jdbc catalog) Modify the maximum number of connections in the connection pool to 30 by default (#36720)

2024-06-28 Thread zykkk
This is an automated email from the ASF dual-hosted git repository.

zykkk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
 new bafbb944a9a [improvement](jdbc catalog) Modify the maximum number of 
connections in the connection pool to 30 by default (#36720)
bafbb944a9a is described below

commit bafbb944a9afa714958297dbd24440ccfdf7dfc0
Author: zy-kkk 
AuthorDate: Fri Jun 28 20:13:34 2024 +0800

[improvement](jdbc catalog) Modify the maximum number of connections in the 
connection pool to 30 by default (#36720)

In many cases, we found that users would use JDBC Catalog to perform a
large number of queries, which resulted in the maximum of 10 connections
being insufficient, so I adjusted it to 30, which covered most needs.
---
 .../src/main/java/org/apache/doris/jdbc/JdbcDataSourceConfig.java | 2 +-
 fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcResource.java   | 2 +-
 .../src/test/java/org/apache/doris/catalog/JdbcResourceTest.java  | 4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git 
a/fe/be-java-extensions/jdbc-scanner/src/main/java/org/apache/doris/jdbc/JdbcDataSourceConfig.java
 
b/fe/be-java-extensions/jdbc-scanner/src/main/java/org/apache/doris/jdbc/JdbcDataSourceConfig.java
index 5fdbc211ab0..a99377add25 100644
--- 
a/fe/be-java-extensions/jdbc-scanner/src/main/java/org/apache/doris/jdbc/JdbcDataSourceConfig.java
+++ 
b/fe/be-java-extensions/jdbc-scanner/src/main/java/org/apache/doris/jdbc/JdbcDataSourceConfig.java
@@ -31,7 +31,7 @@ public class JdbcDataSourceConfig {
 private TJdbcOperation op;
 private TOdbcTableType tableType;
 private int connectionPoolMinSize = 1;
-private int connectionPoolMaxSize = 10;
+private int connectionPoolMaxSize = 30;
 private int connectionPoolMaxWaitTime = 5000;
 private int connectionPoolMaxLifeTime = 180;
 private boolean connectionPoolKeepAlive = false;
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcResource.java 
b/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcResource.java
index 3878500f917..1db801b024a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcResource.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcResource.java
@@ -140,7 +140,7 @@ public class JdbcResource extends Resource {
 OPTIONAL_PROPERTIES_DEFAULT_VALUE.put(INCLUDE_DATABASE_LIST, "");
 OPTIONAL_PROPERTIES_DEFAULT_VALUE.put(EXCLUDE_DATABASE_LIST, "");
 OPTIONAL_PROPERTIES_DEFAULT_VALUE.put(CONNECTION_POOL_MIN_SIZE, "1");
-OPTIONAL_PROPERTIES_DEFAULT_VALUE.put(CONNECTION_POOL_MAX_SIZE, "10");
+OPTIONAL_PROPERTIES_DEFAULT_VALUE.put(CONNECTION_POOL_MAX_SIZE, "30");
 OPTIONAL_PROPERTIES_DEFAULT_VALUE.put(CONNECTION_POOL_MAX_LIFE_TIME, 
"180");
 OPTIONAL_PROPERTIES_DEFAULT_VALUE.put(CONNECTION_POOL_MAX_WAIT_TIME, 
"5000");
 OPTIONAL_PROPERTIES_DEFAULT_VALUE.put(CONNECTION_POOL_KEEP_ALIVE, 
"false");
diff --git 
a/fe/fe-core/src/test/java/org/apache/doris/catalog/JdbcResourceTest.java 
b/fe/fe-core/src/test/java/org/apache/doris/catalog/JdbcResourceTest.java
index 8e004d4b236..81c2157686a 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/catalog/JdbcResourceTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/JdbcResourceTest.java
@@ -87,7 +87,7 @@ public class JdbcResourceTest {
 // Verify the default properties were applied during the replay
 Map properties = jdbcResource.getCopiedProperties();
 Assert.assertEquals("1", properties.get("connection_pool_min_size"));
-Assert.assertEquals("10", properties.get("connection_pool_max_size"));
+Assert.assertEquals("30", properties.get("connection_pool_max_size"));
 Assert.assertEquals("180", 
properties.get("connection_pool_max_life_time"));
 Assert.assertEquals("5000", 
properties.get("connection_pool_max_wait_time"));
 Assert.assertEquals("false", 
properties.get("connection_pool_keep_alive"));
@@ -110,7 +110,7 @@ public class JdbcResourceTest {
 // Verify the default properties were applied during the replay
 Map properties = 
replayedResource.getCopiedProperties();
 Assert.assertEquals("1", properties.get("connection_pool_min_size"));
-Assert.assertEquals("10", properties.get("connection_pool_max_size"));
+Assert.assertEquals("30", properties.get("connection_pool_max_size"));
 Assert.assertEquals("180", 
properties.get("connection_pool_max_life_time"));
 Assert.assertEquals("5000", 
properties.get("connection_pool_max_wait_time"));
 Assert.assertEquals("false", 
properties.get("connection_pool_keep_alive"));


-
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org

(doris) branch master updated: [fix](statistics)Escape fetch partition stats sql. (#36941)

2024-06-28 Thread lijibing
This is an automated email from the ASF dual-hosted git repository.

lijibing pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
 new 9229dcd62ab [fix](statistics)Escape fetch partition stats sql. (#36941)
9229dcd62ab is described below

commit 9229dcd62ab91bf9a8939c6f58d5851cdfde0ead
Author: Jibing-Li <64681310+jibing...@users.noreply.github.com>
AuthorDate: Fri Jun 28 20:51:52 2024 +0800

[fix](statistics)Escape fetch partition stats sql. (#36941)

SQL to fetch partition stats needs to be escaped.
---
 .../apache/doris/statistics/AnalysisManager.java   |  2 +-
 .../PartitionColumnStatisticCacheLoader.java   |  2 +-
 .../doris/statistics/StatisticsRepository.java |  6 ++---
 .../suites/statistics/test_partition_stats.groovy  | 27 ++
 4 files changed, 32 insertions(+), 5 deletions(-)

diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/statistics/AnalysisManager.java 
b/fe/fe-core/src/main/java/org/apache/doris/statistics/AnalysisManager.java
index 22bc11971c2..1e95adf1714 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/statistics/AnalysisManager.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/AnalysisManager.java
@@ -788,7 +788,7 @@ public class AnalysisManager implements Writable {
 StringBuilder partNamePredicate = new StringBuilder();
 while (iterator.hasNext()) {
 partNamePredicate.append("'");
-partNamePredicate.append(iterator.next());
+
partNamePredicate.append(StatisticsUtil.escapeSQL(iterator.next()));
 partNamePredicate.append("'");
 partNamePredicate.append(",");
 }
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/statistics/PartitionColumnStatisticCacheLoader.java
 
b/fe/fe-core/src/main/java/org/apache/doris/statistics/PartitionColumnStatisticCacheLoader.java
index ac807a0d5f7..c365f6b1a74 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/statistics/PartitionColumnStatisticCacheLoader.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/statistics/PartitionColumnStatisticCacheLoader.java
@@ -57,7 +57,7 @@ public class PartitionColumnStatisticCacheLoader extends
 private Optional 
loadFromPartitionStatsTable(PartitionColumnStatisticCacheKey key) {
 List partitionResults;
 try {
-String partName = "'" + key.partId + "'";
+String partName = "'" + StatisticsUtil.escapeSQL(key.partId) + "'";
 partitionResults = StatisticsRepository.loadPartitionColumnStats(
 key.catalogId, key.dbId, key.tableId, key.idxId, partName, 
key.colName);
 } catch (InternalQueryExecutionException e) {
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsRepository.java
 
b/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsRepository.java
index bbcc9de3a28..81192762f99 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsRepository.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsRepository.java
@@ -144,12 +144,12 @@ public class StatisticsRepository {
 params.put("tableId", String.valueOf(table.getId()));
 StringJoiner sj = new StringJoiner(",");
 for (String colName : columnNames) {
-sj.add("'" + colName + "'");
+sj.add("'" + StatisticsUtil.escapeSQL(colName) + "'");
 }
 params.put("columnInfo", sj.toString());
 sj = new StringJoiner(",");
 for (String part : partitionNames) {
-sj.add("'" + part + "'");
+sj.add("'" + StatisticsUtil.escapeSQL(part) + "'");
 }
 params.put("partitionInfo", sj.toString());
 return 
StatisticsUtil.executeQuery(FETCH_PARTITIONS_STATISTIC_TEMPLATE, params);
@@ -418,7 +418,7 @@ public class StatisticsRepository {
 params.put("tableId", String.valueOf(tableId));
 params.put("indexId", String.valueOf(idxId));
 params.put("partName", partName);
-params.put("columnId", colName);
+params.put("columnId", StatisticsUtil.escapeSQL(colName));
 return StatisticsUtil.execStatisticQuery(new StringSubstitutor(params)
 .replace(FETCH_PARTITION_STATISTIC_TEMPLATE));
 }
diff --git a/regression-test/suites/statistics/test_partition_stats.groovy 
b/regression-test/suites/statistics/test_partition_stats.groovy
index 7658d50c47e..05216477323 100644
--- a/regression-test/suites/statistics/test_partition_stats.groovy
+++ b/regression-test/suites/statistics/test_partition_stats.groovy
@@ -928,6 +928,33 @@ suite("test_partition_stats") {
 assertEquals("1", result[0][7])
 assertEquals("20004", result[0][8])
 
+// Test escape special col name.
+sql """
+create table part9(
+k int null,
+v variant null
+)
+duplicate key (k)
+ 

(doris) branch branch-2.0 updated: [fix](oom) avoid oom when a lot of tablets fail on load (#36962)

2024-06-28 Thread dataroaring
This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch branch-2.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.0 by this push:
 new f95221e34e1 [fix](oom) avoid oom when a lot of tablets fail on load 
(#36962)
f95221e34e1 is described below

commit f95221e34e167e5149dda9dfcc7ae5962b1582c0
Author: Yongqiang YANG <98214048+dataroar...@users.noreply.github.com>
AuthorDate: Fri Jun 28 21:07:08 2024 +0800

[fix](oom) avoid oom when a lot of tablets fail on load (#36962)

When a lot of tablets fail during loading, the detailed failure information
would cause OOM.

## Proposed changes

Issue Number: close #xxx


---
 .../doris/transaction/DatabaseTransactionMgr.java  | 59 ++
 1 file changed, 48 insertions(+), 11 deletions(-)

diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/transaction/DatabaseTransactionMgr.java
 
b/fe/fe-core/src/main/java/org/apache/doris/transaction/DatabaseTransactionMgr.java
index 36b32f01069..7ef043136ba 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/transaction/DatabaseTransactionMgr.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/transaction/DatabaseTransactionMgr.java
@@ -1028,6 +1028,45 @@ public class DatabaseTransactionMgr {
 return true;
 }
 
+private class TabletsPublishResultLogs {
+public List quorumSuccLogs = Lists.newArrayList();
+public List timeoutSuccLogs = Lists.newArrayList();
+public List failedLogs = Lists.newArrayList();
+
+public void addQuorumSuccLog(String log) {
+if (quorumSuccLogs.size() < 16) {
+quorumSuccLogs.add(log);
+}
+}
+
+public void addTimeoutSuccLog(String log) {
+if (timeoutSuccLogs.size() < 16) {
+timeoutSuccLogs.add(log);
+}
+}
+
+public void addFailedLog(String log) {
+if (failedLogs.size() < 16) {
+failedLogs.add(log);
+}
+}
+
+public void log() {
+// log failed logs
+for (String log : failedLogs) {
+LOG.info(log);
+}
+// log timeout succ logs
+for (String log : timeoutSuccLogs) {
+LOG.info(log);
+}
+// log quorum succ logs
+for (String log : quorumSuccLogs) {
+LOG.info(log);
+}
+}
+}
+
 private PublishResult finishCheckQuorumReplicas(TransactionState 
transactionState,
 List> relatedTblPartitions,
 Set errorReplicaIds) {
@@ -1042,7 +1081,7 @@ public class DatabaseTransactionMgr {
 List tabletSuccReplicas = Lists.newArrayList();
 List tabletWriteFailedReplicas = Lists.newArrayList();
 List tabletVersionFailedReplicas = Lists.newArrayList();
-List logs = Lists.newArrayList();
+TabletsPublishResultLogs logs = new TabletsPublishResultLogs();
 
 Map publishTasks = 
transactionState.getPublishVersionTasks();
 PublishResult publishResult = PublishResult.QUORUM_SUCC;
@@ -1092,9 +1131,9 @@ public class DatabaseTransactionMgr {
 if (hasFailedReplica) {
 String writeDetail = 
getTabletWriteDetail(tabletSuccReplicas,
 tabletWriteFailedReplicas, 
tabletVersionFailedReplicas);
-logs.add(String.format("publish version quorum 
succ for transaction %s on tablet %s"
-+ " with version %s, and has failed 
replicas, load require replica num %s. "
-+ "table %s, partition %s, tablet detail: 
%s",
+logs.addQuorumSuccLog(String.format("publish 
version quorum succ for transaction %s on "
++ "tablet %s with version %s, and has 
failed replicas, load require replica num "
++ " %s. table %s, partition %s, tablet 
detail: %s",
 transactionState, tablet.getId(), 
newVersion,
 loadRequiredReplicaNum, tableId, 
partitionId, writeDetail));
 }
@@ -1116,8 +1155,8 @@ public class DatabaseTransactionMgr {
 // that are being publised exists on a few replicas we 
should go
 // ahead, otherwise data may be lost and thre
 // publish task hangs forever.
-logs.add(String.format("publish version timeout succ 
for transaction %s on tablet %s "
-+ "with version %s, and has failed replicas, 
load require replica num %s. "
+logs.addTimeoutSuccLog(String.format("publish version 
timeout succ for transaction %s on

(doris) branch master updated: [fix](mtmv)fix mtmv dead lock (#37009)

2024-06-28 Thread dataroaring
This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
 new 620653f1d4d [fix](mtmv)fix mtmv dead lock (#37009)
620653f1d4d is described below

commit 620653f1d4d143b3e862d2810bb27a1e9105ff2f
Author: zhangdong <493738...@qq.com>
AuthorDate: Fri Jun 28 21:09:35 2024 +0800

[fix](mtmv)fix mtmv dead lock (#37009)

Cause of occurrence:
- dropBaseTable: Holding db's writeLock ,notify mtmv alter status to
schema_change,need mv's writeLock
- task(insert overwrite): Holding mv's readLock, when generating plan need
db's readLock

fix:
- mtmv alter status to schema_change need mv's writeMvLock instead of
mv's writeLock
---
 fe/fe-core/src/main/java/org/apache/doris/alter/Alter.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/Alter.java 
b/fe/fe-core/src/main/java/org/apache/doris/alter/Alter.java
index a029f604dfd..26045f8527a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/alter/Alter.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/alter/Alter.java
@@ -916,7 +916,7 @@ public class Alter {
 Database db = 
Env.getCurrentInternalCatalog().getDbOrDdlException(tbl.getDb());
 mtmv = (MTMV) db.getTableOrMetaException(tbl.getTbl(), 
TableType.MATERIALIZED_VIEW);
 
-mtmv.writeLock();
+mtmv.writeMvLock();
 switch (alterMTMV.getOpType()) {
 case ALTER_REFRESH_INFO:
 mtmv.alterRefreshInfo(alterMTMV.getRefreshInfo());
@@ -945,7 +945,7 @@ public class Alter {
 LOG.warn(e);
 } finally {
 if (mtmv != null) {
-mtmv.writeUnlock();
+mtmv.writeMvUnlock();
 }
 }
 }


-
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org



(doris) branch master updated: [Fix](autoinc) try fix concurrent load problem with auto inc column (#36421)

2024-06-28 Thread dataroaring
This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
 new 10596044c84 [Fix](autoinc) try fix concurrent load problem with auto 
inc column (#36421)
10596044c84 is described below

commit 10596044c8481e94250c830268686bf7fdfa3322
Author: bobhan1 
AuthorDate: Fri Jun 28 21:12:21 2024 +0800

[Fix](autoinc) try fix concurrent load problem with auto inc column (#36421)

1. increase the value of `AutoIncrementGenerator.BATCH_ID_INTERVAL` to
reduce the number of writes to BDBJE. (the default value of
`config::auto_inc_prefetch_size_ratio` is 10 and the default value of
`AutoIncIDBuffer::_batch_size` is 4064, so mostly the request length is
40960)
2. only allow master fe to offer `getAutoIncrementRange` service
3. write editlog before update `batchEndId` in memory in
`getAutoIncrementRange `
4. refactor `AutoIncIDBuffer`
---
 be/src/vec/sink/autoinc_buffer.cpp | 150 +
 be/src/vec/sink/autoinc_buffer.h   |  35 +++--
 .../doris/catalog/AutoIncrementGenerator.java  |   5 +-
 .../apache/doris/service/FrontendServiceImpl.java  |  10 ++
 gensrc/thrift/FrontendService.thrift   |   1 +
 .../unique/test_unique_auto_inc_concurrent.out |  10 ++
 .../unique/test_unique_auto_inc_concurrent.groovy  |  59 
 7 files changed, 203 insertions(+), 67 deletions(-)

diff --git a/be/src/vec/sink/autoinc_buffer.cpp 
b/be/src/vec/sink/autoinc_buffer.cpp
index c7c096ec6e8..f83dbcb55b8 100644
--- a/be/src/vec/sink/autoinc_buffer.cpp
+++ b/be/src/vec/sink/autoinc_buffer.cpp
@@ -19,14 +19,15 @@
 
 #include 
 
-#include 
+#include 
+#include 
 
+#include "common/logging.h"
 #include "common/status.h"
 #include "runtime/client_cache.h"
 #include "runtime/exec_env.h"
 #include "util/runtime_profile.h"
 #include "util/thrift_rpc_helper.h"
-#include "vec/sink/vtablet_block_convertor.h"
 
 namespace doris::vectorized {
 
@@ -42,54 +43,11 @@ void AutoIncIDBuffer::set_batch_size_at_least(size_t 
batch_size) {
 }
 }
 
-void AutoIncIDBuffer::_wait_for_prefetching() {
-if (_is_fetching) {
-_rpc_token->wait();
-}
-}
-
-Status AutoIncIDBuffer::sync_request_ids(size_t length,
- std::vector>* result) {
-std::unique_lock lock(_mutex);
-RETURN_IF_ERROR(_prefetch_ids(_prefetch_size()));
-if (_front_buffer.second > 0) {
-auto min_length = std::min(_front_buffer.second, length);
-length -= min_length;
-result->emplace_back(_front_buffer.first, min_length);
-_front_buffer.first += min_length;
-_front_buffer.second -= min_length;
-}
-if (length > 0) {
-_wait_for_prefetching();
-if (!_rpc_status.ok()) {
-return _rpc_status;
-}
-
-{
-std::lock_guard lock(_backend_buffer_latch);
-std::swap(_front_buffer, _backend_buffer);
-}
-
-DCHECK_LE(length, _front_buffer.second);
-if (length > _front_buffer.second) {
-return Status::RpcError("auto inc sync result length > front 
buffer. " +
-std::to_string(length) + " vs " +
-std::to_string(_front_buffer.second));
-}
-result->emplace_back(_front_buffer.first, length);
-_front_buffer.first += length;
-_front_buffer.second -= length;
-}
-return Status::OK();
-}
-
-Status AutoIncIDBuffer::_prefetch_ids(size_t length) {
-if (_front_buffer.second > _low_water_level_mark() || _is_fetching) {
-return Status::OK();
-}
+Result AutoIncIDBuffer::_fetch_ids_from_fe(size_t length) {
+constexpr uint32_t FETCH_AUTOINC_MAX_RETRY_TIMES = 3;
+_rpc_status = Status::OK();
 TNetworkAddress master_addr = 
ExecEnv::GetInstance()->master_info()->network_address;
-_is_fetching = true;
-RETURN_IF_ERROR(_rpc_token->submit_func([=, this]() {
+for (uint32_t retry_times = 0; retry_times < 
FETCH_AUTOINC_MAX_RETRY_TIMES; retry_times++) {
 TAutoIncrementRangeRequest request;
 TAutoIncrementRangeResult result;
 request.__set_db_id(_db_id);
@@ -97,7 +55,7 @@ Status AutoIncIDBuffer::_prefetch_ids(size_t length) {
 request.__set_column_id(_column_id);
 request.__set_length(length);
 
-int64_t get_auto_inc_range_rpc_ns;
+int64_t get_auto_inc_range_rpc_ns = 0;
 {
 SCOPED_RAW_TIMER(&get_auto_inc_range_rpc_ns);
 _rpc_status = ThriftRpcHelper::rpc(
@@ -109,15 +67,95 @@ Status AutoIncIDBuffer::_prefetch_ids(size_t length) {
 LOG(INFO) << "[auto-inc-range][start=" << result.start << ",length=" 
<< result.length
   << "][elapsed=" << get_auto_inc_range_rpc_ns / 100 << " 
ms]"

(doris) branch master updated: [bugfix](testcase)add java error log output (#35998)

2024-06-28 Thread morningman
This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
 new aeb89db2a57 [bugfix](testcase)add java error log output (#35998)
aeb89db2a57 is described below

commit aeb89db2a5717dd2faf0bc07aaec5cd9a1668f7b
Author: wuwenchi 
AuthorDate: Fri Jun 28 21:56:43 2024 +0800

[bugfix](testcase)add java error log output (#35998)

In the testcase pipeline, this spark-connector case has a 4% chance of
error, but since there is no error log, the cause of the error cannot be
located.
Therefore, an error log is added to facilitate problem location later.
---
 .../suites/connector_p0/spark_connector/spark_connector.groovy | 10 --
 .../connector_p0/spark_connector/spark_connector_arrow.groovy  | 10 --
 .../spark_connector/spark_connector_read_type.groovy   | 10 --
 3 files changed, 24 insertions(+), 6 deletions(-)

diff --git 
a/regression-test/suites/connector_p0/spark_connector/spark_connector.groovy 
b/regression-test/suites/connector_p0/spark_connector/spark_connector.groovy
index ecd4e6dfc14..2bd618fcc3c 100644
--- a/regression-test/suites/connector_p0/spark_connector/spark_connector.groovy
+++ b/regression-test/suites/connector_p0/spark_connector/spark_connector.groovy
@@ -28,7 +28,13 @@ suite("spark_connector", "connector") {
 logger.info("finish download spark doris demo ...")
 def run_cmd = "java -jar spark-doris-demo.jar 
$context.config.feHttpAddress $context.config.feHttpUser 
regression_test_connector_p0_spark_connector.$tableName"
 logger.info("run_cmd : $run_cmd")
-def run_spark_jar = run_cmd.execute().getText()
-logger.info("result: $run_spark_jar")
+def proc = run_cmd.execute()
+def sout = new StringBuilder()
+def serr = new StringBuilder()
+proc.consumeProcessOutput(sout, serr)
+proc.waitForOrKill(1200_000)
+if (proc.exitValue() != 0) {
+  logger.warn("failed to execute jar: code=${proc.exitValue()}, " + 
"output: ${sout.toString()}, error: ${serr.toString()}")
+}
 qt_select """ select * from $tableName order by order_id"""
 }
diff --git 
a/regression-test/suites/connector_p0/spark_connector/spark_connector_arrow.groovy
 
b/regression-test/suites/connector_p0/spark_connector/spark_connector_arrow.groovy
index 1cd2ed31d2e..a5fbc3b2835 100644
--- 
a/regression-test/suites/connector_p0/spark_connector/spark_connector_arrow.groovy
+++ 
b/regression-test/suites/connector_p0/spark_connector/spark_connector_arrow.groovy
@@ -136,8 +136,14 @@ suite("spark_connector_for_arrow", "connector") {
 logger.info("finish download spark doris demo ...")
 def run_cmd = "java -cp ${jar_name} 
org.apache.doris.spark.testcase.TestStreamLoadForArrowType 
$context.config.feHttpAddress $context.config.feHttpUser 
regression_test_connector_p0_spark_connector"
 logger.info("run_cmd : $run_cmd")
-def run_spark_jar = run_cmd.execute().getText()
-logger.info("result: $run_spark_jar")
+def proc = run_cmd.execute()
+def sout = new StringBuilder()
+def serr = new StringBuilder()
+proc.consumeProcessOutput(sout, serr)
+proc.waitForOrKill(1200_000)
+if (proc.exitValue() != 0) {
+  logger.warn("failed to execute jar: code=${proc.exitValue()}, " + 
"output: ${sout.toString()}, error: ${serr.toString()}")
+}
 
 qt_q01 """ select * from spark_connector_primitive """
 qt_q02 """ select * from spark_connector_array """
diff --git 
a/regression-test/suites/connector_p0/spark_connector/spark_connector_read_type.groovy
 
b/regression-test/suites/connector_p0/spark_connector/spark_connector_read_type.groovy
index 32a3ebf68c7..632e5e3d401 100644
--- 
a/regression-test/suites/connector_p0/spark_connector/spark_connector_read_type.groovy
+++ 
b/regression-test/suites/connector_p0/spark_connector/spark_connector_read_type.groovy
@@ -99,8 +99,14 @@ suite("spark_connector_read_type", "connector") {
 logger.info("finish download spark doris demo ...")
 def run_cmd = "java -jar spark-doris-read.jar 
$context.config.feHttpAddress $context.config.feHttpUser 
regression_test_connector_p0_spark_connector.$tableReadName 
regression_test_connector_p0_spark_connector.$tableWriterName"
 logger.info("run_cmd : $run_cmd")
-def run_spark_jar = run_cmd.execute().getText()
-logger.info("result: $run_spark_jar")
+def proc = run_cmd.execute()
+def sout = new StringBuilder()
+def serr = new StringBuilder()
+proc.consumeProcessOutput(sout, serr)
+proc.waitForOrKill(1200_000)
+if (proc.exitValue() != 0) {
+  logger.warn("failed to execute jar: code=${proc.exitValue()}, " + 
"output: ${sout.toString()}, error: ${serr.toString()}")
+}
 
 qt_select """ select * from $tableWriterName order by id"""
 


-

(doris) branch master updated: Revert "[feature](cloud) support file cache only cache index pages (#36273) (#36797)

2024-06-28 Thread gavinchou
This is an automated email from the ASF dual-hosted git repository.

gavinchou pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
 new 96178236866 Revert "[feature](cloud) support file cache only cache 
index pages (#36273) (#36797)
96178236866 is described below

commit 96178236866daa8358e06349edb366cb1cb5dca7
Author: zhengyu 
AuthorDate: Sat Jun 29 02:13:28 2024 +0800

Revert "[feature](cloud) support file cache only cache index pages (#36273) 
(#36797)

This reverts commit f5c40a5c3bac3091707c8324c422889fa7f030c8.
This is an experimental (and buggy) commit. I have found that caching
index only helps little. With that being the result, I think it is a good
time to
revert it.
---
 be/src/common/config.cpp| 2 --
 be/src/common/config.h  | 2 --
 be/src/olap/rowset/segment_v2/column_reader.cpp | 4 +---
 be/src/olap/rowset/segment_v2/indexed_column_reader.cpp | 3 +--
 4 files changed, 2 insertions(+), 9 deletions(-)

diff --git a/be/src/common/config.cpp b/be/src/common/config.cpp
index 91707b5a8a2..3e948f4cca2 100644
--- a/be/src/common/config.cpp
+++ b/be/src/common/config.cpp
@@ -1003,8 +1003,6 @@ DEFINE_Bool(enable_file_cache, "false");
 // format: 
[{"path":"/path/to/file_cache","total_size":21474836480,"query_limit":10737418240},{"path":"/path/to/file_cache2","total_size":21474836480,"query_limit":10737418240}]
 DEFINE_String(file_cache_path, "");
 DEFINE_Int64(file_cache_each_block_size, "1048576"); // 1MB
-// only cache index pages (prerequisite: enable_file_cache = true)
-DEFINE_Bool(file_cache_index_only, "false");
 
 DEFINE_Bool(clear_file_cache, "false");
 DEFINE_Bool(enable_file_cache_query_limit, "false");
diff --git a/be/src/common/config.h b/be/src/common/config.h
index f35aeb61747..6f0065e2fe3 100644
--- a/be/src/common/config.h
+++ b/be/src/common/config.h
@@ -1047,8 +1047,6 @@ DECLARE_Bool(enable_file_cache);
 // format: 
[{"path":"/path/to/file_cache","total_size":21474836480,"query_limit":10737418240,"normal_percent":85,
 "disposable_percent":10, "index_percent":5}]
 DECLARE_String(file_cache_path);
 DECLARE_Int64(file_cache_each_block_size);
-// only cache index pages (prerequisite: enable_file_cache = true)
-DECLARE_Bool(file_cache_index_only);
 DECLARE_Bool(clear_file_cache);
 DECLARE_Bool(enable_file_cache_query_limit);
 DECLARE_Int32(file_cache_enter_disk_resource_limit_mode_percent);
diff --git a/be/src/olap/rowset/segment_v2/column_reader.cpp 
b/be/src/olap/rowset/segment_v2/column_reader.cpp
index 7396bce1a55..392917e0d83 100644
--- a/be/src/olap/rowset/segment_v2/column_reader.cpp
+++ b/be/src/olap/rowset/segment_v2/column_reader.cpp
@@ -342,11 +342,9 @@ Status ColumnReader::read_page(const 
ColumnIteratorOptions& iter_opts, const Pag
PageHandle* handle, Slice* page_body, 
PageFooterPB* footer,
BlockCompressionCodec* codec) const {
 iter_opts.sanity_check();
-bool use_page_cache = iter_opts.use_page_cache &&
-  (!config::file_cache_index_only || iter_opts.type == 
INDEX_PAGE);
 PageReadOptions opts {
 .verify_checksum = _opts.verify_checksum,
-.use_page_cache = use_page_cache,
+.use_page_cache = iter_opts.use_page_cache,
 .kept_in_memory = _opts.kept_in_memory,
 .type = iter_opts.type,
 .file_reader = iter_opts.file_reader,
diff --git a/be/src/olap/rowset/segment_v2/indexed_column_reader.cpp 
b/be/src/olap/rowset/segment_v2/indexed_column_reader.cpp
index 94429920877..59251b5595d 100644
--- a/be/src/olap/rowset/segment_v2/indexed_column_reader.cpp
+++ b/be/src/olap/rowset/segment_v2/indexed_column_reader.cpp
@@ -115,9 +115,8 @@ Status IndexedColumnReader::read_page(const PagePointer& 
pp, PageHandle* handle,
   PageFooterPB* footer, PageTypePB type,
   BlockCompressionCodec* codec, bool 
pre_decode) const {
 OlapReaderStatistics tmp_stats;
-bool use_page_cache = _use_page_cache && (!config::file_cache_index_only 
|| type == INDEX_PAGE);
 PageReadOptions opts {
-.use_page_cache = use_page_cache,
+.use_page_cache = _use_page_cache,
 .kept_in_memory = _kept_in_memory,
 .pre_decode = pre_decode,
 .type = type,


-
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org



(doris) branch branch-2.1 updated: [Improvement](set) enable admin_set_frontend_config can apply to all fe (#37022)

2024-06-28 Thread morningman
This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
 new a9855ca1738 [Improvement](set) enable admin_set_frontend_config can 
apply to all fe (#37022)
a9855ca1738 is described below

commit a9855ca17387369849cb489e376b11b16c0757cc
Author: Yulei-Yang 
AuthorDate: Sat Jun 29 09:17:38 2024 +0800

[Improvement](set) enable admin_set_frontend_config can apply to all fe 
(#37022)

bp #34685
---
 fe/fe-core/src/main/cup/sql_parser.cup |  10 +-
 .../apache/doris/analysis/AdminSetConfigStmt.java  |  19 +-
 .../main/java/org/apache/doris/catalog/Env.java|  19 +-
 .../java/org/apache/doris/qe/FEOpExecutor.java | 216 +
 4 files changed, 261 insertions(+), 3 deletions(-)

diff --git a/fe/fe-core/src/main/cup/sql_parser.cup 
b/fe/fe-core/src/main/cup/sql_parser.cup
index a62f3dcf0f1..871c0261e0c 100644
--- a/fe/fe-core/src/main/cup/sql_parser.cup
+++ b/fe/fe-core/src/main/cup/sql_parser.cup
@@ -7517,7 +7517,15 @@ admin_stmt ::=
 :}
 | KW_ADMIN KW_SET KW_FRONTEND KW_CONFIG opt_key_value_map:configs
 {:
-RESULT = new 
AdminSetConfigStmt(AdminSetConfigStmt.ConfigType.FRONTEND, configs);
+RESULT = new 
AdminSetConfigStmt(AdminSetConfigStmt.ConfigType.FRONTEND, configs, false);
+:}
+| KW_ADMIN KW_SET KW_ALL KW_FRONTENDS KW_CONFIG opt_key_value_map:configs
+{:
+RESULT = new 
AdminSetConfigStmt(AdminSetConfigStmt.ConfigType.FRONTEND, configs, true);
+:}
+| KW_ADMIN KW_SET KW_FRONTEND KW_CONFIG opt_key_value_map:configs KW_ALL
+{:
+RESULT = new 
AdminSetConfigStmt(AdminSetConfigStmt.ConfigType.FRONTEND, configs, true);
 :}
 // deprecated
 | KW_ADMIN KW_SHOW KW_FRONTEND KW_CONFIG opt_wild_where
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminSetConfigStmt.java 
b/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminSetConfigStmt.java
index 166f9a70096..1d2e22ee878 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminSetConfigStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminSetConfigStmt.java
@@ -25,6 +25,7 @@ import org.apache.doris.common.ErrorReport;
 import org.apache.doris.common.UserException;
 import org.apache.doris.mysql.privilege.PrivPredicate;
 import org.apache.doris.qe.ConnectContext;
+import org.apache.doris.qe.OriginStatement;
 
 import com.google.common.collect.Maps;
 
@@ -38,22 +39,25 @@ public class AdminSetConfigStmt extends DdlStmt {
 BACKEND
 }
 
+private boolean applyToAll;
 private ConfigType type;
 private Map configs;
 
 private RedirectStatus redirectStatus = RedirectStatus.NO_FORWARD;
 
-public AdminSetConfigStmt(ConfigType type, Map configs) {
+public AdminSetConfigStmt(ConfigType type, Map configs, 
boolean applyToAll) {
 this.type = type;
 this.configs = configs;
 if (this.configs == null) {
 this.configs = Maps.newHashMap();
 }
+this.applyToAll = applyToAll;
 
 // we have to analyze configs here to determine whether to forward it 
to master
 for (String key : this.configs.keySet()) {
 if (ConfigBase.checkIsMasterOnly(key)) {
 redirectStatus = RedirectStatus.FORWARD_NO_SYNC;
+this.applyToAll = false;
 }
 }
 }
@@ -66,6 +70,10 @@ public class AdminSetConfigStmt extends DdlStmt {
 return configs;
 }
 
+public boolean isApplyToAll() {
+return applyToAll;
+}
+
 @Override
 public void analyze(Analyzer analyzer) throws AnalysisException, 
UserException {
 super.analyze(analyzer);
@@ -87,4 +95,13 @@ public class AdminSetConfigStmt extends DdlStmt {
 public RedirectStatus getRedirectStatus() {
 return redirectStatus;
 }
+
+public OriginStatement getLocalSetStmt() {
+OriginStatement stmt = this.getOrigStmt();
+Object[] keyArr = configs.keySet().toArray();
+String sql = String.format("ADMIN SET FRONTEND CONFIG (\"%s\" = 
\"%s\");",
+keyArr[0].toString(), configs.get(keyArr[0].toString()));
+
+return new OriginStatement(sql, stmt.idx);
+}
 }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java 
b/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java
index 380eb361c00..6f8cd9e727d 100755
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java
@@ -233,6 +233,7 @@ import org.apache.doris.plugin.PluginMgr;
 import org.apache.doris.policy.PolicyMgr;
 import org.apache.doris.qe.AuditEventProcessor;
 import org.apache.doris.qe.ConnectContext;
+import org.apache.doris.qe.FEOpExecutor;
 import org.apache.doris.qe.GlobalVariable;
 import org.apache.d

(doris) branch master updated: (cloud-merge) Get fileCacheSize by RPC always (#36857)

2024-06-28 Thread gavinchou
This is an automated email from the ASF dual-hosted git repository.

gavinchou pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
 new e35d433629d (cloud-merge) Get fileCacheSize by RPC always (#36857)
e35d433629d is described below

commit e35d433629d16ab9973552c6ded69412302a17ae
Author: Lightman <31928846+lchangli...@users.noreply.github.com>
AuthorDate: Sat Jun 29 09:25:40 2024 +0800

(cloud-merge) Get fileCacheSize by RPC always (#36857)

Get fileCacheSize every time when do warm up job.
---
 .../apache/doris/cloud/CacheHotspotManager.java| 39 ++
 .../main/java/org/apache/doris/system/Backend.java | 10 --
 2 files changed, 18 insertions(+), 31 deletions(-)

diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/cloud/CacheHotspotManager.java 
b/fe/fe-core/src/main/java/org/apache/doris/cloud/CacheHotspotManager.java
index 304f76dee48..4f359446aad 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/cloud/CacheHotspotManager.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/cloud/CacheHotspotManager.java
@@ -152,7 +152,6 @@ public class CacheHotspotManager extends MasterDaemon {
 TGetTopNHotPartitionsResponse resp = respPair.first;
 if (resp.isSetHotTables()) {
 resp.getHotTables().forEach((THotTableMessage hotTable) -> 
{
-
respPair.second.setfileCacheCapacityBytes(resp.file_cache_size);
 if (hotTable.isSetHotPartitions()) {
 hotTable.hot_partitions.forEach((THotPartition 
partition) -> {
 insertIntoTable(clusterToBeList.getKey(), 
hotTable.table_id,
@@ -337,26 +336,24 @@ public class CacheHotspotManager extends MasterDaemon {
 .getBackendsByClusterName(clusterName);
 Long totalFileCache = 0L;
 for (Backend backend : backends) {
-Long fileCacheSize = backend.getfileCacheCapactiyBytes();
-if (fileCacheSize == 0) {
-boolean ok = false;
-BackendService.Client client = null;
-TNetworkAddress address = null;
-try {
-address = new TNetworkAddress(backend.getHost(), 
backend.getBePort());
-client = ClientPool.backendPool.borrowObject(address);
-TGetTopNHotPartitionsResponse resp = 
client.getTopNHotPartitions(
-new TGetTopNHotPartitionsRequest());
-fileCacheSize = resp.file_cache_size;
-ok = true;
-} catch (Exception e) {
-throw new RuntimeException(e);
-} finally {
-if (ok) {
-ClientPool.backendPool.returnObject(address, client);
-} else {
-ClientPool.backendPool.invalidateObject(address, 
client);
-}
+Long fileCacheSize = 0L;
+boolean ok = false;
+BackendService.Client client = null;
+TNetworkAddress address = null;
+try {
+address = new TNetworkAddress(backend.getHost(), 
backend.getBePort());
+client = ClientPool.backendPool.borrowObject(address);
+TGetTopNHotPartitionsResponse resp = 
client.getTopNHotPartitions(
+new TGetTopNHotPartitionsRequest());
+fileCacheSize = resp.file_cache_size;
+ok = true;
+} catch (Exception e) {
+throw new RuntimeException(e);
+} finally {
+if (ok) {
+ClientPool.backendPool.returnObject(address, client);
+} else {
+ClientPool.backendPool.invalidateObject(address, client);
 }
 }
 totalFileCache += fileCacheSize;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/system/Backend.java 
b/fe/fe-core/src/main/java/org/apache/doris/system/Backend.java
index 902b90843b6..a366aca5d6b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/system/Backend.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/system/Backend.java
@@ -148,8 +148,6 @@ public class Backend implements Writable {
 // send some queries to this BE, it is not an important problem.
 private AtomicBoolean isShutDown = new AtomicBoolean(false);
 
-private long fileCacheCapactiyBytes = 0;
-
 public Backend() {
 this.host = "";
 this.version = "";
@@ -241,14 +239,6 @@ public class Backend implements Writable {
 return heartbeatPort;
 }
 
-public void setfileCacheCapacityBytes(long fileCacheCapactiyBytes) {
-this.fileCacheCapactiyBytes = fileCacheCapactiyBytes;
-}
-
-public long getfileCacheCapactiyByt

Error while running notifications feature from refs/heads/master:.asf.yaml in doris-website!

2024-06-28 Thread Apache Infrastructure


An error occurred while running notifications feature in .asf.yaml!:
Invalid notification target 'comm...@foo.apache.org'. Must be a valid 
@doris.apache.org list!


-
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org



(doris-website) branch master updated: [community](versioning) Add Doris versioning guidelines on Community docs (#801)

2024-06-28 Thread luzhijing
This is an automated email from the ASF dual-hosted git repository.

luzhijing pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris-website.git


The following commit(s) were added to refs/heads/master by this push:
 new 641c09b518 [community](versioning) Add Doris versioning guidelines on 
Community docs (#801)
641c09b518 is described below

commit 641c09b51843d7da5ac60705490af8e630dd4812
Author: KassieZ <139741991+kass...@users.noreply.github.com>
AuthorDate: Sat Jun 29 09:06:55 2024 +0700

[community](versioning) Add Doris versioning guidelines on Community docs 
(#801)
---
 community/release-versioning.md| 63 ++
 .../current/release-versioning.md  | 63 ++
 sidebarsCommunity.json |  1 +
 3 files changed, 127 insertions(+)

diff --git a/community/release-versioning.md b/community/release-versioning.md
new file mode 100644
index 00..6c2c384265
--- /dev/null
+++ b/community/release-versioning.md
@@ -0,0 +1,63 @@
+---
+{
+"title": "Doris Versioning",
+"language": "en"
+}
+---
+
+
+
+## Release Versioning
+
+Apache Doris uses a three-digit version number (X.Y.Z)
+
+- **Major version X**: represents major feature releases or architecture 
upgrades, such as the vectorized execution engine in version 1.x.x; the new 
optimizer in version 2.x.x; and the storage-computing separation architecture 
in version 3.x.x. Major versions are usually changed on a grade-level cycle.
+
+- **Minor version Y**: represents the release of important features, 
performance optimization, or changes in metadata and data formats. Minor 
version changes are usually updated on a quarterly basis.
+
+- **Patch version Z**: mainly used to fix bugs, optimize performance, and 
update minor features. The patch version is usually released every 2-4 weeks.
+
+## Version Upgrade and Downgrade
+
+- Major (X) and minor (Y) version upgrades may alter metadata or data formats. 
Apache Doris ensures forward compatibility for these changes, allowing upgrades 
from old to new versions, but not downgrades. Therefore, it is recommended to 
backup data before upgrading to a major or minor version.
+
+- Patch version (Z) offers full compatibility, supports upgrading and 
downgrading between new and old versions, and prevents data compatibility 
issues.
+
+## How to choose a version
+
+Apache Doris maintains two main version branches: Stable and Latest.
+
+- The latest version includes the latest features, optimizations, and bug 
fixes, ideal for users who want to test new features, conduct POCs, performance 
testing, or test pre-launch environments.
+
+- Stable version provides continuous bug fixes, ensuring greater stability and 
is recommended for production use.
+
+## CPU Model and Binary Version
+
+Apache Doris offers three binary versions:
+
+- **x64 (avx2)**: for x86_64 CPUs supporting avx2 instructions.
+
+- **x64 (no avx2)**: for x86_64 CPUs not supporting avx2.
+
+- **ARM64**: for ARM architecture CPUs.
+
+:::tip
+You can check whether the CPU supports the avx2 instruction by running the 
command `cat /proc/cpuinfo |grep avx2`.
+:::
\ No newline at end of file
diff --git 
a/i18n/zh-CN/docusaurus-plugin-content-docs-community/current/release-versioning.md
 
b/i18n/zh-CN/docusaurus-plugin-content-docs-community/current/release-versioning.md
new file mode 100644
index 00..ce5b66ca1a
--- /dev/null
+++ 
b/i18n/zh-CN/docusaurus-plugin-content-docs-community/current/release-versioning.md
@@ -0,0 +1,63 @@
+---
+{
+"title": "版本规则",
+"language": "zh-CN"
+}
+---
+
+
+
+## 版本号规则
+
+Apache Doris 使用三位版本号(X.Y.Z)
+
+- **主版本(X)**:代表重大功能发布或架构升级。主版本变通常以年级别为周期变更。
+
+- **次版本(Y)**:代表重要功能的发版、性能优化或必要性的元数据与数据格式的变更。次版本变更通常以季度为周期更新。
+
+- **补丁版本(Z)**:主要用于修复 Bug、性能优化以及功能更新。补丁版本更新周期短,通常每 2-4 周发布新版本。
+
+## 版本升级和降级
+
+- 主版本(X)和次版本(Y)的升级可能涉及元数据或数据格式的变更。Apache Doris 
保证这些变更可以向前兼容(即可以从老版本升级到新版本),但不保证向后兼容(即不保证新版本可以降级到老版本)。因此,建议进行主版本或次版本升级前,做好数据备份。
+
+- 补丁版本(Z)保证前后向兼容,支持新版本和老版本之间的升降级,无需担心数据兼容性问题。
+
+## 如何选择版本
+
+Apache Doris 团队主要维护最新的两位版本分支,通常标注为 Latest 和 Stable。
+
+- **Latest 版本**:包含最新的功能、优化和问题修复,适合希望试用新功能、进行可行性验证(POC)、性能测试或测试环境预上线的用户使用。
+
+- **Stable 版本**:持续包含对应分支的 Bug 修复,稳定性更高,建议生产环境使用此版本。
+
+## CPU 型号与 Binary 版本
+
+Apache Doris 提供三种不同的 Binary 以对应不同的 CPU 型号:
+
+- x64(avx2):适用于支持 avx2 指令的 x86_64 架构 CPU。
+
+- x64(no avx2):适用于不支持 avx2 指令的 x86_64 架构 CPU。
+
+- ARM64:适用于 ARM 架构的 CPU。
+
+:::tip 提示
+可以通过 `cat /proc/cpuinfo |grep avx2` 命令查看 CPU 是否支持 avx2 指令。
+:::
\ No newline at end of file
diff --git a/sidebarsCommunity.json b/sidebarsCommunity.json
index 740d75f248..c4845eea05 100644
--- a/sidebarsCommunity.json
+++ b/sidebarsCommunity.json
@@ -19,6 +19,7 @@
 "how-to-contribute/trino-connector-developer-guide"
 ]
 },
+"release-versioning",
 {
 "type": "category",
 "label": "Release Process & Verification",



Error while running notifications feature from refs/heads/master:.asf.yaml in doris-website!

2024-06-28 Thread Apache Infrastructure


An error occurred while running notifications feature in .asf.yaml!:
Invalid notification target 'comm...@foo.apache.org'. Must be a valid 
@doris.apache.org list!


-
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org



(doris-website) branch master updated: [Fix](docs) Export/Outfile supports `compress_type` properties. (#764)

2024-06-28 Thread luzhijing
This is an automated email from the ASF dual-hosted git repository.

luzhijing pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris-website.git


The following commit(s) were added to refs/heads/master by this push:
 new ccf39c1174 [Fix](docs) Export/Outfile supports `compress_type`  
properties. (#764)
ccf39c1174 is described below

commit ccf39c11744d21ff96abf0d4f7ce718b390f6ad8
Author: Tiewei Fang <43782773+bepppo...@users.noreply.github.com>
AuthorDate: Sat Jun 29 10:07:15 2024 +0800

[Fix](docs) Export/Outfile supports `compress_type`  properties. (#764)

https://github.com/apache/doris/pull/36490

-

Co-authored-by: Luzhijing <82810928+luzhij...@users.noreply.github.com>
---
 docs/data-operate/export/export-overview.md|  2 +
 docs/lakehouse/datalake-building/hive-build.md |  7 +-
 .../Manipulation/EXPORT.md |  2 +
 .../Data-Manipulation-Statements/OUTFILE.md|  3 +-
 .../current/data-operate/export/export-overview.md | 24 +++
 .../lakehouse/datalake-building/hive-build.md  | 46 ++
 .../Manipulation/EXPORT.md | 74 --
 .../Data-Manipulation-Statements/OUTFILE.md| 53 
 8 files changed, 109 insertions(+), 102 deletions(-)

diff --git a/docs/data-operate/export/export-overview.md 
b/docs/data-operate/export/export-overview.md
index 3a3a2abf41..fc90d2b6e7 100644
--- a/docs/data-operate/export/export-overview.md
+++ b/docs/data-operate/export/export-overview.md
@@ -105,6 +105,7 @@ The following table shows the mapping between Doris data 
types and Parquet, ORC
 | struct | struct |
 | map | map |
 | array | array |
+|json| Not support|
 
 2. When Doris exports to Parquet file format, it first converts Doris 
in-memory data to Arrow in-memory data format, then writes out to Parquet file 
format. The mapping relationship between Doris data types and Arrow data types 
is:
 
@@ -127,3 +128,4 @@ The following table shows the mapping between Doris data 
types and Parquet, ORC
 | struct | struct |
 | map | map |
 | array | list |
+|json| utf8 |
diff --git a/docs/lakehouse/datalake-building/hive-build.md 
b/docs/lakehouse/datalake-building/hive-build.md
index 6866bcae2e..3baa987ca4 100644
--- a/docs/lakehouse/datalake-building/hive-build.md
+++ b/docs/lakehouse/datalake-building/hive-build.md
@@ -144,7 +144,6 @@ This is an experimental feature.
 
 :::tip
 
-对于某些默认开启 ACID 事务特性的 Hive 集群,使用 Doris 建表后,表属性 `transactional` 会为 true。而 
Doris 只支持部分 Hive 事务表的特性,因此可能会导致 Doris 创建的 Hive,Doris 
本身无法读取的问题。因此,需要在建表的属性中,显式增加:`"transactional" = "false"`,来创建非事务的 Hive 表:
 For some Hive clusters that enable ACID transaction features by default, 
after using Doris to create a table, the table attribute `transactional` will 
be true. However, Doris only supports some features of Hive transaction tables, 
which may cause the problem that Doris itself cannot read the Hive created by 
Doris. Therefore, it is necessary to explicitly add: `"transactional" = 
"false"` in the table creation properties to create a non-transactional Hive 
table:
 
 ```
@@ -205,8 +204,8 @@ This is an experimental feature.
 
 - Compression Formats
 
-- Parquet: snappy(default), zlib, zstd
-- ORC: snappy, zlib(default), zstd
+- Parquet: snappy(default), zstd, plain. (plain means no compression is 
used.)
+- ORC: snappy, zlib(default), zstd, plain. (plain means no compression is 
used.)
 
 - Storage Medium
 
@@ -263,7 +262,7 @@ CREATE TABLE hive.hive_db.hive_ctas (col1,col2,pt1) 
ENGINE=hive
 PARTITION BY LIST (pt1) ()
 PROPERTIES (
 "file_format"="parquet",
-"parquet.compression"="zstd"
+"compression"="zstd"
 )
 AS SELECT col1,pt1 as col2,pt2 as pt1 FROM test_ctas.part_ctas_src WHERE 
col1>0;
 ```
diff --git 
a/docs/sql-manual/sql-statements/Data-Manipulation-Statements/Manipulation/EXPORT.md
 
b/docs/sql-manual/sql-statements/Data-Manipulation-Statements/Manipulation/EXPORT.md
index c6479d0e82..d913c48c6e 100644
--- 
a/docs/sql-manual/sql-statements/Data-Manipulation-Statements/Manipulation/EXPORT.md
+++ 
b/docs/sql-manual/sql-statements/Data-Manipulation-Statements/Manipulation/EXPORT.md
@@ -98,6 +98,8 @@ The bottom layer of the `Export` statement actually executes 
the `select...outfi
 
   - `timeout`: This is the timeout parameter of the export job, the default 
timeout is 2 hours, and the unit is seconds.
 
+  - `compress_type`: When specifying the export file format as Parquet or ORC, 
you can choose the compression method for the Parquet or ORC files. For Parquet 
file format, you can specify the compression method as SNAPPY, GZIP, BROTLI, 
ZSTD, LZ4, or PLAIN, with the default being SNAPPY. For ORC file format, you 
can specify the compression method as PLAIN, SNAPPY, ZLIB, or ZSTD, with the 
default being ZLIB. This parameter is supported starting from version 2.1.5. 
(PLAIN means no compress

Error while running notifications feature from refs/heads/master:.asf.yaml in doris-website!

2024-06-28 Thread Apache Infrastructure


An error occurred while running notifications feature in .asf.yaml!:
Invalid notification target 'comm...@foo.apache.org'. Must be a valid 
@doris.apache.org list!


-
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org



(doris-website) branch master updated: [admin-set-config] modify admin-set-config doc (#805)

2024-06-28 Thread morningman
This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris-website.git


The following commit(s) were added to refs/heads/master by this push:
 new 6284103e14 [admin-set-config] modify admin-set-config doc (#805)
6284103e14 is described below

commit 6284103e14d5b366d22106644ff490dfc1f50ca1
Author: Mingyu Chen 
AuthorDate: Sat Jun 29 14:49:55 2024 +0800

[admin-set-config] modify admin-set-config doc (#805)

Co-authored-by: Luzhijing <82810928+luzhij...@users.noreply.github.com>
---
 .../ADMIN-SET-CONFIG.md| 26 +-
 .../ADMIN-SET-CONFIG.md| 23 +++
 .../ADMIN-SET-CONFIG.md| 22 ++
 .../ADMIN-SET-CONFIG.md| 26 +-
 4 files changed, 57 insertions(+), 40 deletions(-)

diff --git 
a/docs/sql-manual/sql-statements/Database-Administration-Statements/ADMIN-SET-CONFIG.md
 
b/docs/sql-manual/sql-statements/Database-Administration-Statements/ADMIN-SET-CONFIG.md
index f59828f40d..1683a87a11 100644
--- 
a/docs/sql-manual/sql-statements/Database-Administration-Statements/ADMIN-SET-CONFIG.md
+++ 
b/docs/sql-manual/sql-statements/Database-Administration-Statements/ADMIN-SET-CONFIG.md
@@ -32,29 +32,33 @@ ADMIN SET CONFIG
 
 ### Description
 
-This statement is used to set the configuration items of the cluster 
(currently only the configuration items of FE are supported).
-The settable configuration items can be viewed through the SHOW FRONTEND 
CONFIG; command.
+This statement is used to set the configuration items of the cluster 
(currently only supports setting FE configuration items).
 
-grammar:
+The configurable items can be viewed using the `SHOW FRONTEND CONFIG;` command.
+
+Syntax:
 
 ```sql
-  ADMIN SET FRONTEND CONFIG ("key" = "value") [ALL];
-  ADMIN SET ALL FRONTENDS CONFIG ("key" = "value");
-
+ADMIN SET FRONTEND CONFIG ("key" = "value") [ALL];
+-- or
+ADMIN SET ALL FRONTENDS CONFIG ("key" = "value");
+```
 
-illustrate:
+:::tip Explanation
 
-1. If `ALL` keyword is used, this config will be applied to all FE(except 
master_only configuration)
+- Starting from versions 2.0.11 and 2.1.5, the `ALL` keyword is supported. 
When using the `ALL` keyword, the configuration parameters will be applied to 
all FEs (except for the `master_only` parameter).
+- This syntax does not persistently modify the configuration. After an FE 
restarts, the modified configuration becomes invalid. To persist the changes, 
the configuration items need to be synchronously added in fe.conf.
+:::
 
 ### Example
 
-1. Set 'disable_balance' to true
+1. Set `disable_balance` to true
 
-ADMIN SET FRONTEND CONFIG ("disable_balance" = "true");
+`ADMIN SET FRONTEND CONFIG ("disable_balance" = "true");`
 
 ### Keywords
 
-ADMIN, SET, CONFIG
+ADMIN, SET, CONFIG
 
 ### Best Practice
 
diff --git 
a/i18n/zh-CN/docusaurus-plugin-content-docs/current/sql-manual/sql-statements/Database-Administration-Statements/ADMIN-SET-CONFIG.md
 
b/i18n/zh-CN/docusaurus-plugin-content-docs/current/sql-manual/sql-statements/Database-Administration-Statements/ADMIN-SET-CONFIG.md
index 0663934e77..29946e64de 100644
--- 
a/i18n/zh-CN/docusaurus-plugin-content-docs/current/sql-manual/sql-statements/Database-Administration-Statements/ADMIN-SET-CONFIG.md
+++ 
b/i18n/zh-CN/docusaurus-plugin-content-docs/current/sql-manual/sql-statements/Database-Administration-Statements/ADMIN-SET-CONFIG.md
@@ -32,28 +32,33 @@ ADMIN SET CONFIG
 
 ### Description
 
-该语句用于设置集群的配置项(当前仅支持设置FE的配置项)。
+该语句用于设置集群的配置项(当前仅支持设置 FE 的配置项)。
+
 可设置的配置项,可以通过 `SHOW FRONTEND CONFIG;` 命令查看。
 
 语法:
 
 ```sql
-  ADMIN SET FRONTEND CONFIG ("key" = "value") [ALL];
-  ADMIN SET ALL FRONTENDS CONFIG ("key" = "value");```
+ADMIN SET FRONTEND CONFIG ("key" = "value") [ALL];
+-- or
+ADMIN SET ALL FRONTENDS CONFIG ("key" = "value");
+```
 
-说明:  
+:::tip 提示 
   
-1. 使用ALL关键字后配置参数将应用于所有FE(除 master_only 参数外)
-
+- 2.0.11 和 2.1.5 版本开始支持 `ALL` 关键词。使用 `ALL` 关键字后配置参数将应用于所有 FE(除 `master_only` 
参数外)。
+- 该语法不会持久化修改的配置,FE 重启后,修改的配置失效。如需持久化,需要在 fe.conf 中同步添加配置项。
+- 
+:::
+
 ### Example
 
-1. 设置 'disable_balance' 为 true
+1. 设置 `disable_balance` 为 true
 
-ADMIN SET FRONTEND CONFIG ("disable_balance" = "true");
+`ADMIN SET FRONTEND CONFIG ("disable_balance" = "true");`
 
 ### Keywords
 
 ADMIN, SET, CONFIG
 
 ### Best Practice
-
diff --git 
a/i18n/zh-CN/docusaurus-plugin-content-docs/version-2.1/sql-manual/sql-statements/Database-Administration-Statements/ADMIN-SET-CONFIG.md
 
b/i18n/zh-CN/docusaurus-plugin-content-docs/version-2.1/sql-manual/sql-statements/Database-Administration-Statements/ADMIN-SET-CONFIG.md
index 1b4feac596..6f849f70dd 100644
--- 
a/i18n/zh-CN/docusaurus-plugin-content-docs/version-2.1/sql-manual/sql-statements/Database-Administration-Statements/ADMIN-SET-CONFIG.md
++