This is an automated email from the ASF dual-hosted git repository.
wangbo pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/branch-2.1 by this push:
new 43ebfa9031e [branch-2.1] pick some workload group pr (#44634)
43ebfa9031e is described below
commit 43ebfa9031e5ea8066403f223ecf4cbad0602224
Author: wangbo <[email protected]>
AuthorDate: Wed Nov 27 09:42:41 2024 +0800
[branch-2.1] pick some workload group pr (#44634)
pick #42053 #43942
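These picks replace the raw ThreadPool* that was threaded through the memtable writers with a std::shared_ptr<WorkloadGroup>, so FlushToken holds only a weak_ptr and chooses the flush pool at submit time. A minimal standalone sketch of that selection pattern (simplified, hypothetical names and stand-in types, not the actual BE code):

    // flush_pool_sketch.cpp -- illustrative only; ThreadPool/WorkloadGroup are stand-ins.
    #include <iostream>
    #include <memory>

    struct ThreadPool {
        const char* name;
    };

    struct WorkloadGroup {
        ThreadPool* flush_pool = nullptr;
        ThreadPool* get_memtable_flush_pool_ptr() { return flush_pool; }
    };

    struct FlushToken {
        ThreadPool* default_pool;              // shared flush pool (fallback)
        std::weak_ptr<WorkloadGroup> wg_wptr;  // token never keeps the group alive

        ThreadPool* pick_pool() {
            // Locking the weak_ptr guarantees the group is not destroyed mid-submit.
            if (auto wg = wg_wptr.lock()) {
                if (auto* pool = wg->get_memtable_flush_pool_ptr()) {
                    return pool;               // per-workload-group flush pool
                }
            }
            return default_pool;               // group gone or no pool: fall back
        }
    };

    int main() {
        ThreadPool shared{"shared_flush_pool"}, per_group{"wg_flush_pool"};
        auto wg = std::make_shared<WorkloadGroup>();
        wg->flush_pool = &per_group;

        FlushToken token{&shared, wg};
        std::cout << token.pick_pool()->name << "\n";  // wg_flush_pool

        wg.reset();                                    // workload group destroyed
        std::cout << token.pick_pool()->name << "\n";  // shared_flush_pool
    }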
---
be/src/olap/delta_writer.cpp | 7 +-
be/src/olap/delta_writer_v2.cpp | 7 +-
be/src/olap/memtable_flush_executor.cpp | 28 ++-
be/src/olap/memtable_flush_executor.h | 13 +-
be/src/olap/memtable_writer.cpp | 11 +-
be/src/olap/memtable_writer.h | 3 +-
be/src/runtime/query_context.h | 2 +-
be/src/runtime/workload_group/workload_group.cpp | 12 +-
be/src/runtime/workload_group/workload_group.h | 6 +
.../resource/workloadgroup/WorkloadGroup.java | 223 +++++++++++----------
.../resource/workloadgroup/WorkloadGroupMgr.java | 12 +-
.../resource/workloadgroup/WorkloadGroupTest.java | 4 +-
.../data/workload_manager_p0/test_curd_wlg.out | 26 ++-
.../workload_manager_p0/test_curd_wlg.groovy | 107 +++++++---
14 files changed, 270 insertions(+), 191 deletions(-)
diff --git a/be/src/olap/delta_writer.cpp b/be/src/olap/delta_writer.cpp
index bf7adadb943..9cd7ca18b9e 100644
--- a/be/src/olap/delta_writer.cpp
+++ b/be/src/olap/delta_writer.cpp
@@ -102,10 +102,15 @@ Status BaseDeltaWriter::init() {
if (_is_init) {
return Status::OK();
}
+ auto* t_ctx = doris::thread_context(true);
+ std::shared_ptr<WorkloadGroup> wg_sptr = nullptr;
+ if (t_ctx) {
+ wg_sptr = t_ctx->workload_group().lock();
+ }
RETURN_IF_ERROR(_rowset_builder->init());
RETURN_IF_ERROR(_memtable_writer->init(
_rowset_builder->rowset_writer(), _rowset_builder->tablet_schema(),
- _rowset_builder->get_partial_update_info(), nullptr,
+ _rowset_builder->get_partial_update_info(), wg_sptr,
_rowset_builder->tablet()->enable_unique_key_merge_on_write()));
ExecEnv::GetInstance()->memtable_memory_limiter()->register_writer(_memtable_writer);
_is_init = true;
diff --git a/be/src/olap/delta_writer_v2.cpp b/be/src/olap/delta_writer_v2.cpp
index 32baa93be1f..aaa35a0c2a9 100644
--- a/be/src/olap/delta_writer_v2.cpp
+++ b/be/src/olap/delta_writer_v2.cpp
@@ -126,13 +126,12 @@ Status DeltaWriterV2::init() {
_rowset_writer = std::make_shared<BetaRowsetWriterV2>(_streams);
RETURN_IF_ERROR(_rowset_writer->init(context));
- ThreadPool* wg_thread_pool_ptr = nullptr;
+ std::shared_ptr<WorkloadGroup> wg_sptr = nullptr;
if (_state->get_query_ctx()) {
- wg_thread_pool_ptr = _state->get_query_ctx()->get_memtable_flush_pool();
+ wg_sptr = _state->get_query_ctx()->workload_group();
}
RETURN_IF_ERROR(_memtable_writer->init(_rowset_writer, _tablet_schema, _partial_update_info,
- wg_thread_pool_ptr,
- _streams[0]->enable_unique_mow(_req.index_id)));
+ wg_sptr, _streams[0]->enable_unique_mow(_req.index_id)));
ExecEnv::GetInstance()->memtable_memory_limiter()->register_writer(_memtable_writer);
_is_init = true;
_streams.clear();
diff --git a/be/src/olap/memtable_flush_executor.cpp b/be/src/olap/memtable_flush_executor.cpp
index 704e8b280bc..30d06f70126 100644
--- a/be/src/olap/memtable_flush_executor.cpp
+++ b/be/src/olap/memtable_flush_executor.cpp
@@ -101,7 +101,16 @@ Status FlushToken::submit(std::unique_ptr<MemTable> mem_table) {
auto task = MemtableFlushTask::create_shared(shared_from_this(), std::move(mem_table),
_rowset_writer->allocate_segment_id(),
submit_task_time);
- Status ret = _thread_pool->submit(std::move(task));
+ // NOTE: we must guarantee that the WorkloadGroup is not destroyed while the memtable flush task is submitted,
+ // because currently a WorkloadGroup can only be destroyed after all queries in the group have finished,
+ // and that does not consider whether the load channel has finished.
+ std::shared_ptr<WorkloadGroup> wg_sptr = _wg_wptr.lock();
+ ThreadPool* wg_thread_pool = nullptr;
+ if (wg_sptr) {
+ wg_thread_pool = wg_sptr->get_memtable_flush_pool_ptr();
+ }
+ Status ret = wg_thread_pool ? wg_thread_pool->submit(std::move(task))
+ : _thread_pool->submit(std::move(task));
if (ret.ok()) {
// _wait_running_task_finish was executed after this function, so no need to notify _cond here
_stats.flush_running_count++;
@@ -233,7 +242,8 @@ void MemTableFlushExecutor::init(const std::vector<DataDir*>& data_dirs) {
// NOTE: we use SERIAL mode here to ensure all mem-tables from one tablet are flushed in order.
Status MemTableFlushExecutor::create_flush_token(std::shared_ptr<FlushToken>& flush_token,
std::shared_ptr<RowsetWriter> rowset_writer,
- bool is_high_priority) {
+ bool is_high_priority,
+ std::shared_ptr<WorkloadGroup> wg_sptr) {
switch (rowset_writer->type()) {
case ALPHA_ROWSET:
// alpha rowset do not support flush in CONCURRENT. and not support alpha rowset now.
@@ -241,7 +251,7 @@ Status MemTableFlushExecutor::create_flush_token(std::shared_ptr<FlushToken>& fl
case BETA_ROWSET: {
// beta rowset can be flush in CONCURRENT, because each memtable using a new segment writer.
ThreadPool* pool = is_high_priority ? _high_prio_flush_pool.get() : _flush_pool.get();
- flush_token = FlushToken::create_shared(pool);
+ flush_token = FlushToken::create_shared(pool, wg_sptr);
flush_token->set_rowset_writer(rowset_writer);
return Status::OK();
}
@@ -250,18 +260,6 @@ Status MemTableFlushExecutor::create_flush_token(std::shared_ptr<FlushToken>& fl
}
}
-Status MemTableFlushExecutor::create_flush_token(std::shared_ptr<FlushToken>& flush_token,
- std::shared_ptr<RowsetWriter> rowset_writer,
- ThreadPool* wg_flush_pool_ptr) {
- if (rowset_writer->type() == BETA_ROWSET) {
- flush_token = FlushToken::create_shared(wg_flush_pool_ptr);
- } else {
- return Status::InternalError<false>("not support alpha rowset load now.");
- }
- }
- flush_token->set_rowset_writer(rowset_writer);
- return Status::OK();
-}
-
void MemTableFlushExecutor::_register_metrics() {
REGISTER_HOOK_METRIC(flush_thread_pool_queue_size,
[this]() { return _flush_pool->get_queue_size(); });
diff --git a/be/src/olap/memtable_flush_executor.h b/be/src/olap/memtable_flush_executor.h
index 44ced2a27a9..711c45a67e8 100644
--- a/be/src/olap/memtable_flush_executor.h
+++ b/be/src/olap/memtable_flush_executor.h
@@ -34,6 +34,7 @@ namespace doris {
class DataDir;
class MemTable;
class RowsetWriter;
+class WorkloadGroup;
// the statistic of a certain flush handler.
// use atomic because it may be updated by multi threads
@@ -59,7 +60,8 @@ class FlushToken : public std::enable_shared_from_this<FlushToken> {
ENABLE_FACTORY_CREATOR(FlushToken);
public:
- FlushToken(ThreadPool* thread_pool) : _flush_status(Status::OK()), _thread_pool(thread_pool) {}
+ FlushToken(ThreadPool* thread_pool, std::shared_ptr<WorkloadGroup> wg_sptr)
+ : _flush_status(Status::OK()), _thread_pool(thread_pool), _wg_wptr(wg_sptr) {}
Status submit(std::unique_ptr<MemTable> mem_table);
@@ -108,6 +110,8 @@ private:
std::mutex _mutex;
std::condition_variable _cond;
+
+ std::weak_ptr<WorkloadGroup> _wg_wptr;
};
// MemTableFlushExecutor is responsible for flushing memtables to disk.
@@ -133,11 +137,8 @@ public:
void init(const std::vector<DataDir*>& data_dirs);
Status create_flush_token(std::shared_ptr<FlushToken>& flush_token,
- std::shared_ptr<RowsetWriter> rowset_writer, bool is_high_priority);
-
- Status create_flush_token(std::shared_ptr<FlushToken>& flush_token,
- std::shared_ptr<RowsetWriter> rowset_writer,
- ThreadPool* wg_flush_pool_ptr);
+ std::shared_ptr<RowsetWriter> rowset_writer, bool is_high_priority,
+ std::shared_ptr<WorkloadGroup> wg_sptr);
private:
void _register_metrics();
diff --git a/be/src/olap/memtable_writer.cpp b/be/src/olap/memtable_writer.cpp
index 29206a292cd..4410cf3c20b 100644
--- a/be/src/olap/memtable_writer.cpp
+++ b/be/src/olap/memtable_writer.cpp
@@ -65,7 +65,7 @@ MemTableWriter::~MemTableWriter() {
Status MemTableWriter::init(std::shared_ptr<RowsetWriter> rowset_writer,
TabletSchemaSPtr tablet_schema,
std::shared_ptr<PartialUpdateInfo> partial_update_info,
- ThreadPool* wg_flush_pool_ptr, bool unique_key_mow) {
+ std::shared_ptr<WorkloadGroup> wg_sptr, bool unique_key_mow) {
_rowset_writer = rowset_writer;
_tablet_schema = tablet_schema;
_unique_key_mow = unique_key_mow;
@@ -77,13 +77,8 @@ Status MemTableWriter::init(std::shared_ptr<RowsetWriter> rowset_writer,
// create flush handler
// by assigning segment_id to memtable before submiting to flush executor,
// we can make sure same keys sort in the same order in all replicas.
- if (wg_flush_pool_ptr) {
- RETURN_IF_ERROR(StorageEngine::instance()->memtable_flush_executor()->create_flush_token(
- _flush_token, _rowset_writer, wg_flush_pool_ptr));
- } else {
- RETURN_IF_ERROR(StorageEngine::instance()->memtable_flush_executor()->create_flush_token(
- _flush_token, _rowset_writer, _req.is_high_priority));
- }
+ RETURN_IF_ERROR(StorageEngine::instance()->memtable_flush_executor()->create_flush_token(
+ _flush_token, _rowset_writer, _req.is_high_priority, wg_sptr));
_is_init = true;
return Status::OK();
diff --git a/be/src/olap/memtable_writer.h b/be/src/olap/memtable_writer.h
index ee7c8e1538a..bd2e438258f 100644
--- a/be/src/olap/memtable_writer.h
+++ b/be/src/olap/memtable_writer.h
@@ -52,6 +52,7 @@ class SlotDescriptor;
class OlapTableSchemaParam;
class RowsetWriter;
struct FlushStatistic;
+class WorkloadGroup;
namespace vectorized {
class Block;
@@ -69,7 +70,7 @@ public:
Status init(std::shared_ptr<RowsetWriter> rowset_writer, TabletSchemaSPtr tablet_schema,
std::shared_ptr<PartialUpdateInfo> partial_update_info,
- ThreadPool* wg_flush_pool_ptr, bool unique_key_mow = false);
+ std::shared_ptr<WorkloadGroup> wg_sptr, bool unique_key_mow = false);
Status write(const vectorized::Block* block, const std::vector<uint32_t>& row_idxs);
diff --git a/be/src/runtime/query_context.h b/be/src/runtime/query_context.h
index e781ae61cab..1e1f1bf57cf 100644
--- a/be/src/runtime/query_context.h
+++ b/be/src/runtime/query_context.h
@@ -339,7 +339,7 @@ private:
// And will be shared by all instances of this query.
// So that we can control the max thread that a query can be used to execute.
// If this token is not set, the scanner will be executed in "_scan_thread_pool" in exec env.
- std::unique_ptr<ThreadPoolToken> _thread_token;
+ std::unique_ptr<ThreadPoolToken> _thread_token {nullptr};
std::mutex _start_lock;
std::condition_variable _start_cond;
diff --git a/be/src/runtime/workload_group/workload_group.cpp b/be/src/runtime/workload_group/workload_group.cpp
index 07d4177f7f6..39437048326 100644
--- a/be/src/runtime/workload_group/workload_group.cpp
+++ b/be/src/runtime/workload_group/workload_group.cpp
@@ -331,21 +331,21 @@ Status WorkloadGroupInfo::parse_topic_info(const TWorkloadGroupInfo& tworkload_g
// 4 cpu_share
uint64_t cpu_share = CgroupCpuCtl::cpu_soft_limit_default_value();
- if (tworkload_group_info.__isset.cpu_share) {
+ if (tworkload_group_info.__isset.cpu_share && tworkload_group_info.cpu_share > 0) {
cpu_share = tworkload_group_info.cpu_share;
}
workload_group_info->cpu_share = cpu_share;
// 5 cpu hard limit
int cpu_hard_limit = CPU_HARD_LIMIT_DEFAULT_VALUE;
- if (tworkload_group_info.__isset.cpu_hard_limit) {
+ if (tworkload_group_info.__isset.cpu_hard_limit && tworkload_group_info.cpu_hard_limit > 0) {
cpu_hard_limit = tworkload_group_info.cpu_hard_limit;
}
workload_group_info->cpu_hard_limit = cpu_hard_limit;
// 6 mem_limit
std::string mem_limit_str = MEMORY_LIMIT_DEFAULT_VALUE;
- if (tworkload_group_info.__isset.mem_limit) {
+ if (tworkload_group_info.__isset.mem_limit && tworkload_group_info.mem_limit != "-1") {
mem_limit_str = tworkload_group_info.mem_limit;
}
bool is_percent = true;
@@ -407,14 +407,16 @@ Status WorkloadGroupInfo::parse_topic_info(const TWorkloadGroupInfo& tworkload_g
// 14 scan io
int read_bytes_per_second = -1;
- if (tworkload_group_info.__isset.read_bytes_per_second) {
+ if (tworkload_group_info.__isset.read_bytes_per_second &&
+ tworkload_group_info.read_bytes_per_second > 0) {
read_bytes_per_second = tworkload_group_info.read_bytes_per_second;
}
workload_group_info->read_bytes_per_second = read_bytes_per_second;
// 15 remote scan io
int remote_read_bytes_per_second = -1;
- if (tworkload_group_info.__isset.remote_read_bytes_per_second) {
+ if (tworkload_group_info.__isset.remote_read_bytes_per_second &&
+ tworkload_group_info.remote_read_bytes_per_second > 0) {
remote_read_bytes_per_second = tworkload_group_info.remote_read_bytes_per_second;
}
workload_group_info->remote_read_bytes_per_second = remote_read_bytes_per_second;
diff --git a/be/src/runtime/workload_group/workload_group.h b/be/src/runtime/workload_group/workload_group.h
index 22b1405eeab..9b27e72c2f9 100644
--- a/be/src/runtime/workload_group/workload_group.h
+++ b/be/src/runtime/workload_group/workload_group.h
@@ -216,6 +216,12 @@ public:
std::weak_ptr<CgroupCpuCtl> get_cgroup_cpu_ctl_wptr();
+ ThreadPool* get_memtable_flush_pool_ptr() {
+ // No lock is taken here because this is called from memtable flush,
+ // to avoid lock contention with workload group thread pool updates.
+ return _memtable_flush_pool.get();
+ }
+
private:
void create_cgroup_cpu_ctl_no_lock();
void upsert_cgroup_cpu_ctl_no_lock(WorkloadGroupInfo* wg_info);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/resource/workloadgroup/WorkloadGroup.java b/fe/fe-core/src/main/java/org/apache/doris/resource/workloadgroup/WorkloadGroup.java
index a026025c918..5f8ff7829d5 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/resource/workloadgroup/WorkloadGroup.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/resource/workloadgroup/WorkloadGroup.java
@@ -35,6 +35,7 @@ import org.apache.doris.thrift.TopicInfo;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Maps;
import com.google.gson.annotations.SerializedName;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
@@ -105,6 +106,26 @@ public class WorkloadGroup implements Writable, GsonPostProcessable {
public static final int SPILL_LOW_WATERMARK_DEFAULT_VALUE = 50;
public static final int SPILL_HIGH_WATERMARK_DEFAULT_VALUE = 80;
+ private static final Map<String, String> ALL_PROPERTIES_DEFAULT_VALUE_MAP = Maps.newHashMap();
+
+ static {
+ ALL_PROPERTIES_DEFAULT_VALUE_MAP.put(CPU_SHARE, "-1");
+ ALL_PROPERTIES_DEFAULT_VALUE_MAP.put(CPU_HARD_LIMIT, "-1");
+ ALL_PROPERTIES_DEFAULT_VALUE_MAP.put(MEMORY_LIMIT, "-1");
+ ALL_PROPERTIES_DEFAULT_VALUE_MAP.put(ENABLE_MEMORY_OVERCOMMIT, "true");
+ ALL_PROPERTIES_DEFAULT_VALUE_MAP.put(MAX_CONCURRENCY, String.valueOf(Integer.MAX_VALUE));
+ ALL_PROPERTIES_DEFAULT_VALUE_MAP.put(MAX_QUEUE_SIZE, "0");
+ ALL_PROPERTIES_DEFAULT_VALUE_MAP.put(QUEUE_TIMEOUT, "0");
+ ALL_PROPERTIES_DEFAULT_VALUE_MAP.put(SCAN_THREAD_NUM, "-1");
+ ALL_PROPERTIES_DEFAULT_VALUE_MAP.put(MAX_REMOTE_SCAN_THREAD_NUM, "-1");
+ ALL_PROPERTIES_DEFAULT_VALUE_MAP.put(MIN_REMOTE_SCAN_THREAD_NUM, "-1");
+ ALL_PROPERTIES_DEFAULT_VALUE_MAP.put(SPILL_THRESHOLD_LOW_WATERMARK, "50%");
+ ALL_PROPERTIES_DEFAULT_VALUE_MAP.put(SPILL_THRESHOLD_HIGH_WATERMARK, "80%");
+ ALL_PROPERTIES_DEFAULT_VALUE_MAP.put(TAG, "");
+ ALL_PROPERTIES_DEFAULT_VALUE_MAP.put(READ_BYTES_PER_SECOND, "-1");
+ ALL_PROPERTIES_DEFAULT_VALUE_MAP.put(REMOTE_READ_BYTES_PER_SECOND, "-1");
+ }
+
@SerializedName(value = "id")
private long id;
@@ -135,9 +156,7 @@ public class WorkloadGroup implements Writable, GsonPostProcessable {
this.properties = properties;
this.version = version;
if (properties.containsKey(MEMORY_LIMIT)) {
- String memoryLimitString = properties.get(MEMORY_LIMIT);
- this.memoryLimitPercent = Double.parseDouble(
- memoryLimitString.substring(0, memoryLimitString.length() - 1));
+ setMemLimitPercent(properties);
}
if (properties.containsKey(ENABLE_MEMORY_OVERCOMMIT)) {
properties.put(ENABLE_MEMORY_OVERCOMMIT, properties.get(ENABLE_MEMORY_OVERCOMMIT).toLowerCase());
@@ -221,15 +240,22 @@ public class WorkloadGroup implements Writable, GsonPostProcessable {
}
if (properties.containsKey(CPU_SHARE)) {
- String cpuShare = properties.get(CPU_SHARE);
- if (!StringUtils.isNumeric(cpuShare) || Long.parseLong(cpuShare) <= 0) {
- throw new DdlException(CPU_SHARE + " " + cpuShare + " requires a positive integer.");
+ String inputValue = properties.get(CPU_SHARE);
+ try {
+ int cpuShareI = Integer.parseInt(inputValue);
+ if (cpuShareI <= 0 && cpuShareI != -1) {
+ throw new NumberFormatException();
+ }
+ } catch (NumberFormatException e) {
+ throw new DdlException(
+ "The allowed " + CPU_SHARE + " value is -1 or a
positive integer, but input value is "
+ + inputValue);
}
}
if (properties.containsKey(CPU_HARD_LIMIT)) {
- String cpuHardLimit = properties.get(CPU_HARD_LIMIT);
- String originValue = cpuHardLimit;
+ String inputValue = properties.get(CPU_HARD_LIMIT);
+ String cpuHardLimit = inputValue;
try {
boolean endWithSign = false;
if (cpuHardLimit.endsWith("%")) {
@@ -246,33 +272,39 @@ public class WorkloadGroup implements Writable, GsonPostProcessable {
}
} catch (NumberFormatException e) {
throw new DdlException(
- "workload group's " + WorkloadGroup.CPU_HARD_LIMIT
- + " must be a positive integer[1,100] or -1,
but input value is " + originValue);
+ "The allowed " + WorkloadGroup.CPU_HARD_LIMIT
+ + " value is -1 or a positive integer between
1 and 100, but input value is "
+ + inputValue);
}
}
if (properties.containsKey(MEMORY_LIMIT)) {
- String memoryLimit = properties.get(MEMORY_LIMIT);
- if (!memoryLimit.endsWith("%")) {
- throw new DdlException(MEMORY_LIMIT + " " + memoryLimit + " requires a percentage and ends with a '%'");
+ String memoryLimitStr = properties.get(MEMORY_LIMIT);
+ if (!memoryLimitStr.endsWith("%") && !"-1".equals(memoryLimitStr)) {
+ throw new DdlException(
+ MEMORY_LIMIT + " requires a percentage value which ends with a '%' or -1, but input value is "
+ + memoryLimitStr);
}
- String memLimitErr = MEMORY_LIMIT + " " + memoryLimit + " requires a positive floating point number.";
- try {
- if (Double.parseDouble(memoryLimit.substring(0, memoryLimit.length() - 1)) <= 0) {
- throw new DdlException(memLimitErr);
- }
- } catch (NumberFormatException e) {
- if (LOG.isDebugEnabled()) {
- LOG.debug(memLimitErr, e);
+ if (!"-1".equals(memoryLimitStr)) {
+ try {
+ double memLimitD = Double.parseDouble(memoryLimitStr.substring(0, memoryLimitStr.length() - 1));
+ if (memLimitD <= 0) {
+ throw new NumberFormatException();
+ }
+ } catch (NumberFormatException e) {
+ throw new DdlException("The allowed " + MEMORY_LIMIT
+ + " value is a positive floating point number, but
input value is " + memoryLimitStr);
}
- throw new DdlException(memLimitErr);
}
}
if (properties.containsKey(ENABLE_MEMORY_OVERCOMMIT)) {
- String value = properties.get(ENABLE_MEMORY_OVERCOMMIT).toLowerCase();
+ String inputValue = properties.get(ENABLE_MEMORY_OVERCOMMIT);
+ String value = inputValue.toLowerCase();
if (!("true".equals(value) || "false".equals(value))) {
- throw new DdlException("The value of '" +
ENABLE_MEMORY_OVERCOMMIT + "' must be true or false.");
+ throw new DdlException(
+ "The value of '" + ENABLE_MEMORY_OVERCOMMIT + "' must
be true or false. but input value is "
+ + inputValue);
}
}
@@ -285,7 +317,8 @@ public class WorkloadGroup implements Writable, GsonPostProcessable {
}
} catch (NumberFormatException e) {
throw new DdlException(
- SCAN_THREAD_NUM + " must be a positive integer or -1. but input value is " + value);
+ "The allowed " + SCAN_THREAD_NUM + " value is -1 or a positive integer. but input value is "
+ + value);
}
}
@@ -300,7 +333,8 @@ public class WorkloadGroup implements Writable, GsonPostProcessable {
maxRemoteScanNum = intValue;
} catch (NumberFormatException e) {
throw new DdlException(
- MAX_REMOTE_SCAN_THREAD_NUM + " must be a positive integer or -1. but input value is " + value);
+ "The allowed " + MAX_REMOTE_SCAN_THREAD_NUM
+ + " value is -1 or a positive integer. but input value is " + value);
}
}
@@ -315,7 +349,8 @@ public class WorkloadGroup implements Writable, GsonPostProcessable {
minRemoteScanNum = intValue;
} catch (NumberFormatException e) {
throw new DdlException(
- MIN_REMOTE_SCAN_THREAD_NUM + " must be a positive integer or -1. but input value is " + value);
+ "The allowed " + MIN_REMOTE_SCAN_THREAD_NUM
+ + " value is -1 or a positive integer. but input value is " + value);
}
}
@@ -328,30 +363,42 @@ public class WorkloadGroup implements Writable, GsonPostProcessable {
// check queue property
if (properties.containsKey(MAX_CONCURRENCY)) {
+ String inputValue = properties.get(MAX_CONCURRENCY);
try {
- if (Integer.parseInt(properties.get(MAX_CONCURRENCY)) < 0) {
- throw new DdlException(MAX_CONCURRENCY + " requires a positive integer");
+ if (Integer.parseInt(inputValue) < 0) {
+ throw new NumberFormatException();
}
} catch (NumberFormatException e) {
- throw new DdlException(MAX_CONCURRENCY + " requires a positive integer");
+ throw new DdlException(
+ "The allowed " + MAX_CONCURRENCY
+ + " value is an integer greater than or equal
to 0, but input value is "
+ + inputValue);
}
}
if (properties.containsKey(MAX_QUEUE_SIZE)) {
+ String inputValue = properties.get(MAX_QUEUE_SIZE);
try {
- if (Integer.parseInt(properties.get(MAX_QUEUE_SIZE)) < 0) {
- throw new DdlException(MAX_QUEUE_SIZE + " requires a positive integer");
+ if (Integer.parseInt(inputValue) < 0) {
+ throw new NumberFormatException();
}
} catch (NumberFormatException e) {
- throw new DdlException(MAX_QUEUE_SIZE + " requires a positive integer");
+ throw new DdlException(
+ "The allowed " + MAX_QUEUE_SIZE
+ + " value is an integer greater than or equal
to 0, but input value is "
+ + inputValue);
}
}
if (properties.containsKey(QUEUE_TIMEOUT)) {
+ String inputValue = properties.get(QUEUE_TIMEOUT);
try {
- if (Integer.parseInt(properties.get(QUEUE_TIMEOUT)) < 0) {
- throw new DdlException(QUEUE_TIMEOUT + " requires a positive integer");
+ if (Integer.parseInt(inputValue) < 0) {
+ throw new NumberFormatException();
}
} catch (NumberFormatException e) {
- throw new DdlException(QUEUE_TIMEOUT + " requires a positive integer");
+ throw new DdlException(
+ "The allowed " + QUEUE_TIMEOUT
+ + " value is an integer greater than or equal
to 0, but input value is "
+ + inputValue);
}
}
@@ -363,14 +410,14 @@ public class WorkloadGroup implements Writable, GsonPostProcessable {
}
try {
int intValue = Integer.parseInt(lowVal);
- if ((intValue < 1 || intValue > 100) && intValue != -1) {
+ if ((intValue < 1 || intValue > 100)) {
throw new NumberFormatException();
}
lowWaterMark = intValue;
} catch (NumberFormatException e) {
throw new DdlException(
- SPILL_THRESHOLD_LOW_WATERMARK
- + " must be a positive integer(1 ~ 100) or -1.
but input value is "
+ "The allowed " + SPILL_THRESHOLD_LOW_WATERMARK
+ + " value is an integer value between 1 and
100, but input value is "
+ lowVal);
}
}
@@ -389,7 +436,8 @@ public class WorkloadGroup implements Writable, GsonPostProcessable {
highWaterMark = intValue;
} catch (NumberFormatException e) {
throw new DdlException(
- SPILL_THRESHOLD_HIGH_WATERMARK + " must be a positive integer(1 ~ 100). but input value is "
+ "The allowed " + SPILL_THRESHOLD_HIGH_WATERMARK
+ + " value is an integer value between 1 and 100, but input value is "
+ highVal);
}
}
@@ -403,13 +451,13 @@ public class WorkloadGroup implements Writable, GsonPostProcessable {
String readBytesVal = properties.get(READ_BYTES_PER_SECOND);
try {
long longVal = Long.parseLong(readBytesVal);
- boolean isValidValue = longVal == -1 || longVal > 0;
- if (!isValidValue) {
+ if (longVal <= 0 && longVal != -1) {
throw new NumberFormatException();
}
} catch (NumberFormatException e) {
throw new DdlException(
- READ_BYTES_PER_SECOND + " should be -1 or an integer
value bigger than 0, but input value is "
+ "The allowed " + READ_BYTES_PER_SECOND
+ + " value should be -1 or an positive integer,
but input value is "
+ readBytesVal);
}
}
@@ -418,13 +466,12 @@ public class WorkloadGroup implements Writable, GsonPostProcessable {
String readBytesVal = properties.get(REMOTE_READ_BYTES_PER_SECOND);
try {
long longVal = Long.parseLong(readBytesVal);
- boolean isValidValue = longVal == -1 || longVal > 0;
- if (!isValidValue) {
+ if (longVal <= 0 && longVal != -1) {
throw new NumberFormatException();
}
} catch (NumberFormatException e) {
- throw new DdlException(REMOTE_READ_BYTES_PER_SECOND
- + " should be -1 or an integer value bigger than 0,
but input value is " + readBytesVal);
+ throw new DdlException("The allowed " +
REMOTE_READ_BYTES_PER_SECOND
+ + " value should be -1 or an positive integer, but
input value is " + readBytesVal);
}
}
@@ -477,10 +524,6 @@ public class WorkloadGroup implements Writable, GsonPostProcessable {
return version;
}
- public double getMemoryLimitPercent() {
- return memoryLimitPercent;
- }
-
public int getMaxConcurrency() {
return maxConcurrency;
}
@@ -501,70 +544,36 @@ public class WorkloadGroup implements Writable, GsonPostProcessable {
// skip id,name,running query,waiting query
for (int i = 2; i < WorkloadGroupMgr.WORKLOAD_GROUP_PROC_NODE_TITLE_NAMES.size(); i++) {
String key = WorkloadGroupMgr.WORKLOAD_GROUP_PROC_NODE_TITLE_NAMES.get(i);
- if (CPU_HARD_LIMIT.equals(key)) {
- String val = properties.get(key);
- if (StringUtils.isEmpty(val)) { // cpu_hard_limit is not required
- row.add("-1");
- } else if ("-1".equals(val)) {
- row.add(val);
- } else {
- row.add(val + "%");
- }
- } else if (CPU_SHARE.equals(key) && !properties.containsKey(key)) {
- row.add("-1");
- } else if (MEMORY_LIMIT.equals(key) && !properties.containsKey(key)) {
- row.add("0%");
- } else if (ENABLE_MEMORY_OVERCOMMIT.equals(key) && !properties.containsKey(key)) {
- row.add("true");
- } else if (SCAN_THREAD_NUM.equals(key) && !properties.containsKey(key)) {
- row.add("-1");
- } else if (MAX_REMOTE_SCAN_THREAD_NUM.equals(key) && !properties.containsKey(key)) {
- row.add("-1");
- } else if (MIN_REMOTE_SCAN_THREAD_NUM.equals(key) && !properties.containsKey(key)) {
- row.add("-1");
- } else if (SPILL_THRESHOLD_LOW_WATERMARK.equals(key)) {
- String val = properties.get(key);
- if (StringUtils.isEmpty(val)) {
- row.add(SPILL_LOW_WATERMARK_DEFAULT_VALUE + "%");
- } else if ("-1".equals(val)) {
- row.add("-1");
- } else {
- row.add(val + "%");
- }
- } else if (SPILL_THRESHOLD_HIGH_WATERMARK.equals(key)) {
- String val = properties.get(key);
- if (StringUtils.isEmpty(val)) {
- row.add(SPILL_HIGH_WATERMARK_DEFAULT_VALUE + "%");
- } else {
- row.add(val + "%");
- }
- } else if (QueryQueue.RUNNING_QUERY_NUM.equals(key)) {
+ if (QueryQueue.RUNNING_QUERY_NUM.equals(key)) {
row.add(queryQueueDetail == null ? "0" : String.valueOf(queryQueueDetail.first));
} else if (QueryQueue.WAITING_QUERY_NUM.equals(key)) {
row.add(queryQueueDetail == null ? "0" : String.valueOf(queryQueueDetail.second));
- } else if (TAG.equals(key)) {
- String val = properties.get(key);
- if (StringUtils.isEmpty(val)) {
- row.add("");
- } else {
- row.add(val);
- }
- } else if (READ_BYTES_PER_SECOND.equals(key) || REMOTE_READ_BYTES_PER_SECOND.equals(key)) {
+ } else {
String val = properties.get(key);
if (StringUtils.isEmpty(val)) {
- row.add("-1");
+ row.add(ALL_PROPERTIES_DEFAULT_VALUE_MAP.get(key));
+ } else if ((CPU_HARD_LIMIT.equals(key) && !"-1".equals(val))
+ || SPILL_THRESHOLD_LOW_WATERMARK.equals(key)
+ || SPILL_THRESHOLD_HIGH_WATERMARK.equals(key)) {
+ row.add(val + "%");
} else {
row.add(val);
}
- } else {
- row.add(properties.get(key));
}
}
result.addRow(row);
}
- public int getCpuHardLimit() {
- return cpuHardLimit;
+ public int getCpuHardLimitWhenCalSum() {
+ return cpuHardLimit == -1 ? 0 : cpuHardLimit;
+ }
+
+ public double getMemoryLimitPercentWhenCalSum() {
+ return memoryLimitPercent == -1 ? 0 : memoryLimitPercent;
+ }
+
+ public double getMemoryLimitPercent() {
+ return memoryLimitPercent;
}
public Optional<Set<String>> getTag() {
@@ -686,12 +695,16 @@ public class WorkloadGroup implements Writable, GsonPostProcessable {
return GsonUtils.GSON.fromJson(json, WorkloadGroup.class);
}
+ void setMemLimitPercent(Map<String, String> props) {
+ String memoryLimitString = props.get(MEMORY_LIMIT);
+ this.memoryLimitPercent = "-1".equals(memoryLimitString) ? -1
+ : Double.parseDouble(memoryLimitString.substring(0, memoryLimitString.length() - 1));
+ }
+
@Override
public void gsonPostProcess() throws IOException {
if (properties.containsKey(MEMORY_LIMIT)) {
- String memoryLimitString = properties.get(MEMORY_LIMIT);
- this.memoryLimitPercent = Double.parseDouble(memoryLimitString.substring(0,
- memoryLimitString.length() - 1));
+ setMemLimitPercent(properties);
}
if (properties.containsKey(CPU_HARD_LIMIT)) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/resource/workloadgroup/WorkloadGroupMgr.java b/fe/fe-core/src/main/java/org/apache/doris/resource/workloadgroup/WorkloadGroupMgr.java
index d21bcc5ace5..13be35ce4f0 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/resource/workloadgroup/WorkloadGroupMgr.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/resource/workloadgroup/WorkloadGroupMgr.java
@@ -426,17 +426,17 @@ public class WorkloadGroupMgr extends MasterDaemon implements Writable, GsonPost
continue;
}
- if (wg.getCpuHardLimit() > 0) {
- sumOfAllCpuHardLimit += wg.getCpuHardLimit();
+ if (wg.getCpuHardLimitWhenCalSum() > 0) {
+ sumOfAllCpuHardLimit += wg.getCpuHardLimitWhenCalSum();
}
- if (wg.getMemoryLimitPercent() > 0) {
- sumOfAllMemLimit += wg.getMemoryLimitPercent();
+ if (wg.getMemoryLimitPercentWhenCalSum() > 0) {
+ sumOfAllMemLimit += wg.getMemoryLimitPercentWhenCalSum();
}
}
// 2 sum current wg value
- sumOfAllMemLimit += newWg.getMemoryLimitPercent();
- sumOfAllCpuHardLimit += newWg.getCpuHardLimit();
+ sumOfAllMemLimit += newWg.getMemoryLimitPercentWhenCalSum();
+ sumOfAllCpuHardLimit += newWg.getCpuHardLimitWhenCalSum();
// 3 check total sum
if (sumOfAllMemLimit > 100.0 + 1e-6) {
diff --git a/fe/fe-core/src/test/java/org/apache/doris/resource/workloadgroup/WorkloadGroupTest.java b/fe/fe-core/src/test/java/org/apache/doris/resource/workloadgroup/WorkloadGroupTest.java
index f99fd4f3526..121f46fe75b 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/resource/workloadgroup/WorkloadGroupTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/resource/workloadgroup/WorkloadGroupTest.java
@@ -62,7 +62,7 @@ public class WorkloadGroupTest {
WorkloadGroup.create(name1, properties1);
Assert.fail();
} catch (DdlException e) {
- Assert.assertTrue(e.getMessage().contains("requires a positive
integer."));
+ Assert.assertTrue(e.getMessage().contains("value is -1 or a
positive integer"));
}
properties1.put(WorkloadGroup.CPU_SHARE, "cpu");
@@ -70,7 +70,7 @@ public class WorkloadGroupTest {
WorkloadGroup.create(name1, properties1);
Assert.fail();
} catch (DdlException e) {
- Assert.assertTrue(e.getMessage().contains("requires a positive
integer."));
+ Assert.assertTrue(e.getMessage().contains("value is -1 or a
positive integer"));
}
}
diff --git a/regression-test/data/workload_manager_p0/test_curd_wlg.out b/regression-test/data/workload_manager_p0/test_curd_wlg.out
index 0914cd53ea4..aa922615dfe 100644
--- a/regression-test/data/workload_manager_p0/test_curd_wlg.out
+++ b/regression-test/data/workload_manager_p0/test_curd_wlg.out
@@ -11,7 +11,7 @@ test_group 10 10% true 2147483647 0 0 -1 -1 -1 -1
-- !show_del_wg_1 --
normal 20 50% true 2147483647 0 0 1% 16
-test_drop_wg 10 0% true 2147483647 0 0 -1 -1
+test_drop_wg 10 -1 true 2147483647 0 0 -1 -1
test_group 10 10% true 2147483647 0 0 -1 -1
-- !show_del_wg_2 --
@@ -54,24 +54,21 @@ normal 20 50% true 2147483647 0 0 1% 16
test_group 10 11% false 100 0 0 20% -1
-- !show_spill_1 --
-spill_group_test -1 0% true 2147483647 0 0 -1 -1 10% 10%
-
--- !show_spill_1 --
-spill_group_test -1 0% true 2147483647 0 0 -1 -1 -1 10%
+spill_group_test -1 -1 true 2147483647 0 0 -1 -1 10% 10%
-- !show_spill_2 --
-spill_group_test -1 0% true 2147483647 0 0 -1 -1 5% 10%
+spill_group_test -1 -1 true 2147483647 0 0 -1 -1 5% 10%
-- !show_spill_3 --
-spill_group_test -1 0% true 2147483647 0 0 -1 -1 5% 40%
+spill_group_test -1 -1 true 2147483647 0 0 -1 -1 5% 40%
-- !show_wg_tag --
tag1_mem_wg1 50% -1 mem_tag1
tag1_mem_wg2 49% -1 mem_tag1
tag1_mem_wg3 1% -1 mem_tag1
-tag1_wg1 0% 10% tag1
-tag1_wg2 0% 10% tag1
-tag1_wg3 0% 80% tag1
+tag1_wg1 -1 10% tag1
+tag1_wg2 -1 10% tag1
+tag1_wg3 -1 80% tag1
-- !select_remote_scan_num --
20 10
@@ -146,3 +143,12 @@ test_wg_priv_role1 test_wg_priv_g1 Usage_priv NO
-- !select_wgp_12 --
+-- !select_default_val_wg_1 --
+default_val_wg -1 -1 true 2147483647 0 0 -1 -1 -1 -1 50% 80% -1 -1
+
+-- !select_default_val_wg_2 --
+default_val_wg 1024 1% true 100 1 123 1% 1 12 10 50% 80% abc 123 10
+
+-- !select_default_val_wg_3 --
+default_val_wg -1 -1 true 2147483647 0 0 -1 -1 -1 -1 50% 80% -1 -1
+
diff --git a/regression-test/suites/workload_manager_p0/test_curd_wlg.groovy b/regression-test/suites/workload_manager_p0/test_curd_wlg.groovy
index 96de9535314..43bffe61413 100644
--- a/regression-test/suites/workload_manager_p0/test_curd_wlg.groovy
+++ b/regression-test/suites/workload_manager_p0/test_curd_wlg.groovy
@@ -31,6 +31,7 @@ suite("test_crud_wlg") {
sql "drop workload group if exists tag1_mem_wg1;"
sql "drop workload group if exists tag1_mem_wg2;"
sql "drop workload group if exists tag1_mem_wg3;"
+ sql "drop workload group if exists tag1_mem_wg4;"
sql "drop workload group if exists bypass_group;"
sql """
@@ -101,13 +102,13 @@ suite("test_crud_wlg") {
test {
sql "alter workload group normal properties ( 'cpu_share'='-2' );"
- exception "requires a positive integer"
+ exception "The allowed cpu_share value is -1 or a positive integer"
}
test {
sql "alter workload group normal properties ( 'scan_thread_num'='0' );"
- exception "scan_thread_num must be a positive integer or -1"
+ exception "The allowed scan_thread_num value is -1 or a positive
integer"
}
sql "drop workload group if exists test_group;"
@@ -157,7 +158,7 @@ suite("test_crud_wlg") {
test {
sql "alter workload group test_group properties (
'cpu_hard_limit'='101%' );"
- exception "must be a positive integer"
+ exception "The allowed cpu_hard_limit value is -1 or a positive
integer"
}
sql "alter workload group test_group properties ( 'cpu_hard_limit'='99%'
);"
@@ -200,39 +201,39 @@ suite("test_crud_wlg") {
test {
sql "alter workload group test_group properties (
'max_concurrency'='-1' );"
- exception "requires a positive integer"
+ exception "The allowed max_concurrency value is an integer greater
than or equal to 0"
}
test {
sql "alter workload group test_group properties (
'max_queue_size'='-1' );"
- exception "requires a positive integer"
+ exception "The allowed max_queue_size value is an integer greater than
or equal to 0"
}
test {
sql "alter workload group test_group properties ( 'queue_timeout'='-1'
);"
- exception "requires a positive integer"
+ exception "The allowed queue_timeout value is an integer greater than
or equal to 0"
}
test {
sql "alter workload group test_group
properties('read_bytes_per_second'='0')"
- exception "an integer value bigger than"
+ exception "The allowed read_bytes_per_second value should be -1 or an
positive integer"
}
test {
sql "alter workload group test_group
properties('read_bytes_per_second'='-2')"
- exception "an integer value bigger than"
+ exception "The allowed read_bytes_per_second value should be -1 or an
positive integer"
}
test {
sql "alter workload group test_group
properties('remote_read_bytes_per_second'='0')"
- exception "an integer value bigger than"
+ exception "The allowed remote_read_bytes_per_second value should be -1
or an positive integer"
}
test {
sql "alter workload group test_group
properties('remote_read_bytes_per_second'='-2')"
- exception "an integer value bigger than"
+ exception "The allowed remote_read_bytes_per_second value should be -1
or an positive integer"
}
sql "alter workload group test_group properties ( 'max_concurrency'='100'
);"
@@ -246,12 +247,12 @@ suite("test_crud_wlg") {
test {
sql "create workload group if not exists test_group2 " +
"properties ( " +
- " 'cpu_share'='-1', " +
+ " 'cpu_share'='-2', " +
" 'memory_limit'='1%', " +
" 'enable_memory_overcommit'='true' " +
");"
- exception "requires a positive integer"
+ exception "The allowed cpu_share value is -1 or a positive integer"
}
// failed for mem_limit
@@ -301,7 +302,7 @@ suite("test_crud_wlg") {
" 'cpu_hard_limit'='120%' " +
");"
- exception "must be a positive integer"
+ exception "a positive integer between 1 and 100"
}
test {
@@ -435,9 +436,6 @@ suite("test_crud_wlg") {
}
// 2 alter low
- sql "alter workload group spill_group_test properties (
'spill_threshold_low_watermark'='-1' );"
- qt_show_spill_1 "select
name,cpu_share,memory_limit,enable_memory_overcommit,max_concurrency,max_queue_size,queue_timeout,cpu_hard_limit,scan_thread_num,spill_threshold_low_watermark,spill_threshold_high_watermark
from information_schema.workload_groups where name in ('spill_group_test');"
-
sql "alter workload group spill_group_test properties (
'spill_threshold_low_watermark'='5%' );"
qt_show_spill_2 "select
name,cpu_share,memory_limit,enable_memory_overcommit,max_concurrency,max_queue_size,queue_timeout,cpu_hard_limit,scan_thread_num,spill_threshold_low_watermark,spill_threshold_high_watermark
from information_schema.workload_groups where name in ('spill_group_test');"
@@ -448,22 +446,22 @@ suite("test_crud_wlg") {
test {
sql "alter workload group spill_group_test properties (
'spill_threshold_low_watermark'='0%' );"
- exception "must be a positive integer"
+ exception "value is an integer value between 1 and 100"
}
test {
sql "alter workload group spill_group_test properties (
'spill_threshold_low_watermark'='101%' );"
- exception "must be a positive integer"
+ exception "value is an integer value between 1 and 100"
}
test {
sql "create workload group if not exists spill_group_test2 properties
( 'spill_threshold_low_watermark'='0%')"
- exception "must be a positive integer"
+ exception "value is an integer value between 1 and 100"
}
test {
sql "create workload group if not exists spill_group_test2 properties
( 'spill_threshold_low_watermark'='101%')"
- exception "must be a positive integer"
+ exception "value is an integer value between 1 and 100"
}
// 3 alter high
@@ -476,22 +474,22 @@ suite("test_crud_wlg") {
test {
sql "alter workload group spill_group_test properties ( 'spill_threshold_high_watermark'='0%' );"
- exception "must be a positive integer"
+ exception "value is an integer value between 1 and 100"
}
test {
sql "alter workload group spill_group_test properties ( 'spill_threshold_high_watermark'='101%' );"
- exception "must be a positive integer"
+ exception "value is an integer value between 1 and 100"
}
test {
sql "create workload group if not exists spill_group_test2 properties ( 'spill_threshold_high_watermark'='0%')"
- exception "must be a positive integer"
+ exception "value is an integer value between 1 and 100"
}
test {
sql "create workload group if not exists spill_group_test2 properties ( 'spill_threshold_high_watermark'='101%')"
- exception "must be a positive integer"
+ exception "value is an integer value between 1 and 100"
}
sql "drop workload group test_group;"
@@ -506,17 +504,17 @@ suite("test_crud_wlg") {
test {
sql "create workload group if not exists tag1_wg1 properties ( 'cpu_hard_limit'='101%', 'tag'='tag1')"
- exception "must be a positive integer"
+ exception "a positive integer between 1 and 100"
}
test {
sql "create workload group if not exists tag1_wg1 properties ( 'cpu_hard_limit'='-2%', 'tag'='tag1')"
- exception "must be a positive integer"
+ exception "a positive integer between 1 and 100"
}
test {
sql "create workload group if not exists tag1_wg1 properties ( 'cpu_hard_limit'='-1%', 'tag'='tag1')"
- exception "must be a positive integer"
+ exception "a positive integer between 1 and 100"
}
sql "create workload group if not exists tag1_wg1 properties ( 'cpu_hard_limit'='10%', 'tag'='tag1');"
@@ -567,6 +565,14 @@ suite("test_crud_wlg") {
sql "alter workload group tag1_mem_wg3 properties ( 'tag'='mem_tag1' );"
+ sql "create workload group tag1_mem_wg4
properties('memory_limit'='-1','tag'='mem_tag1');"
+
+ test {
+ sql "alter workload group tag1_mem_wg4 properties (
'memory_limit'='1%' );"
+ exception "cannot be greater than 100.0%"
+ }
+
+
qt_show_wg_tag "select name,MEMORY_LIMIT,CPU_HARD_LIMIT,TAG from
information_schema.workload_groups where name
in('tag1_wg1','tag1_wg2','tag2_wg1','tag1_wg3','tag1_mem_wg1','tag1_mem_wg2','tag1_mem_wg3')
order by tag,name;"
// test bypass
@@ -651,6 +657,7 @@ suite("test_crud_wlg") {
sql "drop workload group tag1_mem_wg1;"
sql "drop workload group tag1_mem_wg2;"
sql "drop workload group tag1_mem_wg3;"
+ sql "drop workload group tag1_mem_wg4;"
sql "drop workload group bypass_group;"
// test workload group privilege table
@@ -710,4 +717,50 @@ suite("test_crud_wlg") {
qt_select_wgp_12 "select
GRANTEE,WORKLOAD_GROUP_NAME,PRIVILEGE_TYPE,IS_GRANTABLE from
information_schema.workload_group_privileges where grantee like
'%test_wg_priv%' order by
GRANTEE,WORKLOAD_GROUP_NAME,PRIVILEGE_TYPE,IS_GRANTABLE; "
sql "drop workload group test_wg_priv_g1"
+ // test default value
+ sql "drop workload group if exists default_val_wg"
+ sql "create workload group default_val_wg
properties('enable_memory_overcommit'='true');"
+ qt_select_default_val_wg_1 "select
name,cpu_share,memory_limit,enable_memory_overcommit,max_concurrency,max_queue_size,queue_timeout,cpu_hard_limit,scan_thread_num,max_remote_scan_thread_num,min_remote_scan_thread_num,spill_threshold_low_watermark,spill_threshold_high_watermark,tag,read_bytes_per_second,remote_read_bytes_per_second
from information_schema.workload_groups where name = 'default_val_wg'"
+
+ sql """
+ alter workload group default_val_wg properties(
+ 'cpu_share'='1024',
+ 'memory_limit'='1%',
+ 'enable_memory_overcommit'='true',
+ 'max_concurrency'='100',
+ 'max_queue_size'='1',
+ 'queue_timeout'='123',
+ 'cpu_hard_limit'='1%',
+ 'scan_thread_num'='1',
+ 'max_remote_scan_thread_num'='12',
+ 'min_remote_scan_thread_num'='10',
+ 'tag'='abc',
+ 'read_bytes_per_second'='123',
+ 'remote_read_bytes_per_second'='10');
+ """
+
+ qt_select_default_val_wg_2 "select
name,cpu_share,memory_limit,enable_memory_overcommit,max_concurrency,max_queue_size,queue_timeout,cpu_hard_limit,scan_thread_num,max_remote_scan_thread_num,min_remote_scan_thread_num,spill_threshold_low_watermark,spill_threshold_high_watermark,tag,read_bytes_per_second,remote_read_bytes_per_second
from information_schema.workload_groups where name = 'default_val_wg'"
+
+ sql """
+ alter workload group default_val_wg properties(
+ 'cpu_share'='-1',
+ 'memory_limit'='-1',
+ 'enable_memory_overcommit'='true',
+ 'max_concurrency'='2147483647',
+ 'max_queue_size'='0',
+ 'queue_timeout'='0',
+ 'cpu_hard_limit'='-1',
+ 'scan_thread_num'='-1',
+ 'max_remote_scan_thread_num'='-1',
+ 'min_remote_scan_thread_num'='-1',
+ 'tag'='',
+ 'read_bytes_per_second'='-1',
+ 'remote_read_bytes_per_second'='-1'
+ );
+ """
+
+ qt_select_default_val_wg_3 "select
name,cpu_share,memory_limit,enable_memory_overcommit,max_concurrency,max_queue_size,queue_timeout,cpu_hard_limit,scan_thread_num,max_remote_scan_thread_num,min_remote_scan_thread_num,spill_threshold_low_watermark,spill_threshold_high_watermark,tag,read_bytes_per_second,remote_read_bytes_per_second
from information_schema.workload_groups where name = 'default_val_wg'"
+
+ sql "drop workload group if exists default_val_wg"
+
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]