This is an automated email from the ASF dual-hosted git repository.
dmeden pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/trafficserver.git
The following commit(s) were added to refs/heads/master by this push:
new e54e8c8da4 records - Honour the minimum value for `--maxRecords` (#11816)
e54e8c8da4 is described below
commit e54e8c8da41f9a9364bb8b15353fe41bba8fd750
Author: Damian Meden <[email protected]>
AuthorDate: Tue Oct 22 09:30:10 2024 +0200
records - Honour the minimum value for `--maxRecords` (#11816)
* records - Honour the minimum value for --maxRecords.
Although the default and minimum value is 2048 (as advertised), ATS will
accept any value below that, which ends up crashing ATS.
This change falls back to the advertised default value whenever the passed
value is invalid or less than the default.
This also makes the parameter actually functional.
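For context, the rule this change enforces boils down to the clamp sketched
below. This is a condensed, standalone illustration only; the actual
implementation is the new check_max_records_argument() callback in the diff
that follows, and kDefaultMaxRecords here merely stands in for
REC_DEFAULT_ELEMENTS_SIZE (2048).

// Standalone sketch of the --maxRecords clamping rule (illustrative names).
#include <cstdio>
#include <exception>
#include <string>

constexpr int kDefaultMaxRecords = 2048; // stands in for REC_DEFAULT_ELEMENTS_SIZE

// Returns the value to use for the records table size: the parsed value if it
// is at or above the default/minimum, otherwise the default.
static int
clamp_max_records(const char *val)
{
  try {
    int parsed = std::stoi(val);
    if (parsed < kDefaultMaxRecords) {
      std::fprintf(stderr, "value %d is below the minimum, using %d\n", parsed, kDefaultMaxRecords);
      return kDefaultMaxRecords;
    }
    return parsed; // values at or above the default are kept as-is
  } catch (std::exception const &) {
    std::fprintf(stderr, "invalid value '%s', using %d\n", val, kDefaultMaxRecords);
    return kDefaultMaxRecords;
  }
}

int
main()
{
  std::printf("%d\n", clamp_max_records("1000")); // below minimum -> 2048
  std::printf("%d\n", clamp_max_records("abc"));  // unparsable    -> 2048
  std::printf("%d\n", clamp_max_records("5000")); // accepted      -> 5000
}

With this in place, a too-small or unparsable --maxRecords no longer leads to
a crash later in RecAlloc(); values above 2048 are honoured as given.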
---
src/records/P_RecCore.cc | 4 ++
src/records/P_RecDefs.h | 2 +-
src/records/RecCore.cc | 2 +-
src/records/RecUtils.cc | 4 +-
src/records/RecordsConfigUtils.cc | 14 +++--
src/traffic_server/traffic_server.cc | 66 ++++++++++++++--------
.../records/ts_max_records_param.test.py | 56 ++++++++++++++++++
7 files changed, 115 insertions(+), 33 deletions(-)
diff --git a/src/records/P_RecCore.cc b/src/records/P_RecCore.cc
index 810ceb14cd..c84df8a0b2 100644
--- a/src/records/P_RecCore.cc
+++ b/src/records/P_RecCore.cc
@@ -188,6 +188,10 @@ RecSetRecord(RecT rec_type, const char *name, RecDataT data_type, RecData *data,
goto Ldone;
}
r1 = RecAlloc(rec_type, name, data_type);
+ if (r1 == nullptr) {
+ err = REC_ERR_FAIL;
+ goto Ldone;
+ }
RecDataSet(data_type, &(r1->data), data);
if (REC_TYPE_IS_STAT(r1->rec_type) && (data_raw != nullptr)) {
r1->stat_meta.data_raw = *data_raw;
diff --git a/src/records/P_RecDefs.h b/src/records/P_RecDefs.h
index 65e65fd4d2..a712d2dc8b 100644
--- a/src/records/P_RecDefs.h
+++ b/src/records/P_RecDefs.h
@@ -30,7 +30,7 @@
// We need at least this many internal record entries for our configurations and metrics.
// This may need adjustments if we make significant additions to librecords. Note that
// plugins are using their own metrics systems.
-#define REC_MAX_RECORDS 2048
+#define REC_DEFAULT_ELEMENTS_SIZE 2048
#define REC_CONFIG_UPDATE_INTERVAL_MS 3000
#define REC_REMOTE_SYNC_INTERVAL_MS 5000
diff --git a/src/records/RecCore.cc b/src/records/RecCore.cc
index ec83d64cbd..57108943fc 100644
--- a/src/records/RecCore.cc
+++ b/src/records/RecCore.cc
@@ -43,7 +43,7 @@ using ts::Metrics;
// This is needed to manage the size of the librecords record. It can't be static, because it needs to be modified
// and used (read) from several binaries / modules.
-int max_records_entries = REC_MAX_RECORDS;
+int max_records_entries = REC_DEFAULT_ELEMENTS_SIZE;
static bool g_initialized = false;
RecRecord *g_records = nullptr;
std::unordered_map<std::string, RecRecord *> g_records_ht;
diff --git a/src/records/RecUtils.cc b/src/records/RecUtils.cc
index bedd51c355..ffea6c74aa 100644
--- a/src/records/RecUtils.cc
+++ b/src/records/RecUtils.cc
@@ -51,8 +51,8 @@ RecRecordFree(RecRecord *r)
RecRecord *
RecAlloc(RecT rec_type, const char *name, RecDataT data_type)
{
- if (g_num_records >= REC_MAX_RECORDS) {
- Warning("too many stats/configs, please report a bug to
[email protected]");
+ if (g_num_records >= max_records_entries) {
+ Fatal("Too many config records already registered (%d). Hint: increase
--maxRecords param.", max_records_entries);
return nullptr;
}
diff --git a/src/records/RecordsConfigUtils.cc b/src/records/RecordsConfigUtils.cc
index 03328e5067..5b887fc955 100644
--- a/src/records/RecordsConfigUtils.cc
+++ b/src/records/RecordsConfigUtils.cc
@@ -60,22 +60,22 @@ initialize_record(const RecordElement *record, void *)
}
RecDataSetFromString(record->value_type, &data, value);
-
+ RecErrT reg_status{REC_ERR_FAIL};
switch (record->value_type) {
case RECD_INT:
- RecRegisterConfigInt(type, record->name, data.rec_int, update, check, record->regex, source, access);
+ reg_status = RecRegisterConfigInt(type, record->name, data.rec_int, update, check, record->regex, source, access);
break;
case RECD_FLOAT:
- RecRegisterConfigFloat(type, record->name, data.rec_float, update, check, record->regex, source, access);
+ reg_status = RecRegisterConfigFloat(type, record->name, data.rec_float, update, check, record->regex, source, access);
break;
case RECD_STRING:
- RecRegisterConfigString(type, record->name, data.rec_string, update, check, record->regex, source, access);
+ reg_status = RecRegisterConfigString(type, record->name, data.rec_string, update, check, record->regex, source, access);
break;
case RECD_COUNTER:
- RecRegisterConfigCounter(type, record->name, data.rec_counter, update, check, record->regex, source, access);
+ reg_status = RecRegisterConfigCounter(type, record->name, data.rec_counter, update, check, record->regex, source, access);
break;
default:
@@ -83,6 +83,10 @@ initialize_record(const RecordElement *record, void *)
break;
} // switch
+ if (reg_status != REC_ERR_OKAY) {
+ ink_fatal("%s cannot be registered. Please check the logs.",
record->name);
+ }
+
RecDataZero(record->value_type, &data);
} else { // Everything else, except PROCESS, are stats. TODO: Should modularize this too like PROCESS was done.
ink_assert(REC_TYPE_IS_STAT(type));
diff --git a/src/traffic_server/traffic_server.cc b/src/traffic_server/traffic_server.cc
index f00e365cc1..12482c76d1 100644
--- a/src/traffic_server/traffic_server.cc
+++ b/src/traffic_server/traffic_server.cc
@@ -151,8 +151,9 @@ const long MAX_LOGIN = ink_login_name_max();
void init_ssl_ctx_callback(void *ctx, bool server);
-void load_ssl_file_callback(const char *ssl_file);
-void task_threads_started_callback();
+void load_ssl_file_callback(const char *ssl_file);
+void task_threads_started_callback();
+static void check_max_records_argument(const ArgumentDescription *arg, unsigned int nargs, const char *val);
int num_of_net_threads = ink_number_of_processors();
int num_accept_threads = 0;
@@ -209,39 +210,39 @@ int cmd_block = 0;
int delay_listen_for_cache = 0;
ArgumentDescription argument_descriptions[] = {
- {"net_threads", 'n', "Number of Net Threads",
"I",
&num_of_net_threads, "PROXY_NET_THREADS", nullptr},
- {"udp_threads", 'U', "Number of UDP Threads",
"I",
&num_of_udp_threads, "PROXY_UDP_THREADS", nullptr},
- {"accept_thread", 'a', "Use an Accept Thread",
"T",
&num_accept_threads, "PROXY_ACCEPT_THREAD", nullptr},
- {"httpport", 'p', "Port descriptor for HTTP Accept",
"S*",
&http_accept_port_descriptor, "PROXY_HTTP_ACCEPT_PORT", nullptr},
- {"disable_freelist", 'f', "Disable the freelist memory allocator",
"T",
&cmd_disable_freelist, "PROXY_DPRINTF_LEVEL", nullptr},
+ {"net_threads", 'n', "Number of Net Threads",
"I",
&num_of_net_threads, "PROXY_NET_THREADS", nullptr
},
+ {"udp_threads", 'U', "Number of UDP Threads",
"I",
&num_of_udp_threads, "PROXY_UDP_THREADS", nullptr
},
+ {"accept_thread", 'a', "Use an Accept Thread",
"T",
&num_accept_threads, "PROXY_ACCEPT_THREAD", nullptr
},
+ {"httpport", 'p', "Port descriptor for HTTP Accept",
"S*",
&http_accept_port_descriptor, "PROXY_HTTP_ACCEPT_PORT", nullptr
},
+ {"disable_freelist", 'f', "Disable the freelist memory allocator",
"T",
&cmd_disable_freelist, "PROXY_DPRINTF_LEVEL", nullptr
},
{"disable_pfreelist", 'F', "Disable the freelist memory allocator in
ProxyAllocator", "T",
&cmd_disable_pfreelist,
- "PROXY_DPRINTF_LEVEL",
nullptr},
+ "PROXY_DPRINTF_LEVEL",
nullptr },
{"maxRecords", 'm', "Max number of librecords metrics and
configurations (default & minimum: 2048)", "I",
&max_records_entries,
- "PROXY_MAX_RECORDS",
nullptr},
+ "PROXY_MAX_RECORDS",
&check_max_records_argument},
#if TS_HAS_TESTS
- {"regression", 'R', "Regression Level (quick:1..long:3)",
"I",
®ression_level, "PROXY_REGRESSION", nullptr},
- {"regression_test", 'r', "Run Specific Regression Test",
"S512", regression_test,
"PROXY_REGRESSION_TEST", nullptr},
- {"regression_list", 'l', "List Regression Tests",
"T", ®ression_list,
"PROXY_REGRESSION_LIST", nullptr},
+ {"regression", 'R', "Regression Level (quick:1..long:3)",
"I",
®ression_level, "PROXY_REGRESSION", nullptr
},
+ {"regression_test", 'r', "Run Specific Regression Test",
"S512", regression_test,
"PROXY_REGRESSION_TEST", nullptr },
+ {"regression_list", 'l', "List Regression Tests",
"T", ®ression_list,
"PROXY_REGRESSION_LIST", nullptr },
#endif // TS_HAS_TESTS
#if TS_USE_DIAGS
- {"debug_tags", 'T', "Vertical-bar-separated Debug Tags",
"S1023", error_tags,
"PROXY_DEBUG_TAGS", nullptr},
- {"action_tags", 'B', "Vertical-bar-separated Behavior Tags",
"S1023", action_tags,
"PROXY_BEHAVIOR_TAGS", nullptr},
+ {"debug_tags", 'T', "Vertical-bar-separated Debug Tags",
"S1023", error_tags,
"PROXY_DEBUG_TAGS", nullptr },
+ {"action_tags", 'B', "Vertical-bar-separated Behavior Tags",
"S1023", action_tags,
"PROXY_BEHAVIOR_TAGS", nullptr },
#endif
- {"interval", 'i', "Statistics Interval",
"I", &show_statistics,
"PROXY_STATS_INTERVAL", nullptr},
+ {"interval", 'i', "Statistics Interval",
"I", &show_statistics,
"PROXY_STATS_INTERVAL", nullptr },
{"command", 'C',
"Maintenance Command to Execute\n"
- " Commands: list, check, clear, clear_cache, clear_hostdb,
verify_config, verify_global_plugin, verify_remap_plugin, help", "S511",
&command_string, "PROXY_COMMAND_STRING", nullptr},
- {"conf_dir", 'D', "config dir to verify",
"S511", &conf_dir,
"PROXY_CONFIG_CONFIG_DIR", nullptr},
- {"clear_hostdb", 'k', "Clear HostDB on Startup",
"F",
&auto_clear_hostdb_flag, "PROXY_CLEAR_HOSTDB", nullptr},
- {"clear_cache", 'K', "Clear Cache on Startup",
"F",
&cacheProcessor.auto_clear_flag, "PROXY_CLEAR_CACHE", nullptr},
- {"bind_stdout", '-', "Regular file to bind stdout to",
"S512", &bind_stdout,
"PROXY_BIND_STDOUT", nullptr},
- {"bind_stderr", '-', "Regular file to bind stderr to",
"S512", &bind_stderr,
"PROXY_BIND_STDERR", nullptr},
- {"accept_mss", '-', "MSS for client connections",
"I", &accept_mss,
nullptr, nullptr},
- {"poll_timeout", 't', "poll timeout in milliseconds",
"I", &poll_timeout,
nullptr, nullptr},
- {"block", '-', "block for debug attach",
"T", &cmd_block,
nullptr, nullptr},
+ " Commands: list, check, clear, clear_cache, clear_hostdb,
verify_config, verify_global_plugin, verify_remap_plugin, help", "S511",
&command_string, "PROXY_COMMAND_STRING", nullptr
},
+ {"conf_dir", 'D', "config dir to verify",
"S511", &conf_dir,
"PROXY_CONFIG_CONFIG_DIR", nullptr },
+ {"clear_hostdb", 'k', "Clear HostDB on Startup",
"F",
&auto_clear_hostdb_flag, "PROXY_CLEAR_HOSTDB", nullptr
},
+ {"clear_cache", 'K', "Clear Cache on Startup",
"F",
&cacheProcessor.auto_clear_flag, "PROXY_CLEAR_CACHE", nullptr
},
+ {"bind_stdout", '-', "Regular file to bind stdout to",
"S512", &bind_stdout,
"PROXY_BIND_STDOUT", nullptr },
+ {"bind_stderr", '-', "Regular file to bind stderr to",
"S512", &bind_stderr,
"PROXY_BIND_STDERR", nullptr },
+ {"accept_mss", '-', "MSS for client connections",
"I", &accept_mss,
nullptr, nullptr },
+ {"poll_timeout", 't', "poll timeout in milliseconds",
"I", &poll_timeout,
nullptr, nullptr },
+ {"block", '-', "block for debug attach",
"T", &cmd_block,
nullptr, nullptr },
HELP_ARGUMENT_DESCRIPTION(),
VERSION_ARGUMENT_DESCRIPTION(),
RUNROOT_ARGUMENT_DESCRIPTION(),
@@ -2392,5 +2393,22 @@ task_threads_started_callback()
hook = hook->next();
}
}
+static void
+check_max_records_argument(const ArgumentDescription * /* ATS_UNUSED arg*/, unsigned int /* nargs ATS_UNUSED */, const char *val)
+{
+ int32_t cmd_arg{0};
+ try {
+ cmd_arg = std::stoi(val);
+ if (cmd_arg < REC_DEFAULT_ELEMENTS_SIZE) {
+ fprintf(stderr, "[WARNING] Passed maxRecords value=%d is lower than the
default value %d. Default will be used.\n", cmd_arg,
+ REC_DEFAULT_ELEMENTS_SIZE);
+ max_records_entries = REC_DEFAULT_ELEMENTS_SIZE;
+ }
+ // max_records_entries keeps the passed value.
+ } catch (std::exception const &ex) {
+ fprintf(stderr, "[ERROR] Invalid %d value for maxRecords. Default %d will
be used.\n", cmd_arg, REC_DEFAULT_ELEMENTS_SIZE);
+ max_records_entries = REC_DEFAULT_ELEMENTS_SIZE;
+ }
+}
} // end anonymous namespace
diff --git a/tests/gold_tests/records/ts_max_records_param.test.py b/tests/gold_tests/records/ts_max_records_param.test.py
new file mode 100644
index 0000000000..573b5e3283
--- /dev/null
+++ b/tests/gold_tests/records/ts_max_records_param.test.py
@@ -0,0 +1,56 @@
+'''
+'''
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from jsonrpc import Notification, Request, Response
+
+Test.Summary = 'Basic test for traffic_server --maxRecords behavior when setting different values.'
+
+maxRecords = 1000
+
+# 0 - maxRecords below the default value(2048). Traffic server should warn about this and use the default value.
+ts = Test.MakeATSProcess(f"ts{maxRecords}", command=f"traffic_server --maxRecords {maxRecords}")
+tr = Test.AddTestRun(f"--maxRecords {maxRecords}")
+tr.Processes.Default.Command = 'echo 1'
+tr.Processes.Default.ReturnCode = 0
+tr.Processes.Default.StartBefore(ts)
+tr.StillRunningAfter = ts
+ts.Streams.All = Testers.ContainsExpression(
+ f"Passed maxRecords value={maxRecords} is lower than the default value
2048. Default will be used.",
+ "It should use the default value")
+
+# 1 - maxRecords with just invalid number. Traffic server should warn about this and use the default value.
+maxRecords = "abc"
+ts = Test.MakeATSProcess(f"ts{maxRecords}", command=f"traffic_server --maxRecords {maxRecords}")
+tr = Test.AddTestRun(f"--maxRecords {maxRecords}")
+tr.Processes.Default.Command = 'echo 1'
+tr.Processes.Default.ReturnCode = 0
+tr.Processes.Default.StartBefore(ts)
+tr.StillRunningAfter = ts
+ts.Streams.All = Testers.ContainsExpression(
+ f"Invalid 0 value for maxRecords. Default 2048 will be used.", "It should
use the default value")
+
+# 2 - maxRecords over the default value
+maxRecords = 5000
+ts = Test.MakeATSProcess(f"ts{maxRecords}", command=f"traffic_server
--maxRecords {maxRecords}")
+tr = Test.AddTestRun(f"--maxRecords {maxRecords}")
+tr.Processes.Default.Command = 'echo 1'
+tr.Processes.Default.ReturnCode = 0
+tr.Processes.Default.StartBefore(ts)
+tr.StillRunningAfter = ts
+# At least it should not crash.
+ts.Disk.traffic_out.Content = Testers.ContainsExpression(f"NOTE: records
parsing completed", "should all be good")