This is an automated email from the ASF dual-hosted git repository.
xuyang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/master by this push:
new 035657c5a1 [typo](comment) Fix a lot of spell errors in be comments (#14208)
035657c5a1 is described below
commit 035657c5a11c650d158ee126f86647cbbf41207f
Author: xy720 <[email protected]>
AuthorDate: Sat Nov 12 16:06:15 2022 +0800
[typo](comment) Fix a lot of spell errors in be comments (#14208)
fix typos.
---
be/src/common/config.h | 2 +-
be/src/common/logconfig.cpp | 2 +-
be/src/env/env_posix.cpp | 2 +-
be/src/exec/broker_scanner.cpp | 2 +-
be/src/exec/orc_scanner.cpp | 2 +-
be/src/exec/table_connector.cpp | 4 ++--
be/src/gutil/atomic_refcount.h | 4 ++--
be/src/gutil/atomicops.h | 2 +-
be/src/gutil/casts.h | 4 ++--
be/src/gutil/endian.h | 2 +-
be/src/gutil/linux_syscall_support.h | 2 +-
be/src/gutil/port.h | 4 ++--
be/src/gutil/ref_counted.h | 2 +-
be/src/http/action/stream_load.cpp | 2 +-
be/src/olap/collect_iterator.h | 6 +++---
be/src/olap/cumulative_compaction_policy.h | 6 +++---
be/src/olap/olap_common.h | 2 +-
be/src/olap/olap_server.cpp | 2 +-
be/src/olap/reader.h | 2 +-
be/src/olap/rowset/segment_v2/parsed_page.h | 2 +-
be/src/olap/rowset/segment_v2/segment_iterator.cpp | 4 ++--
be/src/olap/tablet_manager.cpp | 2 +-
be/src/olap/tuple_reader.h | 2 +-
be/src/runtime/buffered_block_mgr2.cc | 2 +-
be/src/runtime/buffered_tuple_stream3.h | 2 +-
be/src/runtime/collection_value.cpp | 4 ++--
be/src/runtime/datetime_value.cpp | 2 +-
be/src/runtime/runtime_filter_mgr.h | 2 +-
be/src/runtime/stream_load/stream_load_context.h | 4 ++--
be/src/util/doris_metrics.h | 2 +-
be/src/util/lru_cache.hpp | 2 +-
be/src/util/runtime_profile.h | 2 +-
be/src/util/thrift_util.cpp | 2 +-
be/src/util/time_lut.cpp | 2 +-
.../aggregate_functions/aggregate_function_java_udaf.h | 2 +-
.../aggregate_function_sequence_match.h | 2 +-
be/src/vec/columns/predicate_column.h | 2 +-
be/src/vec/common/field_visitors.h | 2 +-
be/src/vec/common/sort/sorter.cpp | 2 +-
be/src/vec/common/sort/topn_sorter.cpp | 2 +-
be/src/vec/exec/format/csv/csv_reader.h | 2 +-
be/src/vec/exec/format/json/new_json_reader.cpp | 4 ++--
be/src/vec/exec/join/vhash_join_node.cpp | 8 ++++----
be/src/vec/exec/join/vhash_join_node.h | 8 ++++----
be/src/vec/exec/scan/vfile_scanner.h | 4 ++--
be/src/vec/exec/scan/vscan_node.h | 2 +-
be/src/vec/exec/scan/vscanner.cpp | 2 +-
be/src/vec/exec/vjson_scanner.cpp | 6 +++---
be/src/vec/exec/vjson_scanner.h | 2 +-
be/src/vec/exec/vtable_function_node.h | 2 +-
be/src/vec/exprs/vectorized_agg_fn.cpp | 2 +-
be/src/vec/exprs/vexpr.h | 2 +-
be/src/vec/exprs/vin_predicate.cpp | 2 +-
be/src/vec/functions/plus.cpp | 2 +-
be/src/vec/runtime/vdatetime_value.cpp | 18 +++++++++---------
be/src/vec/runtime/vdatetime_value.h | 4 ++--
be/src/vec/runtime/vfile_result_writer.cpp | 2 +-
be/src/vec/runtime/vfile_result_writer.h | 2 +-
be/test/exec/broker_scanner_test.cpp | 2 +-
be/test/testutil/desc_tbl_builder.h | 2 +-
be/test/util/metrics_test.cpp | 2 +-
be/test/vec/function/function_test_util.h | 2 +-
62 files changed, 92 insertions(+), 92 deletions(-)
diff --git a/be/src/common/config.h b/be/src/common/config.h
index dbc58650f5..2061172b6a 100644
--- a/be/src/common/config.h
+++ b/be/src/common/config.h
@@ -94,7 +94,7 @@ CONF_Int32(download_worker_count, "1");
CONF_Int32(make_snapshot_worker_count, "5");
// the count of thread to release snapshot
CONF_Int32(release_snapshot_worker_count, "5");
-// the interval time(seconds) for agent report tasks signatrue to FE
+// the interval time(seconds) for agent report tasks signature to FE
CONF_mInt32(report_task_interval_seconds, "10");
// the interval time(seconds) for refresh storage policy from FE
CONF_mInt32(storage_refresh_storage_policy_task_interval_seconds, "5");
diff --git a/be/src/common/logconfig.cpp b/be/src/common/logconfig.cpp
index 829be25dfb..9e05e64a97 100644
--- a/be/src/common/logconfig.cpp
+++ b/be/src/common/logconfig.cpp
@@ -85,7 +85,7 @@ bool init_glog(const char* basename) {
}
// set log buffer level
- // defalut is 0
+ // default is 0
std::string& logbuflevel = config::log_buffer_level;
if (iequals(logbuflevel, "-1")) {
FLAGS_logbuflevel = -1;
diff --git a/be/src/env/env_posix.cpp b/be/src/env/env_posix.cpp
index 23e360cc48..9df22e2d45 100644
--- a/be/src/env/env_posix.cpp
+++ b/be/src/env/env_posix.cpp
@@ -689,7 +689,7 @@ Status PosixEnv::is_directory(const std::string& path, bool* is_dir) {
}
Status PosixEnv::canonicalize(const std::string& path, std::string* result) {
- // NOTE: we must use free() to release the buffer retruned by realpath(),
+ // NOTE: we must use free() to release the buffer returned by realpath(),
// because the buffer is allocated by malloc(), see `man 3 realpath`.
std::unique_ptr<char[], FreeDeleter> r(realpath(path.c_str(), nullptr));
if (r == nullptr) {
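For context, the realpath()/free() contract noted in this comment can be sketched in isolation (a minimal sketch; FreeDeleter here is a stand-in assumed to simply call free() on the malloc()-allocated buffer, mirroring the codebase's helper):

    #include <memory>
    #include <stdlib.h>
    #include <string>

    // realpath() returns a malloc()-allocated buffer, so it must be
    // released with free(), never delete (see `man 3 realpath`).
    struct FreeDeleter {
        void operator()(void* p) const { free(p); }
    };

    std::string canonical_path(const char* path) {
        std::unique_ptr<char, FreeDeleter> r(realpath(path, nullptr));
        return r ? std::string(r.get()) : std::string();
    }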
diff --git a/be/src/exec/broker_scanner.cpp b/be/src/exec/broker_scanner.cpp
index d65b413927..4eaebd57d6 100644
--- a/be/src/exec/broker_scanner.cpp
+++ b/be/src/exec/broker_scanner.cpp
@@ -269,7 +269,7 @@ void BrokerScanner::split_line(const Slice& line) {
size_t start = 0; // point to the start pos of next col value.
size_t curpos = 0; // point to the start pos of separator matching sequence.
size_t p1 = 0; // point to the current pos of separator matching sequence.
- size_t non_space = 0; // point to the last pos of non_space charactor.
+ size_t non_space = 0; // point to the last pos of non_space character.
// Separator: AAAA
//
diff --git a/be/src/exec/orc_scanner.cpp b/be/src/exec/orc_scanner.cpp
index ec5fc23dac..2dddf2baf1 100644
--- a/be/src/exec/orc_scanner.cpp
+++ b/be/src/exec/orc_scanner.cpp
@@ -259,7 +259,7 @@ Status ORCScanner::get_next(Tuple* tuple, MemPool* tuple_pool, bool* eof, bool*
std::string v;
if (decimal_scale_length <= scale) {
- // decimal(5,2) : the integer of 0.01 is 1, so we should fill 0 befor integer
+ // decimal(5,2) : the integer of 0.01 is 1, so we should fill 0 before integer
v = std::string(negative ? "-0." : "0.");
int fill_zero = scale - decimal_scale_length;
while (fill_zero--) {
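To make the padding concrete: a decimal(5,2) value 0.01 is stored as the unscaled integer 1, so formatting must insert scale-minus-digit-count zeros between "0." and the digits. A standalone sketch of this branch (names are illustrative, not the scanner's own):

    #include <string>

    // Format an unscaled decimal whose digit count is <= scale, i.e. a
    // value with no integer part, such as 0.01 for decimal(5,2).
    std::string format_small_decimal(long long unscaled, int scale, bool negative) {
        std::string digits = std::to_string(unscaled);
        std::string v = negative ? "-0." : "0.";
        int fill_zero = scale - static_cast<int>(digits.size());
        while (fill_zero-- > 0) {
            v += '0';
        }
        return v + digits; // unscaled=1, scale=2 -> "0.01"
    }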
diff --git a/be/src/exec/table_connector.cpp b/be/src/exec/table_connector.cpp
index 98b4ec1e3b..77c3a13449 100644
--- a/be/src/exec/table_connector.cpp
+++ b/be/src/exec/table_connector.cpp
@@ -144,7 +144,7 @@ Status TableConnector::append(const std::string& table_name, RowBatch* batch,
break;
}
}
- // Translate utf8 string to utf16 to use unicode encodeing
+ // Translate utf8 string to utf16 to use unicode encoding
insert_stmt = utf8_to_u16string(_insert_stmt_buffer.data(),
_insert_stmt_buffer.data() +
_insert_stmt_buffer.size());
}
@@ -310,7 +310,7 @@ Status TableConnector::append(const std::string& table_name, vectorized::Block*
break;
}
}
- // Translate utf8 string to utf16 to use unicode encodeing
+ // Translate utf8 string to utf16 to use unicode encoding
insert_stmt = utf8_to_u16string(_insert_stmt_buffer.data(),
_insert_stmt_buffer.data() +
_insert_stmt_buffer.size());
}
diff --git a/be/src/gutil/atomic_refcount.h b/be/src/gutil/atomic_refcount.h
index 439f8809f0..469fe0e09d 100644
--- a/be/src/gutil/atomic_refcount.h
+++ b/be/src/gutil/atomic_refcount.h
@@ -79,9 +79,9 @@ inline bool RefCountDec(volatile Atomic32* ptr) {
// Return whether the reference count is one.
// If the reference count is used in the conventional way, a
-// refrerence count of 1 implies that the current thread owns the
+// reference count of 1 implies that the current thread owns the
// reference and no other thread shares it.
-// This call performs the test for a referenece count of one, and
+// This call performs the test for a reference count of one, and
// performs the memory barrier needed for the owning thread
// to act on the object, knowing that it has exclusive access to the
// object.
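The guarantee this comment describes maps onto standard C++ atomics roughly as follows (a sketch, not the gutil implementation; the acquire load supplies the memory barrier mentioned above):

    #include <atomic>
    #include <cstdint>

    // True if the current thread holds the only reference. The acquire
    // ordering makes writes done by other, now-finished owners visible
    // before the caller acts on its exclusive access to the object.
    bool ref_count_is_one(const std::atomic<int32_t>& ref_count) {
        return ref_count.load(std::memory_order_acquire) == 1;
    }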
diff --git a/be/src/gutil/atomicops.h b/be/src/gutil/atomicops.h
index 52715d22b1..7873a39170 100644
--- a/be/src/gutil/atomicops.h
+++ b/be/src/gutil/atomicops.h
@@ -18,7 +18,7 @@
// alternative. You should assume only properties explicitly guaranteed by the
// specifications in this file. You are almost certainly _not_ writing code
// just for the x86; if you assume x86 semantics, x86 hardware bugs and
-// implementations on other archtectures will cause your code to break. If you
+// implementations on other architectures will cause your code to break. If you
// do not know what you are doing, avoid these routines, and use a Mutex.
//
// These following lower-level operations are typically useful only to people
diff --git a/be/src/gutil/casts.h b/be/src/gutil/casts.h
index 95064e6b11..71a147cfaf 100644
--- a/be/src/gutil/casts.h
+++ b/be/src/gutil/casts.h
@@ -123,7 +123,7 @@ inline To down_cast(From& f) {
//
// This is true for any cast syntax, either *(int*)&f or
// *reinterpret_cast<int*>(&f). And it is particularly true for
-// conversions betweeen integral lvalues and floating-point lvalues.
+// conversions between integral lvalues and floating-point lvalues.
//
// The purpose of 3.10 -15- is to allow optimizing compilers to assume
// that expressions with different types refer to different memory. gcc
@@ -184,7 +184,7 @@ inline Dest bit_cast(const Source& source) {
// enum A { A_min = -18, A_max = 33 };
// MAKE_ENUM_LIMITS(A, A_min, A_max)
//
-// Convert an enum to an int in one of two ways. The prefered way is a
+// Convert an enum to an int in one of two ways. The preferred way is a
// tight conversion, which ensures that A_min <= value <= A_max.
//
// A var = tight_enum_cast<A>(3);
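The memcpy idiom that this bit_cast comment defends can be shown in isolation (the pre-C++20 form; since C++20, std::bit_cast provides this in the standard library):

    #include <cstring>

    // Reinterpret the bits of one trivially-copyable type as another
    // without the undefined behavior of *(Dest*)&source.
    template <typename Dest, typename Source>
    Dest bit_cast(const Source& source) {
        static_assert(sizeof(Dest) == sizeof(Source), "size mismatch");
        Dest dest;
        std::memcpy(&dest, &source, sizeof(dest));
        return dest;
    }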
diff --git a/be/src/gutil/endian.h b/be/src/gutil/endian.h
index 89aacf0153..9ced4f8261 100644
--- a/be/src/gutil/endian.h
+++ b/be/src/gutil/endian.h
@@ -26,7 +26,7 @@
// but don't require including the dangerous netinet/in.h.
//
// Buffer routines will copy to and from buffers without causing
-// a bus error when the architecture requires differnt byte alignments
+// a bus error when the architecture requires different byte alignments
#pragma once
diff --git a/be/src/gutil/linux_syscall_support.h b/be/src/gutil/linux_syscall_support.h
index 03ede26bb8..78d9ff62ba 100644
--- a/be/src/gutil/linux_syscall_support.h
+++ b/be/src/gutil/linux_syscall_support.h
@@ -48,7 +48,7 @@
* the necessary definitions.
*
* SYS_ERRNO:
- * All system calls will update "errno" unless overriden by setting the
+ * All system calls will update "errno" unless overridden by setting the
* SYS_ERRNO macro prior to including this file. SYS_ERRNO should be
* an l-value.
*
diff --git a/be/src/gutil/port.h b/be/src/gutil/port.h
index 62f17d4d8f..aadccba222 100644
--- a/be/src/gutil/port.h
+++ b/be/src/gutil/port.h
@@ -147,7 +147,7 @@ static inline uint64 bswap_64(uint64 x) {
#endif
// define the macros IS_LITTLE_ENDIAN or IS_BIG_ENDIAN
-// using the above endian defintions from endian.h if
+// using the above endian definitions from endian.h if
// endian.h was included
#ifdef __BYTE_ORDER
#if __BYTE_ORDER == __LITTLE_ENDIAN
@@ -280,7 +280,7 @@ inline void* memrchr(const void* bytes, int find_char, size_t len) {
#endif
-// Klocwork static analysis tool's C/C++ complier kwcc
+// Klocwork static analysis tool's C/C++ compiler kwcc
#if defined(__KLOCWORK__)
#define STATIC_ANALYSIS
#endif // __KLOCWORK__
diff --git a/be/src/gutil/ref_counted.h b/be/src/gutil/ref_counted.h
index 210ea3ad6c..96086b4aeb 100644
--- a/be/src/gutil/ref_counted.h
+++ b/be/src/gutil/ref_counted.h
@@ -78,7 +78,7 @@ private:
// };
//
// You should always make your destructor private, to avoid any code deleting
-// the object accidently while there are references to it.
+// the object accidentally while there are references to it.
template <class T>
class RefCounted : public subtle::RefCountedBase {
public:
diff --git a/be/src/http/action/stream_load.cpp b/be/src/http/action/stream_load.cpp
index 150b8b5337..69bfeabe48 100644
--- a/be/src/http/action/stream_load.cpp
+++ b/be/src/http/action/stream_load.cpp
@@ -178,7 +178,7 @@ void StreamLoadAction::handle(HttpRequest* req) {
_save_stream_load_record(ctx, str);
}
#endif
- // update statstics
+ // update statistics
streaming_load_requests_total->increment(1);
streaming_load_duration_ms->increment(ctx->load_cost_millis);
streaming_load_current_processing->increment(-1);
diff --git a/be/src/olap/collect_iterator.h b/be/src/olap/collect_iterator.h
index e765013666..f5bcc3aa7b 100644
--- a/be/src/olap/collect_iterator.h
+++ b/be/src/olap/collect_iterator.h
@@ -75,7 +75,7 @@ private:
// Only use in unique reader. Heap will set _skip_row = true.
// when build heap find the row in LevelIterator have same key but lower version or sequence
- // the row of LevelIteratro should be skiped to prevent useless compare and function call
+ // the row of LevelIterator should be skipped to prevent useless compare and function call
mutable bool _skip_row = false;
};
@@ -173,7 +173,7 @@ private:
Status _normal_next(const RowCursor** row, bool* delete_flag);
// Each LevelIterator corresponds to a rowset reader,
- // it will be cleared after '_heap' has been initilized when '_merge == true'.
+ // it will be cleared after '_heap' has been initialized when '_merge == true'.
std::list<LevelIterator*> _children;
// point to the Level0Iterator containing the next output row.
// null when CollectIterator hasn't been initialized or reaches EOF.
@@ -201,7 +201,7 @@ private:
std::unique_ptr<LevelIterator> _inner_iter;
// Each LevelIterator corresponds to a rowset reader,
- // it will be cleared after '_inner_iter' has been initilized.
+ // it will be cleared after '_inner_iter' has been initialized.
std::list<LevelIterator*> _children;
bool _merge = true;
diff --git a/be/src/olap/cumulative_compaction_policy.h b/be/src/olap/cumulative_compaction_policy.h
index 4abdd3497a..16e9b3b19b 100644
--- a/be/src/olap/cumulative_compaction_policy.h
+++ b/be/src/olap/cumulative_compaction_policy.h
@@ -90,8 +90,8 @@ public:
RowsetSharedPtr output_rowset,
Version& last_delete_version) = 0;
- /// Calculate tablet's cumulatiuve point before compaction. This calculation just executes once when the tablet compacts
- /// first time after BE initialization and then motion of cumulatiuve point depends on update_cumulative_point policy.
+ /// Calculate tablet's cumulative point before compaction. This calculation just executes once when the tablet compacts
+ /// first time after BE initialization and then motion of cumulative point depends on update_cumulative_point policy.
/// This function is pure virtual function. In general, the cumulative point splits the rowsets into two parts:
/// base rowsets, cumulative rowsets.
/// param all_rowsets, all rowsets in the tablet
@@ -106,7 +106,7 @@ public:
virtual std::string name() = 0;
};
-/// SizeBased cumulative compaction policy implemention. SizeBased policy which derives CumulativeCompactionPolicy is a optimized
+/// SizeBased cumulative compaction policy implementation. SizeBased policy which derives CumulativeCompactionPolicy is a optimized
/// version of num based cumulative compaction policy. This policy also uses linear structure to compact rowsets. The cumulative rowsets
/// can do compaction when they are in same level size. And when output rowset exceeds the promotion radio of base size or min promotion
/// size, it will do base compaction. This policy is targeting the use cases requiring lower write amplification, trading off read
diff --git a/be/src/olap/olap_common.h b/be/src/olap/olap_common.h
index 0578fcbfb3..e16aa8e5b4 100644
--- a/be/src/olap/olap_common.h
+++ b/be/src/olap/olap_common.h
@@ -330,7 +330,7 @@ struct OlapReaderStatistics {
// general_debug_ns is designed for the purpose of DEBUG, to record any infomations of debugging or profiling.
// different from specific meaningful timer such as index_load_ns, general_debug_ns can be used flexibly.
// general_debug_ns has associated with OlapScanNode's _general_debug_timer already.
- // so general_debug_ns' values will update to _general_debug_timer automaticly,
+ // so general_debug_ns' values will update to _general_debug_timer automatically,
// the timer result can be checked through QueryProfile web page easily.
// when search general_debug_ns, you can find that general_debug_ns has not been used,
// this is because such codes added for debug purpose should not commit, it's just for debuging.
diff --git a/be/src/olap/olap_server.cpp b/be/src/olap/olap_server.cpp
index 0462ee6c64..6e39b0f093 100644
--- a/be/src/olap/olap_server.cpp
+++ b/be/src/olap/olap_server.cpp
@@ -495,7 +495,7 @@ std::vector<TabletSharedPtr> StorageEngine::_generate_compaction_tasks(
std::shuffle(data_dirs.begin(), data_dirs.end(), g);
// Copy _tablet_submitted_xxx_compaction map so that we don't need to hold _tablet_submitted_compaction_mutex
- // when travesing the data dir
+ // when traversing the data dir
std::map<DataDir*, std::unordered_set<TTabletId>> copied_cumu_map;
std::map<DataDir*, std::unordered_set<TTabletId>> copied_base_map;
{
diff --git a/be/src/olap/reader.h b/be/src/olap/reader.h
index e3e29f0cc8..2395888ffc 100644
--- a/be/src/olap/reader.h
+++ b/be/src/olap/reader.h
@@ -94,7 +94,7 @@ public:
std::unordered_set<uint32_t>* tablet_columns_convert_to_null_set =
nullptr;
TPushAggOp::type push_down_agg_type_opt = TPushAggOp::NONE;
- // used for comapction to record row ids
+ // used for compaction to record row ids
bool record_rowids = false;
// used for special optimization for query : ORDER BY key LIMIT n
bool read_orderby_key = false;
diff --git a/be/src/olap/rowset/segment_v2/parsed_page.h b/be/src/olap/rowset/segment_v2/parsed_page.h
index f3b6ed7b55..fe21f11dc3 100644
--- a/be/src/olap/rowset/segment_v2/parsed_page.h
+++ b/be/src/olap/rowset/segment_v2/parsed_page.h
@@ -90,7 +90,7 @@ struct ParsedPage {
// number of rows including nulls and not-nulls
ordinal_t num_rows = 0;
// record it to get the last array element's size
- // should be none zero if setted in page
+ // should be non-zero if set in page
ordinal_t next_array_item_ordinal = 0;
PagePointer page_pointer;
diff --git a/be/src/olap/rowset/segment_v2/segment_iterator.cpp b/be/src/olap/rowset/segment_v2/segment_iterator.cpp
index a85c0e35db..dc27fb86a3 100644
--- a/be/src/olap/rowset/segment_v2/segment_iterator.cpp
+++ b/be/src/olap/rowset/segment_v2/segment_iterator.cpp
@@ -697,7 +697,7 @@ Status SegmentIterator::next_batch(RowBlockV2* block) {
return Status::OK();
}
-/* ---------------------- for vecterization implementation ---------------------- */
+/* ---------------------- for vectorization implementation ---------------------- */
/**
* For storage layer data type, can be measured from two perspectives:
@@ -1135,7 +1135,7 @@ Status SegmentIterator::next_batch(vectorized::Block* block) {
// step 1: evaluate vectorization predicate
selected_size = _evaluate_vectorization_predicate(sel_rowid_idx,
selected_size);
- // step 2: evaluate short ciruit predicate
+ // step 2: evaluate short circuit predicate
// todo(wb) research whether need to read short predicate after vectorization evaluation
// to reduce cost of read short circuit columns.
// In SSB test, it make no difference; So need more scenarios to test
diff --git a/be/src/olap/tablet_manager.cpp b/be/src/olap/tablet_manager.cpp
index 4a8fc9f1d1..40e700736d 100644
--- a/be/src/olap/tablet_manager.cpp
+++ b/be/src/olap/tablet_manager.cpp
@@ -119,7 +119,7 @@ Status TabletManager::_add_tablet_unlocked(TTabletId tablet_id, const TabletShar
}
// During storage migration, the tablet is moved to another disk, have to check
- // if the new tablet's rowset version is larger than the old one to prvent losting data during
+ // if the new tablet's rowset version is larger than the old one to prevent losing data during
// migration
int64_t old_time, new_time;
int32_t old_version, new_version;
diff --git a/be/src/olap/tuple_reader.h b/be/src/olap/tuple_reader.h
index 7045393e1b..191594c6d7 100644
--- a/be/src/olap/tuple_reader.h
+++ b/be/src/olap/tuple_reader.h
@@ -45,7 +45,7 @@ private:
friend class CollectIterator;
friend class DeleteHandler;
- // Direcly read row from rowset and pass to upper caller. No need to do aggregation.
+ // Directly read row from rowset and pass to upper caller. No need to do aggregation.
// This is usually used for DUPLICATE KEY tables
Status _direct_next_row(RowCursor* row_cursor, MemPool* mem_pool,
ObjectPool* agg_pool,
bool* eof);
diff --git a/be/src/runtime/buffered_block_mgr2.cc b/be/src/runtime/buffered_block_mgr2.cc
index e610cf3803..ffa3c52b9b 100644
--- a/be/src/runtime/buffered_block_mgr2.cc
+++ b/be/src/runtime/buffered_block_mgr2.cc
@@ -457,7 +457,7 @@ BufferedBlockMgr2::~BufferedBlockMgr2() {
lock_guard<SpinLock> lock(_s_block_mgrs_lock);
BlockMgrsMap::iterator it = _s_query_to_block_mgrs.find(_query_id);
// IMPALA-2286: Another fragment may have called create() for this _query_id and
- // saw that this BufferedBlockMgr2 is being destructed. That fragement will
+ // saw that this BufferedBlockMgr2 is being destructed. That fragment will
// overwrite the map entry for _query_id, pointing it to a different
// BufferedBlockMgr2 object. We should let that object's destructor remove the
// entry. On the other hand, if the second BufferedBlockMgr2 is destructed before
diff --git a/be/src/runtime/buffered_tuple_stream3.h b/be/src/runtime/buffered_tuple_stream3.h
index 6f5ba3dae4..a225b5d892 100644
--- a/be/src/runtime/buffered_tuple_stream3.h
+++ b/be/src/runtime/buffered_tuple_stream3.h
@@ -259,7 +259,7 @@ public:
/// unpinning the stream.
/// c) The append fails with a runtime error. Returns false and sets 'status' to an
/// error.
- /// d) The append fails becase the row is too large to fit in a page of a stream.
+ /// d) The append fails because the row is too large to fit in a page of a stream.
/// Returns false and sets 'status' to an error.
///
/// Unpinned streams can only encounter case b) when appending a row larger than
diff --git a/be/src/runtime/collection_value.cpp b/be/src/runtime/collection_value.cpp
index 345fc5c329..13185e1cba 100644
--- a/be/src/runtime/collection_value.cpp
+++ b/be/src/runtime/collection_value.cpp
@@ -523,7 +523,7 @@ void CollectionValue::deep_copy_collection(CollectionValue* shallow_copied_cv,
} else {
cv->set_null_signs(nullptr);
}
- // copy and assgin data
+ // copy and assign data
memory_copy(coll_data + nulls_size, cv->data(), coll_byte_size);
cv->set_data(coll_data + nulls_size);
@@ -546,7 +546,7 @@ void CollectionValue::deserialize_collection(CollectionValue* cv, const char* tu
new (cv) CollectionValue(cv->length());
return;
}
- // assgin data and null_sign pointer position in tuple_data
+ // assign data and null_sign pointer position in tuple_data
int64_t data_offset = convert_to<int64_t>(cv->data());
cv->set_data(convert_to<char*>(tuple_data + data_offset));
if (cv->has_null()) {
diff --git a/be/src/runtime/datetime_value.cpp b/be/src/runtime/datetime_value.cpp
index fc2843fa00..13485e3ff9 100644
--- a/be/src/runtime/datetime_value.cpp
+++ b/be/src/runtime/datetime_value.cpp
@@ -1451,7 +1451,7 @@ bool DateTimeValue::from_date_format_str(const char* format, int format_len, con
}
// 1. already_set_date_part means _year, _month, _day be set, so we only set time part
// 2. already_set_time_part means _hour, _minute, _second, _microsecond be set,
- // so we only neet to set date part
+ // so we only need to set date part
// 3. if both are true, means all part of date_time be set, no need check_range_and_set_time
bool already_set_date_part = yearday > 0 || (week_num >= 0 && weekday > 0);
if (already_set_date_part && already_set_time_part) return true;
diff --git a/be/src/runtime/runtime_filter_mgr.h b/be/src/runtime/runtime_filter_mgr.h
index 63960f0182..4c6650d9f3 100644
--- a/be/src/runtime/runtime_filter_mgr.h
+++ b/be/src/runtime/runtime_filter_mgr.h
@@ -168,7 +168,7 @@ public:
std::shared_ptr<RuntimeFilterMergeControllerEntity>*
handle,
RuntimeState* state);
// thread safe
- // increate a reference count
+ // increase a reference count
// if a query-id is not exist
// Status.not_ok will be returned and a empty ptr will returned by *handle
Status acquire(UniqueId query_id,
std::shared_ptr<RuntimeFilterMergeControllerEntity>* handle);
diff --git a/be/src/runtime/stream_load/stream_load_context.h b/be/src/runtime/stream_load/stream_load_context.h
index 1a35d85874..795b0f304b 100644
--- a/be/src/runtime/stream_load/stream_load_context.h
+++ b/be/src/runtime/stream_load/stream_load_context.h
@@ -114,7 +114,7 @@ public:
bool unref() { return _refs.fetch_sub(1) == 1; }
public:
- // load type, eg: ROUTINE LOAD/MANUL LOAD
+ // load type, eg: ROUTINE LOAD/MANUAL LOAD
TLoadType::type load_type;
// load data source: eg: KAFKA/RAW
TLoadSourceType::type load_src_type;
@@ -200,7 +200,7 @@ public:
// to identified a specified data consumer.
int64_t consumer_id;
- // If this is an tranactional insert operation, this will be true
+ // If this is a transactional insert operation, this will be true
bool need_commit_self = false;
// csv with header type
diff --git a/be/src/util/doris_metrics.h b/be/src/util/doris_metrics.h
index 2b5ca205bf..7536be7372 100644
--- a/be/src/util/doris_metrics.h
+++ b/be/src/util/doris_metrics.h
@@ -139,7 +139,7 @@ public:
// permits have been used for all compaction tasks
IntGauge* compaction_used_permits;
- // permits required by the compaction task which is waitting for permits
+ // permits required by the compaction task which is waiting for permits
IntGauge* compaction_waitting_permits;
HistogramMetric* tablet_version_num_distribution;
diff --git a/be/src/util/lru_cache.hpp b/be/src/util/lru_cache.hpp
index 16780fb8b2..1bbf6c6e7a 100644
--- a/be/src/util/lru_cache.hpp
+++ b/be/src/util/lru_cache.hpp
@@ -84,7 +84,7 @@ public:
}
}
- // Must copy value, because value maybe relased when caller used
+ // Must copy value, because value may be released when caller uses it
bool get(const Key& key, Value* value) {
auto it = _cache_items_map.find(key);
if (it == _cache_items_map.end()) {
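The copy-out get() this comment argues for can be reduced to the following shape (a simplified sketch, not the Doris class: returning a reference into the cache could dangle once the entry is evicted, so the value is copied into caller-owned storage while the entry is still alive):

    #include <unordered_map>
    #include <utility>

    // Simplified illustration: get() copies the value out because a
    // reference into the cache could be invalidated by eviction.
    template <typename Key, typename Value>
    class SimpleCache {
    public:
        bool get(const Key& key, Value* value) {
            auto it = _map.find(key);
            if (it == _map.end()) return false;
            *value = it->second; // copy while the entry is still alive
            return true;
        }
        void put(const Key& key, Value v) { _map[key] = std::move(v); }

    private:
        std::unordered_map<Key, Value> _map;
    };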
diff --git a/be/src/util/runtime_profile.h b/be/src/util/runtime_profile.h
index 56ca28281c..55b4b2c3d2 100644
--- a/be/src/util/runtime_profile.h
+++ b/be/src/util/runtime_profile.h
@@ -478,7 +478,7 @@ private:
// Helper function to compute compute the fraction of the total time spent in
// this profile and its children.
- // Called recusively.
+ // Called recursively.
void compute_time_in_profile(int64_t total_time);
// Print the child counters of the given counter name
diff --git a/be/src/util/thrift_util.cpp b/be/src/util/thrift_util.cpp
index 925d4aac32..f533eeb1c1 100644
--- a/be/src/util/thrift_util.cpp
+++ b/be/src/util/thrift_util.cpp
@@ -29,7 +29,7 @@
#include "util/hash_util.hpp"
#include "util/thrift_server.h"
-// TCompactProtocol requires some #defines to work right. They also define UNLIKLEY
+// TCompactProtocol requires some #defines to work right. They also define UNLIKELY
// so we need to undef this.
// TODO: is there a better include to use?
#ifdef UNLIKELY
diff --git a/be/src/util/time_lut.cpp b/be/src/util/time_lut.cpp
index 5f22bc3cea..bc37847fe1 100644
--- a/be/src/util/time_lut.cpp
+++ b/be/src/util/time_lut.cpp
@@ -47,7 +47,7 @@ uint8_t calc_week(uint16_t year, uint8_t month, uint8_t day, bool monday_first,
int days = 0;
*to_year = year;
- // Check wether the first days of this year belongs to last year
+ // Check whether the first days of this year belongs to last year
if (month == 1 && day <= (7 - weekday_first_day)) {
if (!week_year && ((first_weekday && weekday_first_day != 0) ||
(!first_weekday && weekday_first_day > 3))) {
diff --git a/be/src/vec/aggregate_functions/aggregate_function_java_udaf.h b/be/src/vec/aggregate_functions/aggregate_function_java_udaf.h
index ba442f3b79..f61d40443a 100644
--- a/be/src/vec/aggregate_functions/aggregate_function_java_udaf.h
+++ b/be/src/vec/aggregate_functions/aggregate_function_java_udaf.h
@@ -44,7 +44,7 @@ const char* UDAF_EXECUTOR_ADD_SIGNATURE = "(ZJJ)V";
const char* UDAF_EXECUTOR_SERIALIZE_SIGNATURE = "(J)[B";
const char* UDAF_EXECUTOR_MERGE_SIGNATURE = "(J[B)V";
const char* UDAF_EXECUTOR_RESULT_SIGNATURE = "(JJ)Z";
-// Calling Java method about those signture means: "(argument-types)return-type"
+// Calling Java method about those signature means: "(argument-types)return-type"
// https://www.iitk.ac.in/esc101/05Aug/tutorial/native1.1/implementing/method.html
struct AggregateJavaUdafData {
diff --git a/be/src/vec/aggregate_functions/aggregate_function_sequence_match.h b/be/src/vec/aggregate_functions/aggregate_function_sequence_match.h
index d9daee8dfd..df383d199d 100644
--- a/be/src/vec/aggregate_functions/aggregate_function_sequence_match.h
+++ b/be/src/vec/aggregate_functions/aggregate_function_sequence_match.h
@@ -188,7 +188,7 @@ private:
const char* begin = pos;
const char* end = pos + pattern.size();
- // Pattern is checked in fe, so pattern should be vaild here, we check it and if pattern is invalid, we return.
+ // Pattern is checked in fe, so pattern should be valid here, we check it and if pattern is invalid, we return.
auto throw_exception = [&](const std::string& msg) {
LOG(WARNING) << msg + " '" + std::string(pos, end) + "' at
position " +
std::to_string(pos - begin);
diff --git a/be/src/vec/columns/predicate_column.h b/be/src/vec/columns/predicate_column.h
index c6ab98a3c7..f307d12473 100644
--- a/be/src/vec/columns/predicate_column.h
+++ b/be/src/vec/columns/predicate_column.h
@@ -387,7 +387,7 @@ public:
LOG(FATAL) << "get field not supported in PredicateColumnType";
}
- // it's impossable to use ComplexType as key , so we don't have to implemnt them
+ // it's impossible to use ComplexType as key , so we don't have to implement them
[[noreturn]] StringRef serialize_value_into_arena(size_t n, Arena& arena,
char const*& begin)
const override {
LOG(FATAL) << "serialize_value_into_arena not supported in
PredicateColumnType";
diff --git a/be/src/vec/common/field_visitors.h b/be/src/vec/common/field_visitors.h
index 1dee4491b5..9b19f1946b 100644
--- a/be/src/vec/common/field_visitors.h
+++ b/be/src/vec/common/field_visitors.h
@@ -195,7 +195,7 @@ public:
String operator()(const AggregateFunctionStateData& x) const;
};
-/** Converts numberic value of any type to specified type. */
+/** Converts numeric value of any type to specified type. */
template <typename T>
class FieldVisitorConvertToNumber : public StaticVisitor<T> {
public:
diff --git a/be/src/vec/common/sort/sorter.cpp b/be/src/vec/common/sort/sorter.cpp
index 4fb99cb507..94ad261165 100644
--- a/be/src/vec/common/sort/sorter.cpp
+++ b/be/src/vec/common/sort/sorter.cpp
@@ -177,7 +177,7 @@ Status FullSorter::_do_sort() {
// dispose TOP-N logic
if (_limit != -1) {
- // Here is a little opt to reduce the mem uasge, we build a max heap
+ // Here is a little opt to reduce the mem usage, we build a max heap
// to order the block in _block_priority_queue.
// if one block totally greater the heap top of _block_priority_queue
// we can throw the block data directly.
diff --git a/be/src/vec/common/sort/topn_sorter.cpp b/be/src/vec/common/sort/topn_sorter.cpp
index a12186fac5..411a41efd5 100644
--- a/be/src/vec/common/sort/topn_sorter.cpp
+++ b/be/src/vec/common/sort/topn_sorter.cpp
@@ -58,7 +58,7 @@ Status TopNSorter::_do_sort(Block* block) {
// dispose TOP-N logic
if (_limit != -1) {
- // Here is a little opt to reduce the mem uasge, we build a max heap
+ // Here is a little opt to reduce the mem usage, we build a max heap
// to order the block in _block_priority_queue.
// if one block totally greater the heap top of _block_priority_queue
// we can throw the block data directly.
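The max-heap trick described in both sorters amounts to keeping the N smallest items in a max-heap and discarding anything not smaller than the heap top. A self-contained sketch of the idea (simplified to single values rather than blocks):

    #include <queue>
    #include <vector>

    // Keep the n smallest values seen so far in a max-heap; any value that
    // is not smaller than the heap top cannot be in the final top-n and is
    // dropped immediately, which mirrors "throw the block data directly".
    std::vector<int> top_n_smallest(const std::vector<int>& input, size_t n) {
        std::vector<int> out;
        if (n == 0) return out;
        std::priority_queue<int> heap; // max-heap over the current best n
        for (int v : input) {
            if (heap.size() < n) {
                heap.push(v);
            } else if (v < heap.top()) {
                heap.pop();
                heap.push(v);
            }
        }
        while (!heap.empty()) {
            out.push_back(heap.top());
            heap.pop();
        }
        return out; // descending order; sort ascending if needed
    }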
diff --git a/be/src/vec/exec/format/csv/csv_reader.h b/be/src/vec/exec/format/csv/csv_reader.h
index e7d2f7f7c0..5083c00d2d 100644
--- a/be/src/vec/exec/format/csv/csv_reader.h
+++ b/be/src/vec/exec/format/csv/csv_reader.h
@@ -78,7 +78,7 @@ private:
const std::vector<SlotDescriptor*>& _file_slot_descs;
// Only for query task, save the columns' index which need to be read.
// eg, there are 3 cols in "_file_slot_descs" named: k1, k2, k3
- // and the corressponding position in file is 0, 3, 5.
+ // and the corresponding position in file is 0, 3, 5.
// So the _col_idx will be: <0, 3, 5>
std::vector<int> _col_idxs;
// True if this is a load task
diff --git a/be/src/vec/exec/format/json/new_json_reader.cpp b/be/src/vec/exec/format/json/new_json_reader.cpp
index 6b60a144cf..aed9ecdfc4 100644
--- a/be/src/vec/exec/format/json/new_json_reader.cpp
+++ b/be/src/vec/exec/format/json/new_json_reader.cpp
@@ -500,7 +500,7 @@ Status NewJsonReader::_parse_json(bool* is_empty_row, bool* eof) {
// read one json string from line reader or file reader and parse it to json doc.
// return Status::DataQualityError() if data has quality error.
-// return other error if encounter other problemes.
+// return other error if encounter other problems.
// return Status::OK() if parse succeed or reach EOF.
Status NewJsonReader::_parse_json_doc(size_t* size, bool* eof) {
// read a whole message
@@ -620,7 +620,7 @@ Status NewJsonReader::_parse_json_doc(size_t* size, bool* eof) {
// for simple format json
// set valid to true and return OK if succeed.
// set valid to false and return OK if we met an invalid row.
-// return other status if encounter other problmes.
+// return other status if encounter other problems.
Status NewJsonReader::_set_column_value(rapidjson::Value& objectValue,
std::vector<MutableColumnPtr>& columns,
const std::vector<SlotDescriptor*>&
slot_descs,
diff --git a/be/src/vec/exec/join/vhash_join_node.cpp b/be/src/vec/exec/join/vhash_join_node.cpp
index b067604d87..8bea283f07 100644
--- a/be/src/vec/exec/join/vhash_join_node.cpp
+++ b/be/src/vec/exec/join/vhash_join_node.cpp
@@ -544,7 +544,7 @@ DISABLE_OPTIMIZATION Status ProcessHashTableProbe<JoinOpType>::do_process_with_o
auto& mcol = mutable_block.mutable_columns();
// use in right join to change visited state after
- // exec the vother join conjunt
+ // exec the vother join conjunct
std::vector<bool*> visited_map;
visited_map.reserve(1.2 * _batch_size);
@@ -664,7 +664,7 @@ DISABLE_OPTIMIZATION Status ProcessHashTableProbe<JoinOpType>::do_process_with_o
}
output_block->swap(mutable_block.to_block());
- // dispose the other join conjunt exec
+ // dispose the other join conjunct exec
if (output_block->rows()) {
int result_column_id = -1;
int orig_columns = output_block->columns();
@@ -699,7 +699,7 @@ DISABLE_OPTIMIZATION Status ProcessHashTableProbe<JoinOpType>::do_process_with_o
*visited_map[i] |= other_hit;
filter_map.push_back(other_hit || !same_to_prev[i] ||
(!column->get_bool(i - 1) &&
filter_map.back()));
- // Here to keep only hit join conjunt and other join conjunt is true need to be output.
+ // Here to keep only hit join conjunct and other join conjunct is true need to be output.
// if not, only some key must keep one row will output will null right table column
if (same_to_prev[i] && filter_map.back() &&
!column->get_bool(i - 1))
filter_map[i - 1] = false;
@@ -1289,7 +1289,7 @@ Status HashJoinNode::_materialize_build_side(RuntimeState* state) {
BUILD_BLOCK_MAX_SIZE *
_MAX_BUILD_BLOCK_COUNT));
}
_build_blocks.emplace_back(mutable_block.to_block());
- // TODO:: Rethink may we should do the proess after we recevie all build blocks ?
+ // TODO:: Rethink may we should do the process after we receive all build blocks ?
// which is better.
RETURN_IF_ERROR(_process_build_block(state, _build_blocks[index],
index));
diff --git a/be/src/vec/exec/join/vhash_join_node.h b/be/src/vec/exec/join/vhash_join_node.h
index a190b2e713..25a40159bc 100644
--- a/be/src/vec/exec/join/vhash_join_node.h
+++ b/be/src/vec/exec/join/vhash_join_node.h
@@ -177,17 +177,17 @@ struct ProcessHashTableProbe {
void probe_side_output_column(MutableColumns& mcol, const
std::vector<bool>& output_slot_flags,
int size, int last_probe_index, size_t
probe_size,
bool all_match_one, bool
have_other_join_conjunct);
- // Only process the join with no other join conjunt, because of no other join conjunt
+ // Only process the join with no other join conjunct, because of no other join conjunct
// the output block struct is same with mutable block. we can do more opt on it and simplify
// the logic of probe
// TODO: opt the visited here to reduce the size of hash table
template <bool need_null_map_for_probe, bool ignore_null, typename
HashTableType>
Status do_process(HashTableType& hash_table_ctx, ConstNullMapPtr null_map,
MutableBlock& mutable_block, Block* output_block, size_t
probe_rows);
- // In the presence of other join conjunt, the process of join become more complicated.
- // each matching join column need to be processed by other join conjunt. so the sturct of mutable block
+ // In the presence of other join conjunct, the process of join become more complicated.
+ // each matching join column need to be processed by other join conjunct. so the struct of mutable block
// and output block may be different
- // The output result is determined by the other join conjunt result and same_to_prev struct
+ // The output result is determined by the other join conjunct result and same_to_prev struct
template <bool need_null_map_for_probe, bool ignore_null, typename
HashTableType>
Status do_process_with_other_join_conjuncts(HashTableType& hash_table_ctx,
ConstNullMapPtr null_map,
diff --git a/be/src/vec/exec/scan/vfile_scanner.h b/be/src/vec/exec/scan/vfile_scanner.h
index 3b3ca5c615..3edd75a5ac 100644
--- a/be/src/vec/exec/scan/vfile_scanner.h
+++ b/be/src/vec/exec/scan/vfile_scanner.h
@@ -74,7 +74,7 @@ protected:
std::unordered_map<SlotId, int> _partition_slot_index_map;
// created from param.expr_of_dest_slot
// For query, it saves default value expr of all dest columns, or nullptr for NULL.
- // For load, it saves convertion expr/default value of all dest columns.
+ // For load, it saves conversion expr/default value of all dest columns.
std::vector<vectorized::VExprContext*> _dest_vexpr_ctx;
// dest slot name to index in _dest_vexpr_ctx;
std::unordered_map<std::string, int> _dest_slot_name_to_idx;
@@ -90,7 +90,7 @@ protected:
// Get from GenericReader, save the existing columns in file to their type.
std::unordered_map<std::string, TypeDescriptor> _name_to_col_type;
- // Get from GenericReader, save columns that requried by scan but not exist in file.
+ // Get from GenericReader, save columns that required by scan but not exist in file.
// These columns will be filled by default value or null.
std::unordered_set<std::string> _missing_cols;
diff --git a/be/src/vec/exec/scan/vscan_node.h b/be/src/vec/exec/scan/vscan_node.h
index a2aaf5ffd4..e19b4f347f 100644
--- a/be/src/vec/exec/scan/vscan_node.h
+++ b/be/src/vec/exec/scan/vscan_node.h
@@ -197,7 +197,7 @@ protected:
_slot_id_to_value_range;
// column -> ColumnValueRange
std::unordered_map<std::string, ColumnValueRangeType>
_colname_to_value_range;
- // We use _colname_to_value_range to store a column and its conresponding value ranges.
+ // We use _colname_to_value_range to store a column and its corresponding value ranges.
// But if a col is with value range, eg: 1 < col < 10, which is "!is_fixed_range",
// in this case we can not merge "1 < col < 10" with "col not in (2)".
// So we have to save "col not in (2)" to another structure: "_not_in_value_ranges".
diff --git a/be/src/vec/exec/scan/vscanner.cpp b/be/src/vec/exec/scan/vscanner.cpp
index 0853c59391..20e2a882c4 100644
--- a/be/src/vec/exec/scan/vscanner.cpp
+++ b/be/src/vec/exec/scan/vscanner.cpp
@@ -96,7 +96,7 @@ Status VScanner::try_append_late_arrival_runtime_filter() {
if (_vconjunct_ctx) {
_discard_conjuncts();
}
- // Notice that the number of runtiem filters may be larger than _applied_rf_num.
+ // Notice that the number of runtime filters may be larger than _applied_rf_num.
// But it is ok because it will be updated at next time.
RETURN_IF_ERROR(_parent->clone_vconjunct_ctx(&_vconjunct_ctx));
_applied_rf_num = arrived_rf_num;
diff --git a/be/src/vec/exec/vjson_scanner.cpp b/be/src/vec/exec/vjson_scanner.cpp
index 37f3928818..26053181c3 100644
--- a/be/src/vec/exec/vjson_scanner.cpp
+++ b/be/src/vec/exec/vjson_scanner.cpp
@@ -221,7 +221,7 @@ Status VJsonReader::_vhandle_simple_json(std::vector<MutableColumnPtr>& columns,
// for simple format json
// set valid to true and return OK if succeed.
// set valid to false and return OK if we met an invalid row.
-// return other status if encounter other problmes.
+// return other status if encounter other problems.
Status VJsonReader::_set_column_value(rapidjson::Value& objectValue,
std::vector<MutableColumnPtr>& columns,
const std::vector<SlotDescriptor*>&
slot_descs, bool* valid) {
@@ -600,7 +600,7 @@ Status VSIMDJsonReader::read_json_column(Block& block,
// for simple format json
// set valid to true and return OK if succeed.
// set valid to false and return OK if we met an invalid row.
-// return other status if encounter other problmes.
+// return other status if encounter other problems.
Status VSIMDJsonReader::_set_column_value(simdjson::ondemand::value
objectValue, Block& block,
const std::vector<SlotDescriptor*>&
slot_descs,
bool* valid) {
@@ -724,7 +724,7 @@ Status VSIMDJsonReader::_parse_json(bool* is_empty_row, bool* eof) {
// read one json string from line reader or file reader and parse it to json doc.
// return Status::DataQualityError() if data has quality error.
-// return other error if encounter other problemes.
+// return other error if encounter other problems.
// return Status::OK() if parse succeed or reach EOF.
Status VSIMDJsonReader::_parse_json_doc(size_t* size, bool* eof) {
// read a whole message
diff --git a/be/src/vec/exec/vjson_scanner.h b/be/src/vec/exec/vjson_scanner.h
index 409098433e..bdac179644 100644
--- a/be/src/vec/exec/vjson_scanner.h
+++ b/be/src/vec/exec/vjson_scanner.h
@@ -185,7 +185,7 @@ private:
// jsonpath simdjson pointer
// `["$.k1[0]", "$.k2.a"]` -> ["/k1/0", "/k2/a"]
// notice array index not support `*`
- // so we are not fully compatible with previous inplementation by rapidjson
+ // so we are not fully compatible with previous implementation by rapidjson
std::vector<std::string> _parsed_jsonpaths;
std::string _parsed_json_root;
diff --git a/be/src/vec/exec/vtable_function_node.h b/be/src/vec/exec/vtable_function_node.h
index 72108fa1d9..8e572f55e3 100644
--- a/be/src/vec/exec/vtable_function_node.h
+++ b/be/src/vec/exec/vtable_function_node.h
@@ -42,7 +42,7 @@ private:
Actually we only need to output column c1, no need to output columns in bitmap table B.
Copy large bitmap columns are very expensive and slow.
- Here we check if the slot is realy used, otherwise we avoid copy it and just insert a default value.
+ Here we check if the slot is really used, otherwise we avoid copy it and just insert a default value.
A better solution is:
1. FE: create a new output tuple based on the real output slots;
diff --git a/be/src/vec/exprs/vectorized_agg_fn.cpp b/be/src/vec/exprs/vectorized_agg_fn.cpp
index d47d9d32be..badf925c3b 100644
--- a/be/src/vec/exprs/vectorized_agg_fn.cpp
+++ b/be/src/vec/exprs/vectorized_agg_fn.cpp
@@ -70,7 +70,7 @@ Status AggFnEvaluator::create(ObjectPool* pool, const TExpr& desc, const TSortIn
auto sort_size = sort_info.ordering_exprs.size();
auto real_arguments_size =
agg_fn_evaluator->_argument_types_with_sort.size() - sort_size;
- // Child arguments conatins [real arguments, order by arguments], we pass the arguments
+ // Child arguments contains [real arguments, order by arguments], we pass the arguments
// to the order by functions
for (int i = 0; i < sort_size; ++i) {
agg_fn_evaluator->_sort_description.emplace_back(real_arguments_size +
i,
diff --git a/be/src/vec/exprs/vexpr.h b/be/src/vec/exprs/vexpr.h
index 891c83360e..35b78635ab 100644
--- a/be/src/vec/exprs/vexpr.h
+++ b/be/src/vec/exprs/vexpr.h
@@ -46,7 +46,7 @@ public:
// resize inserted param column to make sure column size equal to block.rows()
// and return param column index
static size_t insert_param(Block* block, ColumnWithTypeAndName&& elem,
size_t size) {
- // usualy elem.column always is const column, so we just clone it.
+ // usually elem.column always is const column, so we just clone it.
elem.column = elem.column->clone_resized(size);
block->insert(std::move(elem));
return block->columns() - 1;
diff --git a/be/src/vec/exprs/vin_predicate.cpp b/be/src/vec/exprs/vin_predicate.cpp
index b0aa2c2f65..cd6e9d70ac 100644
--- a/be/src/vec/exprs/vin_predicate.cpp
+++ b/be/src/vec/exprs/vin_predicate.cpp
@@ -51,7 +51,7 @@ Status VInPredicate::prepare(RuntimeState* state, const RowDescriptor& desc,
argument_template.emplace_back(std::move(column), child->data_type(),
child->expr_name());
}
- // contruct the proper function_name
+ // construct the proper function_name
std::string head(_is_not_in ? "not_" : "");
std::string real_function_name = head + std::string(function_name);
_function =
SimpleFunctionFactory::instance().get_function(real_function_name,
diff --git a/be/src/vec/functions/plus.cpp b/be/src/vec/functions/plus.cpp
index a020e66985..9e3b8923c8 100644
--- a/be/src/vec/functions/plus.cpp
+++ b/be/src/vec/functions/plus.cpp
@@ -40,7 +40,7 @@ struct PlusImpl {
return DecimalV2Value(a.value() + b.value());
}
- /// Apply operation and check overflow. It's used for Deciamal operations. @returns true if overflowed, false otherwise.
+ /// Apply operation and check overflow. It's used for Decimal operations. @returns true if overflowed, false otherwise.
template <typename Result = ResultType>
static inline bool apply(A a, B b, Result& c) {
return common::add_overflow(static_cast<Result>(a), b, c);
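For reference, overflow-checked addition of this kind is typically built on a compiler intrinsic (a sketch; the actual common::add_overflow wrapper in the codebase may differ):

    #include <cstdint>

    // Returns true if a + b overflowed; on success the sum is stored in c.
    inline bool checked_add(int64_t a, int64_t b, int64_t& c) {
        return __builtin_add_overflow(a, b, &c); // GCC/Clang builtin
    }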
diff --git a/be/src/vec/runtime/vdatetime_value.cpp b/be/src/vec/runtime/vdatetime_value.cpp
index b8622b2a84..8982391764 100644
--- a/be/src/vec/runtime/vdatetime_value.cpp
+++ b/be/src/vec/runtime/vdatetime_value.cpp
@@ -70,7 +70,7 @@ bool VecDateTimeValue::check_date(uint32_t year, uint32_t month, uint32_t day) {
bool VecDateTimeValue::from_date_str(const char* date_str, int len) {
const char* ptr = date_str;
const char* end = date_str + len;
- // ONLY 2, 6 can follow by a sapce
+ // ONLY 2, 6 can follow by a space
const static int allow_space_mask = 4 | 64;
const static int MAX_DATE_PARTS = 8;
uint32_t date_val[MAX_DATE_PARTS];
@@ -113,7 +113,7 @@ bool VecDateTimeValue::from_date_str(const char* date_str, int len) {
while (ptr < end && isdigit(*ptr) && (scan_to_delim || field_len--)) {
temp_val = temp_val * 10 + (*ptr++ - '0');
}
- // Imposible
+ // Impossible
if (temp_val > 999999L) {
return false;
}
@@ -188,7 +188,7 @@ bool VecDateTimeValue::from_date_str(const char* date_str, int len) {
// [YY_PART_YEAR * 10000L + 101, 991231] for two digits year 1970 ~1999
// (991231, 10000101) invalid, because support 1000-01-01
// [10000101, 99991231] for four digits year date value.
-// (99991231, 101000000) invalid, NOTE below this is datetime vaule hh:mm:ss must exist.
+// (99991231, 101000000) invalid, NOTE below this is datetime value hh:mm:ss must exist.
// [101000000, (YY_PART_YEAR - 1)##1231235959] two digits year datetime value
// ((YY_PART_YEAR - 1)##1231235959, YY_PART_YEAR##0101000000) invalid
// ((YY_PART_YEAR)##1231235959, 99991231235959] two digits year datetime value 1970 ~ 1999
@@ -904,7 +904,7 @@ uint8_t VecDateTimeValue::calc_week(const VecDateTimeValue& value, uint8_t mode,
int days = 0;
*year = value._year;
- // Check wether the first days of this year belongs to last year
+ // Check whether the first days of this year belongs to last year
if (value._month == 1 && value._day <= (7 - weekday_first_day)) {
if (!week_year && ((first_weekday && weekday_first_day != 0) ||
(!first_weekday && weekday_first_day > 3))) {
@@ -1517,7 +1517,7 @@ bool VecDateTimeValue::from_date_format_str(const char* format, int format_len,
}
// 1. already_set_date_part means _year, _month, _day be set, so we only set time part
// 2. already_set_time_part means _hour, _minute, _second, _microsecond be set,
- // so we only neet to set date part
+ // so we only need to set date part
// 3. if both are true, means all part of date_time be set, no need check_range_and_set_time
bool already_set_date_part = yearday > 0 || (week_num >= 0 && weekday > 0);
if (already_set_date_part && already_set_time_part) return true;
@@ -1826,7 +1826,7 @@ template <typename T>
bool DateV2Value<T>::from_date_str(const char* date_str, int len, int scale) {
const char* ptr = date_str;
const char* end = date_str + len;
- // ONLY 2, 6 can follow by a sapce
+ // ONLY 2, 6 can follow by a space
const static int allow_space_mask = 4 | 64;
const static int MAX_DATE_PARTS = 7;
uint32_t date_val[MAX_DATE_PARTS] = {0};
@@ -1878,7 +1878,7 @@ bool DateV2Value<T>::from_date_str(const char* date_str, int len, int scale) {
}
}
}
- // Imposible
+ // Impossible
if (temp_val > 999999L) {
return false;
}
@@ -2341,7 +2341,7 @@ bool DateV2Value<T>::from_date_format_str(const char* format, int format_len, co
}
// 1. already_set_date_part means _year, _month, _day be set, so we only set time part
// 2. already_set_time_part means _hour, _minute, _second, _microsecond be set,
- // so we only neet to set date part
+ // so we only need to set date part
// 3. if both are true, means all part of date_time be set, no need check_range_and_set_time
bool already_set_date_part = yearday > 0 || (week_num >= 0 && weekday > 0);
if (already_set_date_part && already_set_time_part) return true;
@@ -3243,7 +3243,7 @@ uint8_t DateV2Value<T>::calc_week(const uint32_t& day_nr, const uint16_t& year,
int days = 0;
*to_year = year;
- // Check wether the first days of this year belongs to last year
+ // Check whether the first days of this year belongs to last year
if (month == 1 && day <= (7 - weekday_first_day)) {
if (!week_year && ((first_weekday && weekday_first_day != 0) ||
(!first_weekday && weekday_first_day > 3))) {
diff --git a/be/src/vec/runtime/vdatetime_value.h b/be/src/vec/runtime/vdatetime_value.h
index 921e8ca715..bced4f962d 100644
--- a/be/src/vec/runtime/vdatetime_value.h
+++ b/be/src/vec/runtime/vdatetime_value.h
@@ -227,8 +227,8 @@ public:
_year(0) {} // before int128 16 bytes ---> after int64 8 bytes
// The data format of DATE/DATETIME is different in storage layer and execute layer.
- // So we should use diffrent creator to get data from value.
- // We should use create_from_olap_xxx only at binary data scaned from storage engine and convert to typed data.
+ // So we should use different creator to get data from value.
+ // We should use create_from_olap_xxx only at binary data scanned from storage engine and convert to typed data.
// At other case, we just use binary_cast<vectorized::Int64, vectorized::VecDateTimeValue>.
// olap storage layer date data format:
diff --git a/be/src/vec/runtime/vfile_result_writer.cpp b/be/src/vec/runtime/vfile_result_writer.cpp
index 990a48d65d..bae84e53d3 100644
--- a/be/src/vec/runtime/vfile_result_writer.cpp
+++ b/be/src/vec/runtime/vfile_result_writer.cpp
@@ -462,7 +462,7 @@ Status VFileResultWriter::_send_result() {
// The final stat result include:
// FileNumber, TotalRows, FileSize and URL
- // The type of these field should be conssitent with types defined
+ // The type of these field should be consistent with types defined
// in OutFileClause.java of FE.
MysqlRowBuffer row_buffer;
row_buffer.push_int(_file_idx); // file number
diff --git a/be/src/vec/runtime/vfile_result_writer.h b/be/src/vec/runtime/vfile_result_writer.h
index d16946491a..31bbc5a55f 100644
--- a/be/src/vec/runtime/vfile_result_writer.h
+++ b/be/src/vec/runtime/vfile_result_writer.h
@@ -91,7 +91,7 @@ private:
// TODO(cmy): I simply use a stringstrteam to buffer the data, to avoid calling
// file writer's write() for every single row.
// But this cannot solve the problem of a row of data that is too large.
- // For example: bitmap_to_string() may return large volumn of data.
+ // For example: bitmap_to_string() may return large volume of data.
// And the speed is relative low, in my test, is about 6.5MB/s.
std::stringstream _plain_text_outstream;
static const size_t OUTSTREAM_BUFFER_SIZE_BYTES;
diff --git a/be/test/exec/broker_scanner_test.cpp b/be/test/exec/broker_scanner_test.cpp
index 65387e575b..e3c784dd34 100644
--- a/be/test/exec/broker_scanner_test.cpp
+++ b/be/test/exec/broker_scanner_test.cpp
@@ -381,7 +381,7 @@ TEST_F(BrokerScannerTest, normal) {
EXPECT_EQ(5, *(int*)tuple->get_slot(4));
EXPECT_EQ(6, *(int*)tuple->get_slot(8));
- // 7, 8, unqualitifed
+ // 7, 8, unqualified
st = scanner.get_next(tuple, &tuple_pool, &eof, &fill_tuple);
EXPECT_TRUE(st.ok());
EXPECT_FALSE(eof);
diff --git a/be/test/testutil/desc_tbl_builder.h b/be/test/testutil/desc_tbl_builder.h
index 3fc5684dcf..5669f7c4ed 100644
--- a/be/test/testutil/desc_tbl_builder.h
+++ b/be/test/testutil/desc_tbl_builder.h
@@ -30,7 +30,7 @@ class TupleDescBuilder;
// associated with those tuples.
// TupleIds are monotonically increasing from 0 for each declare_tuple, and
// SlotIds increase similarly, but are always greater than all TupleIds.
-// Unlike FE, slots are not reordered based on size, and padding is not addded.
+// Unlike FE, slots are not reordered based on size, and padding is not added.
//
// Example usage:
// DescriptorTblBuilder builder;
diff --git a/be/test/util/metrics_test.cpp b/be/test/util/metrics_test.cpp
index 9722e660ea..386904c884 100644
--- a/be/test/util/metrics_test.cpp
+++ b/be/test/util/metrics_test.cpp
@@ -448,7 +448,7 @@ test_registry_task_duration_standard_deviation 28.8661
}
{
- // Register one histogram metric with lables to the entity
+ // Register one histogram metric with labels to the entity
auto entity = registry.register_entity("test_entity", {{"instance",
"test"}});
MetricPrototype task_duration_type(MetricType::HISTOGRAM,
MetricUnit::MILLISECONDS,
diff --git a/be/test/vec/function/function_test_util.h b/be/test/vec/function/function_test_util.h
index 49e75cf6be..8da9e6f1b0 100644
--- a/be/test/vec/function/function_test_util.h
+++ b/be/test/vec/function/function_test_util.h
@@ -202,7 +202,7 @@ Status check_function(const std::string& func_name, const InputTypeSet& input_ty
block.insert({std::move(column), desc.data_type, desc.col_name});
}
- // 1.2 parepare args for function call
+ // 1.2 prepare args for function call
ColumnNumbers arguments;
std::vector<doris_udf::FunctionContext::TypeDesc> arg_types;
std::vector<std::shared_ptr<ColumnPtrWrapper>> constant_col_ptrs;
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]