github-actions[bot] commented on code in PR #34328:
URL: https://github.com/apache/doris/pull/34328#discussion_r1584068140


##########
be/src/pipeline/exec/aggregation_source_operator.cpp:
##########
@@ -246,63 +252,70 @@ Status AggLocalState::_get_with_serialized_key_result(RuntimeState* state, vecto
 
     SCOPED_TIMER(_get_results_timer);
     std::visit(
-            [&](auto&& agg_method) -> void {
-                auto& data = *agg_method.hash_table;
-                agg_method.init_iterator();
-                const auto size = std::min(data.size(), size_t(state->batch_size()));
-                using KeyType = std::decay_t<decltype(agg_method.iterator->get_first())>;
-                std::vector<KeyType> keys(size);
-                if (shared_state.values.size() < size) {
-                    shared_state.values.resize(size);
-                }
-
-                size_t num_rows = 0;
-                shared_state.aggregate_data_container->init_once();
-                auto& iter = shared_state.aggregate_data_container->iterator;
+            vectorized::Overload {
+                    [&](std::monostate& arg) -> void {
+                        throw doris::Exception(ErrorCode::INTERNAL_ERROR, "uninited hash table");
+                    },
+                    [&](auto& agg_method) -> void {
+                        auto& data = *agg_method.hash_table;
+                        agg_method.init_iterator();
+                        const auto size = std::min(data.size(), size_t(state->batch_size()));
+                        using KeyType = std::decay_t<decltype(agg_method.iterator->get_first())>;
+                        std::vector<KeyType> keys(size);
+                        if (shared_state.values.size() < size) {
+                            shared_state.values.resize(size);
+                        }
 
-                {
-                    SCOPED_TIMER(_hash_table_iterate_timer);
-                    while (iter != shared_state.aggregate_data_container->end() &&
-                           num_rows < state->batch_size()) {
-                        keys[num_rows] = iter.template get_key<KeyType>();
-                        shared_state.values[num_rows] = iter.get_aggregate_data();
-                        ++iter;
-                        ++num_rows;
-                    }
-                }
+                        size_t num_rows = 0;
+                        shared_state.aggregate_data_container->init_once();
+                        auto& iter = shared_state.aggregate_data_container->iterator;
+
+                        {
+                            SCOPED_TIMER(_hash_table_iterate_timer);
+                            while (iter != shared_state.aggregate_data_container->end() &&
+                                   num_rows < state->batch_size()) {
+                                keys[num_rows] = iter.template get_key<KeyType>();
+                                shared_state.values[num_rows] = iter.get_aggregate_data();
+                                ++iter;
+                                ++num_rows;
+                            }
+                        }
 
-                {
-                    SCOPED_TIMER(_insert_keys_to_column_timer);
-                    agg_method.insert_keys_into_columns(keys, key_columns, num_rows);
-                }
+                        {
+                            SCOPED_TIMER(_insert_keys_to_column_timer);
+                            agg_method.insert_keys_into_columns(keys, key_columns, num_rows);
+                        }
 
-                for (size_t i = 0; i < shared_state.aggregate_evaluators.size(); ++i) {
-                    shared_state.aggregate_evaluators[i]->insert_result_info_vec(
-                            shared_state.values, shared_state.offsets_of_aggregate_states[i],
-                            value_columns[i].get(), num_rows);
-                }
+                        for (size_t i = 0; i < shared_state.aggregate_evaluators.size(); ++i) {
+                            shared_state.aggregate_evaluators[i]->insert_result_info_vec(
+                                    shared_state.values,
+                                    shared_state.offsets_of_aggregate_states[i],
+                                    value_columns[i].get(), num_rows);
+                        }
 
-                if (iter == shared_state.aggregate_data_container->end()) {
-                    if (agg_method.hash_table->has_null_key_data()) {
-                        // only one key of group by support wrap null key
-                        // here need additional processing logic on the null key / value
-                        DCHECK(key_columns.size() == 1);
-                        DCHECK(key_columns[0]->is_nullable());
-                        if (key_columns[0]->size() < state->batch_size()) {
-                            key_columns[0]->insert_data(nullptr, 0);
-                            auto mapped = agg_method.hash_table->template get_null_key_data<
-                                    vectorized::AggregateDataPtr>();
-                            for (size_t i = 0; i < shared_state.aggregate_evaluators.size(); ++i)
-                                shared_state.aggregate_evaluators[i]->insert_result_info(
-                                        mapped + shared_state.offsets_of_aggregate_states[i],
-                                        value_columns[i].get());
-                            *eos = true;
+                        if (iter == shared_state.aggregate_data_container->end()) {
+                            if (agg_method.hash_table->has_null_key_data()) {
+                                // only one key of group by support wrap null key
+                                // here need additional processing logic on the null key / value
+                                DCHECK(key_columns.size() == 1);
+                                DCHECK(key_columns[0]->is_nullable());
+                                if (key_columns[0]->size() < state->batch_size()) {
+                                    key_columns[0]->insert_data(nullptr, 0);
+                                    auto mapped = agg_method.hash_table->template get_null_key_data<
+                                            vectorized::AggregateDataPtr>();
+                                    for (size_t i = 0; i < shared_state.aggregate_evaluators.size();
+                                         ++i)

Review Comment:
   warning: statement should be inside braces [readability-braces-around-statements]
   
   ```suggestion
                                            ++i) {
   ```
   
   be/src/pipeline/exec/aggregation_source_operator.cpp:310:
   ```diff
   -                                                 value_columns[i].get());
   +                                                 value_columns[i].get());
   + }
   ```
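   
   For reference, applying both suggestions simply braces the single-statement `for` body in the null-key path. A sketch of how the two hunks compose, reconstructed from the removed lines above (this is a fragment of the surrounding function, with indentation shown only for readability, not copied from the updated file):
   
   ```cpp
   // After the readability-braces-around-statements fix: the loop body gains
   // braces; nothing else in the null-key handling changes.
   auto mapped = agg_method.hash_table->template get_null_key_data<
           vectorized::AggregateDataPtr>();
   for (size_t i = 0; i < shared_state.aggregate_evaluators.size(); ++i) {
       shared_state.aggregate_evaluators[i]->insert_result_info(
               mapped + shared_state.offsets_of_aggregate_states[i],
               value_columns[i].get());
   }
   ```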
   

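   More broadly, the hunk replaces the single generic lambda passed to `std::visit` with `vectorized::Overload` plus an explicit `std::monostate` branch, so an uninitialized hash-table variant throws instead of being visited silently. Below is a minimal, self-contained sketch of the overload-set visitor idiom that `vectorized::Overload` appears to implement; the `Overload` helper, `SerializedKeyMethod`, `FixedKeyMethod`, and `MethodVariant` names are illustrative stand-ins, not Doris code.
   
   ```cpp
   #include <iostream>
   #include <stdexcept>
   #include <variant>
   
   // Classic overload-set helper: inherits operator() from every callable passed in.
   // Doris's vectorized::Overload is assumed to follow this same pattern.
   template <typename... Callables>
   struct Overload : Callables... {
       using Callables::operator()...;
   };
   template <typename... Callables>
   Overload(Callables...) -> Overload<Callables...>;  // C++17 deduction guide
   
   // Placeholder stand-ins for the concrete hash-table method types in the variant.
   struct SerializedKeyMethod {};
   struct FixedKeyMethod {};
   
   using MethodVariant = std::variant<std::monostate, SerializedKeyMethod, FixedKeyMethod>;
   
   void get_result(MethodVariant& method) {
       std::visit(Overload {
                          // std::monostate means the variant was never initialized:
                          // fail fast, mirroring the "uninited hash table" branch in the diff.
                          [](std::monostate&) -> void {
                              throw std::runtime_error("uninited hash table");
                          },
                          // Every concrete method type lands in the generic branch,
                          // like the diff's [&](auto& agg_method) lambda.
                          [](auto& /*agg_method*/) -> void {
                              std::cout << "iterating an initialized hash table\n";
                          },
                  },
                  method);
   }
   
   int main() {
       MethodVariant method;  // holds std::monostate until someone initializes it
       try {
           get_result(method);
       } catch (const std::exception& e) {
           std::cout << "caught: " << e.what() << '\n';
       }
       method = SerializedKeyMethod {};
       get_result(method);  // now dispatches to the generic branch
   }
   ```
   
   The benefit of the explicit `std::monostate` branch is that an uninitialized variant produces a clear internal error instead of silently matching the generic lambda.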


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

