morningman commented on code in PR #10512:
URL: https://github.com/apache/doris/pull/10512#discussion_r916052685


##########
be/src/exec/arrow/parquet_reader.cpp:
##########
@@ -239,8 +240,32 @@ Status ParquetReaderWrap::handle_timestamp(const std::shared_ptr<arrow::Timestam
     return Status::OK();
 }
 
+Status ParquetReaderWrap::init_parquet_type() {

Review Comment:
   Why extract this method out of `init_reader()`?



##########
be/src/exec/arrow/parquet_row_group_reader.cpp:
##########
@@ -0,0 +1,567 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "exec/arrow/parquet_row_group_reader.h"
+
+#include <exprs/expr_context.h>
+#include <exprs/in_predicate.h>
+#include <parquet/encoding.h>
+
+#include <cstring>
+
+#define _PLAIN_DECODE(T, value, min_bytes, max_bytes, out_value, out_min, out_max) \
+    const T out_min = reinterpret_cast<const T*>(min_bytes)[0];                    \
+    const T out_max = reinterpret_cast<const T*>(max_bytes)[0];                    \
+    T out_value = *((T*)value);
+
+#define _PLAIN_DECODE_SINGLE(T, value, bytes, conjunct_value, out) \
+    const T out = reinterpret_cast<const T*>(bytes)[0];            \
+    T conjunct_value = *((T*)value);
+
+#define _FILTER_GROUP_BY_EQ_PRED(conjunct_value, min, max) \
+    if (conjunct_value < min || conjunct_value > max) {    \
+        return true;                                       \
+    }
+
+#define _FILTER_GROUP_BY_GT_PRED(conjunct_value, max) \
+    if (max <= conjunct_value) {                      \
+        return true;                                  \
+    }
+
+#define _FILTER_GROUP_BY_GE_PRED(conjunct_value, max) \
+    if (max < conjunct_value) {                       \
+        return true;                                  \
+    }
+
+#define _FILTER_GROUP_BY_LT_PRED(conjunct_value, min) \
+    if (min >= conjunct_value) {                      \
+        return true;                                  \
+    }
+
+#define _FILTER_GROUP_BY_LE_PRED(conjunct_value, min) \
+    if (min > conjunct_value) {                       \
+        return true;                                  \
+    }
+
+#define _FILTER_GROUP_BY_IN(T, in_pred_values, min_bytes, max_bytes) \
+    std::vector<T> in_values;                                        \
+    for (auto val : in_pred_values) {                                \
+        T value = reinterpret_cast<T*>(val)[0];                      \
+        in_values.emplace_back(value);                               \
+    }                                                                \
+    if (in_values.empty()) {                                         \
+        return false;                                                \
+    }                                                                \
+    std::sort(in_values.begin(), in_values.end());                   \
+    T in_min = in_values.front();                                    \
+    T in_max = in_values.back();                                     \
+    const T group_min = reinterpret_cast<const T*>(min_bytes)[0];    \
+    const T group_max = reinterpret_cast<const T*>(max_bytes)[0];    \
+    if (in_max < group_min || in_min > group_max) {                  \
+        return true;                                                 \
+    }
+
+namespace doris {
+
+RowGroupReader::RowGroupReader(RuntimeProfile* profile,
+                               const std::vector<ExprContext*>& conjunct_ctxs,
+                               std::shared_ptr<parquet::FileMetaData>& file_metadata,
+                               ParquetReaderWrap* parent)
+        : _conjunct_ctxs(conjunct_ctxs),
+          _file_metadata(file_metadata),
+          _profile(profile),
+          _parent(parent) {}
+
+RowGroupReader::~RowGroupReader() {
+    _slot_conjuncts.clear();
+    _filter_group.clear();
+}
+
+Status RowGroupReader::init_filter_groups(const TupleDescriptor* tuple_desc,
+                                          const std::map<std::string, int>& map_column,
+                                          const std::vector<int>& include_column_ids) {
+    std::unordered_set<int> parquet_column_ids(include_column_ids.begin(),
+                                               include_column_ids.end());
+    _init_conjuncts(tuple_desc, map_column, parquet_column_ids);
+    int total_group = _file_metadata->num_row_groups();
+    _parent->statistics()->total_groups = total_group;
+    _parent->statistics()->total_rows = _file_metadata->num_rows();
+
+    int64_t filtered_num_row_groups = 0;
+    int64_t filtered_num_rows = 0;
+    int64_t filtered_total_byte_size = 0;
+    bool need_filter = false;
+    for (int row_group_id = 0; row_group_id < total_group; row_group_id++) {
+        auto row_group_meta = _file_metadata->RowGroup(row_group_id);
+        for (SlotId slot_id = 0; slot_id < tuple_desc->slots().size(); slot_id++) {
+            const std::string& col_name = tuple_desc->slots()[slot_id]->col_name();
+            auto col_iter = map_column.find(col_name);
+            if (col_iter == map_column.end()) {
+                continue;
+            }
+            int parquet_col_id = col_iter->second;
+            if (parquet_column_ids.end() == parquet_column_ids.find(parquet_col_id)) {
+                // Column not exist in parquet file
+                continue;
+            }
+            auto slot_iter = _slot_conjuncts.find(slot_id);
+            if (slot_iter == _slot_conjuncts.end()) {
+                continue;
+            }
+            auto statistic = row_group_meta->ColumnChunk(parquet_col_id)->statistics();
+            if (!statistic->HasMinMax()) {
+                continue;
+            }
+            // Min-max of statistic is plain-encoded value
+            const std::string& min = statistic->EncodeMin();
+            const std::string& max = statistic->EncodeMax();
+
+            need_filter = _determine_filter_row_group(slot_iter->second, min, max);
+            if (need_filter) {
+                filtered_num_row_groups++;
+                filtered_num_rows += row_group_meta->num_rows();
+                filtered_total_byte_size += row_group_meta->total_byte_size();
+                row_group_meta->schema()->name();
+                LOG(INFO) << "Filter row group id: " << row_group_id;

Review Comment:
   Use VLOG here instead of LOG(INFO).
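   For example, a minimal sketch assuming glog-style verbose logging is available here (the verbosity level is just a placeholder):
   ```cpp
   // Only emitted when verbose logging is enabled (e.g. started with --v=1),
   // so normal runs are not flooded with one line per filtered row group.
   VLOG(1) << "Filter row group id: " << row_group_id;
   ```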



##########
be/src/exec/arrow/parquet_reader.cpp:
##########
@@ -239,8 +240,32 @@ Status ParquetReaderWrap::handle_timestamp(const std::shared_ptr<arrow::Timestam
     return Status::OK();
 }
 
+Status ParquetReaderWrap::init_parquet_type() {
+    // read batch
+    RETURN_IF_ERROR(read_next_batch());
+    _current_line_of_batch = 0;
+    if (_batch == nullptr) {
+        return Status::OK();
+    }
+    //save column type
+    std::shared_ptr<arrow::Schema> field_schema = _batch->schema();
+    for (int i = 0; i < _include_column_ids.size(); i++) {
+        std::shared_ptr<arrow::Field> field = field_schema->field(i);
+        if (!field) {
+            LOG(WARNING) << "Get field schema failed. Column order:" << i;
+            return Status::InternalError(_status.ToString());
+        }
+        _parquet_column_type.emplace_back(field->type()->id());
+    }
+    return Status::OK();
+}
+
 Status ParquetReaderWrap::read(Tuple* tuple, const std::vector<SlotDescriptor*>& tuple_slot_descs,
                                MemPool* mem_pool, bool* eof) {
+    if (_batch == nullptr) {

Review Comment:
   Remove this. I don't think we need to modify the row-based engine's logic.



##########
be/src/exec/base_scanner.cpp:
##########
@@ -95,6 +95,11 @@ Status BaseScanner::open() {
     return Status::OK();
 }
 
+void BaseScanner::reg_conjunct_ctxs(TupleId tupleId, std::vector<ExprContext*> conjunct_ctxs) {

Review Comment:
   ```suggestion
   void BaseScanner::reg_conjunct_ctxs(const TupleId& tupleId, const std::vector<ExprContext*>& conjunct_ctxs) {
   ```
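   Passing the vector by const reference avoids copying `conjunct_ctxs` at every call site; the callee can still copy it into a member if it needs to keep it.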



##########
be/src/exec/arrow/parquet_reader.cpp:
##########
@@ -118,6 +112,7 @@ Status ParquetReaderWrap::init_reader(const std::vector<SlotDescriptor*>& tuple_
 }
 
 void ParquetReaderWrap::close() {
+    LOG(INFO) << "ParquetReaderWrap _closed: " << _closed;

Review Comment:
   Change it to VLOG, or just remove it.



##########
be/src/exec/arrow/parquet_reader.cpp:
##########
@@ -554,6 +589,9 @@ void ParquetReaderWrap::prefetch_batch() {
 Status ParquetReaderWrap::read_next_batch() {
     std::unique_lock<std::mutex> lock(_mtx);
     while (!_closed && _queue.empty()) {

Review Comment:
   `read_next_batch()` is for the row-based engine, so there is no need to modify it.



##########
be/src/exec/arrow/parquet_row_group_reader.cpp:
##########
@@ -0,0 +1,567 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "exec/arrow/parquet_row_group_reader.h"
+
+#include <exprs/expr_context.h>
+#include <exprs/in_predicate.h>
+#include <parquet/encoding.h>
+
+#include <cstring>
+
+#define _PLAIN_DECODE(T, value, min_bytes, max_bytes, out_value, out_min, out_max) \
+    const T out_min = reinterpret_cast<const T*>(min_bytes)[0];                    \
+    const T out_max = reinterpret_cast<const T*>(max_bytes)[0];                    \
+    T out_value = *((T*)value);
+
+#define _PLAIN_DECODE_SINGLE(T, value, bytes, conjunct_value, out) \
+    const T out = reinterpret_cast<const T*>(bytes)[0];            \
+    T conjunct_value = *((T*)value);
+
+#define _FILTER_GROUP_BY_EQ_PRED(conjunct_value, min, max) \
+    if (conjunct_value < min || conjunct_value > max) {    \
+        return true;                                       \
+    }
+
+#define _FILTER_GROUP_BY_GT_PRED(conjunct_value, max) \
+    if (max <= conjunct_value) {                      \
+        return true;                                  \
+    }
+
+#define _FILTER_GROUP_BY_GE_PRED(conjunct_value, max) \
+    if (max < conjunct_value) {                       \
+        return true;                                  \
+    }
+
+#define _FILTER_GROUP_BY_LT_PRED(conjunct_value, min) \
+    if (min >= conjunct_value) {                      \
+        return true;                                  \
+    }
+
+#define _FILTER_GROUP_BY_LE_PRED(conjunct_value, min) \
+    if (min > conjunct_value) {                       \
+        return true;                                  \
+    }
+
+#define _FILTER_GROUP_BY_IN(T, in_pred_values, min_bytes, max_bytes) \
+    std::vector<T> in_values;                                        \
+    for (auto val : in_pred_values) {                                \
+        T value = reinterpret_cast<T*>(val)[0];                      \
+        in_values.emplace_back(value);                               \
+    }                                                                \
+    if (in_values.empty()) {                                         \
+        return false;                                                \
+    }                                                                \
+    std::sort(in_values.begin(), in_values.end());                   \
+    T in_min = in_values.front();                                    \
+    T in_max = in_values.back();                                     \
+    const T group_min = reinterpret_cast<const T*>(min_bytes)[0];    \
+    const T group_max = reinterpret_cast<const T*>(max_bytes)[0];    \
+    if (in_max < group_min || in_min > group_max) {                  \
+        return true;                                                 \
+    }
+
+namespace doris {
+
+RowGroupReader::RowGroupReader(RuntimeProfile* profile,
+                               const std::vector<ExprContext*>& conjunct_ctxs,
+                               std::shared_ptr<parquet::FileMetaData>& file_metadata,
+                               ParquetReaderWrap* parent)
+        : _conjunct_ctxs(conjunct_ctxs),
+          _file_metadata(file_metadata),
+          _profile(profile),
+          _parent(parent) {}
+
+RowGroupReader::~RowGroupReader() {
+    _slot_conjuncts.clear();
+    _filter_group.clear();
+}
+
+Status RowGroupReader::init_filter_groups(const TupleDescriptor* tuple_desc,
+                                          const std::map<std::string, int>& map_column,
+                                          const std::vector<int>& include_column_ids) {
+    std::unordered_set<int> parquet_column_ids(include_column_ids.begin(),
+                                               include_column_ids.end());
+    _init_conjuncts(tuple_desc, map_column, parquet_column_ids);
+    int total_group = _file_metadata->num_row_groups();
+    _parent->statistics()->total_groups = total_group;
+    _parent->statistics()->total_rows = _file_metadata->num_rows();
+
+    int64_t filtered_num_row_groups = 0;
+    int64_t filtered_num_rows = 0;
+    int64_t filtered_total_byte_size = 0;
+    bool need_filter = false;
+    for (int row_group_id = 0; row_group_id < total_group; row_group_id++) {
+        auto row_group_meta = _file_metadata->RowGroup(row_group_id);
+        for (SlotId slot_id = 0; slot_id < tuple_desc->slots().size(); slot_id++) {
+            const std::string& col_name = tuple_desc->slots()[slot_id]->col_name();
+            auto col_iter = map_column.find(col_name);
+            if (col_iter == map_column.end()) {
+                continue;
+            }
+            int parquet_col_id = col_iter->second;
+            if (parquet_column_ids.end() == parquet_column_ids.find(parquet_col_id)) {
+                // Column not exist in parquet file
+                continue;
+            }
+            auto slot_iter = _slot_conjuncts.find(slot_id);
+            if (slot_iter == _slot_conjuncts.end()) {
+                continue;
+            }
+            auto statistic = row_group_meta->ColumnChunk(parquet_col_id)->statistics();
+            if (!statistic->HasMinMax()) {
+                continue;
+            }
+            // Min-max of statistic is plain-encoded value
+            const std::string& min = statistic->EncodeMin();
+            const std::string& max = statistic->EncodeMax();
+
+            need_filter = _determine_filter_row_group(slot_iter->second, min, max);
+            if (need_filter) {
+                filtered_num_row_groups++;
+                filtered_num_rows += row_group_meta->num_rows();
+                filtered_total_byte_size += row_group_meta->total_byte_size();
+                row_group_meta->schema()->name();
+                LOG(INFO) << "Filter row group id: " << row_group_id;
+                _filter_group.emplace(row_group_id);

Review Comment:
   Once this group has been filtered, we can skip the rest of the conjuncts and go to the next row group.
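   A rough sketch of that, reusing the names from the quoted code above (not a complete implementation):
   ```cpp
   need_filter = _determine_filter_row_group(slot_iter->second, min, max);
   if (need_filter) {
       filtered_num_row_groups++;
       filtered_num_rows += row_group_meta->num_rows();
       filtered_total_byte_size += row_group_meta->total_byte_size();
       _filter_group.emplace(row_group_id);
       // The whole row group is already filtered out; skip the remaining
       // slots/conjuncts and continue with the next row group.
       break;
   }
   ```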



##########
be/src/exec/base_scanner.h:
##########
@@ -142,6 +145,9 @@ class BaseScanner {
     vectorized::Block _src_block;
     int _num_of_columns_from_file;
 
+    TupleId _tupleId;

Review Comment:
   Add a comment to explain what this tuple id is.
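   For example (just a guess at the intent, adjust the wording to what the id actually means):
   ```cpp
   // Id of the tuple that the registered conjuncts are bound to; used to look up
   // the corresponding TupleDescriptor when pushing predicates down to the reader.
   TupleId _tupleId;
   ```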



##########
be/src/exec/arrow/parquet_row_group_reader.cpp:
##########
@@ -0,0 +1,567 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "exec/arrow/parquet_row_group_reader.h"
+
+#include <exprs/expr_context.h>
+#include <exprs/in_predicate.h>
+#include <parquet/encoding.h>
+
+#include <cstring>
+
+#define _PLAIN_DECODE(T, value, min_bytes, max_bytes, out_value, out_min, out_max) \
+    const T out_min = reinterpret_cast<const T*>(min_bytes)[0];                    \
+    const T out_max = reinterpret_cast<const T*>(max_bytes)[0];                    \
+    T out_value = *((T*)value);
+
+#define _PLAIN_DECODE_SINGLE(T, value, bytes, conjunct_value, out) \
+    const T out = reinterpret_cast<const T*>(bytes)[0];            \
+    T conjunct_value = *((T*)value);
+
+#define _FILTER_GROUP_BY_EQ_PRED(conjunct_value, min, max) \
+    if (conjunct_value < min || conjunct_value > max) {    \
+        return true;                                       \
+    }
+
+#define _FILTER_GROUP_BY_GT_PRED(conjunct_value, max) \
+    if (max <= conjunct_value) {                      \
+        return true;                                  \
+    }
+
+#define _FILTER_GROUP_BY_GE_PRED(conjunct_value, max) \
+    if (max < conjunct_value) {                       \
+        return true;                                  \
+    }
+
+#define _FILTER_GROUP_BY_LT_PRED(conjunct_value, min) \
+    if (min >= conjunct_value) {                      \
+        return true;                                  \
+    }
+
+#define _FILTER_GROUP_BY_LE_PRED(conjunct_value, min) \
+    if (min > conjunct_value) {                       \
+        return true;                                  \
+    }
+
+#define _FILTER_GROUP_BY_IN(T, in_pred_values, min_bytes, max_bytes) \
+    std::vector<T> in_values;                                        \
+    for (auto val : in_pred_values) {                                \
+        T value = reinterpret_cast<T*>(val)[0];                      \
+        in_values.emplace_back(value);                               \
+    }                                                                \
+    if (in_values.empty()) {                                         \
+        return false;                                                \
+    }                                                                \
+    std::sort(in_values.begin(), in_values.end());                   \
+    T in_min = in_values.front();                                    \
+    T in_max = in_values.back();                                     \
+    const T group_min = reinterpret_cast<const T*>(min_bytes)[0];    \
+    const T group_max = reinterpret_cast<const T*>(max_bytes)[0];    \
+    if (in_max < group_min || in_min > group_max) {                  \
+        return true;                                                 \
+    }
+
+namespace doris {
+
+RowGroupReader::RowGroupReader(RuntimeProfile* profile,
+                               const std::vector<ExprContext*>& conjunct_ctxs,
+                               std::shared_ptr<parquet::FileMetaData>& file_metadata,
+                               ParquetReaderWrap* parent)
+        : _conjunct_ctxs(conjunct_ctxs),
+          _file_metadata(file_metadata),
+          _profile(profile),
+          _parent(parent) {}
+
+RowGroupReader::~RowGroupReader() {
+    _slot_conjuncts.clear();
+    _filter_group.clear();
+}
+
+Status RowGroupReader::init_filter_groups(const TupleDescriptor* tuple_desc,
+                                          const std::map<std::string, int>& map_column,
+                                          const std::vector<int>& include_column_ids) {
+    std::unordered_set<int> parquet_column_ids(include_column_ids.begin(),
+                                               include_column_ids.end());
+    _init_conjuncts(tuple_desc, map_column, parquet_column_ids);
+    int total_group = _file_metadata->num_row_groups();
+    _parent->statistics()->total_groups = total_group;
+    _parent->statistics()->total_rows = _file_metadata->num_rows();
+
+    int64_t filtered_num_row_groups = 0;
+    int64_t filtered_num_rows = 0;
+    int64_t filtered_total_byte_size = 0;
+    bool need_filter = false;
+    for (int row_group_id = 0; row_group_id < total_group; row_group_id++) {
+        auto row_group_meta = _file_metadata->RowGroup(row_group_id);
+        for (SlotId slot_id = 0; slot_id < tuple_desc->slots().size(); slot_id++) {
+            const std::string& col_name = tuple_desc->slots()[slot_id]->col_name();
+            auto col_iter = map_column.find(col_name);
+            if (col_iter == map_column.end()) {
+                continue;
+            }
+            int parquet_col_id = col_iter->second;
+            if (parquet_column_ids.end() == parquet_column_ids.find(parquet_col_id)) {
+                // Column not exist in parquet file
+                continue;
+            }
+            auto slot_iter = _slot_conjuncts.find(slot_id);
+            if (slot_iter == _slot_conjuncts.end()) {
+                continue;
+            }
+            auto statistic = row_group_meta->ColumnChunk(parquet_col_id)->statistics();
+            if (!statistic->HasMinMax()) {
+                continue;
+            }
+            // Min-max of statistic is plain-encoded value
+            const std::string& min = statistic->EncodeMin();
+            const std::string& max = statistic->EncodeMax();
+
+            need_filter = _determine_filter_row_group(slot_iter->second, min, max);
+            if (need_filter) {
+                filtered_num_row_groups++;
+                filtered_num_rows += row_group_meta->num_rows();
+                filtered_total_byte_size += row_group_meta->total_byte_size();
+                row_group_meta->schema()->name();
+                LOG(INFO) << "Filter row group id: " << row_group_id;
+                _filter_group.emplace(row_group_id);
+            }
+        }
+    }
+    if (need_filter) {
+        _parent->statistics()->filtered_row_groups = filtered_num_row_groups;
+        _parent->statistics()->filtered_rows = filtered_num_rows;
+        _parent->statistics()->filtered_total_bytes = filtered_total_byte_size;
+        LOG(INFO) << "Parquet file: " << _file_metadata->schema()->name()

Review Comment:
   Use VLOG here as well, instead of LOG(INFO).



##########
be/src/vec/exec/file_scanner.h:
##########
@@ -85,6 +87,10 @@ class FileScanner {
     std::unique_ptr<vectorized::VExprContext*> _vpre_filter_ctx_ptr;
     int _num_of_columns_from_file;
 
+    // File formats based push down predicate
+    std::vector<ExprContext*> _conjunct_ctxs;
+    TupleId _tupleId;

Review Comment:
   Add a comment for this tuple id.



##########
be/src/vec/exec/file_scanner.cpp:
##########
@@ -69,6 +69,11 @@ Status FileScanner::open() {
     return Status::OK();
 }
 
+void FileScanner::reg_conjunct_ctxs(TupleId tupleId, std::vector<ExprContext*> conjunct_ctxs) {

Review Comment:
   ```suggestion
   void FileScanner::reg_conjunct_ctxs(const TupleId& tupleId, const std::vector<ExprContext*>& conjunct_ctxs) {
   ```



##########
be/src/exec/arrow/parquet_row_group_reader.cpp:
##########
@@ -0,0 +1,567 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "exec/arrow/parquet_row_group_reader.h"
+
+#include <exprs/expr_context.h>
+#include <exprs/in_predicate.h>
+#include <parquet/encoding.h>
+
+#include <cstring>
+
+#define _PLAIN_DECODE(T, value, min_bytes, max_bytes, out_value, out_min, out_max) \
+    const T out_min = reinterpret_cast<const T*>(min_bytes)[0];                    \
+    const T out_max = reinterpret_cast<const T*>(max_bytes)[0];                    \
+    T out_value = *((T*)value);
+
+#define _PLAIN_DECODE_SINGLE(T, value, bytes, conjunct_value, out) \
+    const T out = reinterpret_cast<const T*>(bytes)[0];            \
+    T conjunct_value = *((T*)value);
+
+#define _FILTER_GROUP_BY_EQ_PRED(conjunct_value, min, max) \
+    if (conjunct_value < min || conjunct_value > max) {    \
+        return true;                                       \
+    }
+
+#define _FILTER_GROUP_BY_GT_PRED(conjunct_value, max) \
+    if (max <= conjunct_value) {                      \
+        return true;                                  \
+    }
+
+#define _FILTER_GROUP_BY_GE_PRED(conjunct_value, max) \
+    if (max < conjunct_value) {                       \
+        return true;                                  \
+    }
+
+#define _FILTER_GROUP_BY_LT_PRED(conjunct_value, min) \
+    if (min >= conjunct_value) {                      \
+        return true;                                  \
+    }
+
+#define _FILTER_GROUP_BY_LE_PRED(conjunct_value, min) \
+    if (min > conjunct_value) {                       \
+        return true;                                  \
+    }
+
+#define _FILTER_GROUP_BY_IN(T, in_pred_values, min_bytes, max_bytes) \
+    std::vector<T> in_values;                                        \
+    for (auto val : in_pred_values) {                                \
+        T value = reinterpret_cast<T*>(val)[0];                      \
+        in_values.emplace_back(value);                               \
+    }                                                                \
+    if (in_values.empty()) {                                         \
+        return false;                                                \
+    }                                                                \
+    std::sort(in_values.begin(), in_values.end());                   \
+    T in_min = in_values.front();                                    \
+    T in_max = in_values.back();                                     \
+    const T group_min = reinterpret_cast<const T*>(min_bytes)[0];    \
+    const T group_max = reinterpret_cast<const T*>(max_bytes)[0];    \
+    if (in_max < group_min || in_min > group_max) {                  \
+        return true;                                                 \
+    }
+
+namespace doris {
+
+RowGroupReader::RowGroupReader(RuntimeProfile* profile,
+                               const std::vector<ExprContext*>& conjunct_ctxs,
+                               std::shared_ptr<parquet::FileMetaData>& file_metadata,
+                               ParquetReaderWrap* parent)
+        : _conjunct_ctxs(conjunct_ctxs),
+          _file_metadata(file_metadata),
+          _profile(profile),
+          _parent(parent) {}
+
+RowGroupReader::~RowGroupReader() {
+    _slot_conjuncts.clear();
+    _filter_group.clear();
+}
+
+Status RowGroupReader::init_filter_groups(const TupleDescriptor* tuple_desc,
+                                          const std::map<std::string, int>& map_column,
+                                          const std::vector<int>& include_column_ids) {
+    std::unordered_set<int> parquet_column_ids(include_column_ids.begin(),
+                                               include_column_ids.end());
+    _init_conjuncts(tuple_desc, map_column, parquet_column_ids);
+    int total_group = _file_metadata->num_row_groups();
+    _parent->statistics()->total_groups = total_group;
+    _parent->statistics()->total_rows = _file_metadata->num_rows();
+
+    int64_t filtered_num_row_groups = 0;
+    int64_t filtered_num_rows = 0;
+    int64_t filtered_total_byte_size = 0;
+    bool need_filter = false;
+    for (int row_group_id = 0; row_group_id < total_group; row_group_id++) {
+        auto row_group_meta = _file_metadata->RowGroup(row_group_id);
+        for (SlotId slot_id = 0; slot_id < tuple_desc->slots().size(); slot_id++) {
+            const std::string& col_name = tuple_desc->slots()[slot_id]->col_name();
+            auto col_iter = map_column.find(col_name);
+            if (col_iter == map_column.end()) {
+                continue;
+            }
+            int parquet_col_id = col_iter->second;
+            if (parquet_column_ids.end() == parquet_column_ids.find(parquet_col_id)) {
+                // Column not exist in parquet file
+                continue;
+            }
+            auto slot_iter = _slot_conjuncts.find(slot_id);
+            if (slot_iter == _slot_conjuncts.end()) {
+                continue;
+            }
+            auto statistic = row_group_meta->ColumnChunk(parquet_col_id)->statistics();
+            if (!statistic->HasMinMax()) {
+                continue;
+            }
+            // Min-max of statistic is plain-encoded value
+            const std::string& min = statistic->EncodeMin();
+            const std::string& max = statistic->EncodeMax();
+
+            need_filter = _determine_filter_row_group(slot_iter->second, min, max);
+            if (need_filter) {
+                filtered_num_row_groups++;
+                filtered_num_rows += row_group_meta->num_rows();
+                filtered_total_byte_size += row_group_meta->total_byte_size();
+                row_group_meta->schema()->name();

Review Comment:
   Unused line? This statement has no effect.



##########
be/src/exec/arrow/parquet_reader.cpp:
##########
@@ -187,19 +182,25 @@ Status ParquetReaderWrap::read_record_batch(bool* eof) {
 }
 
 Status ParquetReaderWrap::next_batch(std::shared_ptr<arrow::RecordBatch>* batch, bool* eof) {
-    if (_batch->num_rows() == 0 || _current_line_of_batch != 0 || _current_line_of_group != 0) {
-        RETURN_IF_ERROR(read_record_batch(eof));
+    std::unique_lock<std::mutex> lock(_mtx);
+    while (!_closed && _queue.empty()) {
+        if (_batch_eof) {
+            _include_column_ids.clear();
+            *eof = true;
+            _batch_eof = false;

Review Comment:
   Why set it back to false?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

