liurenjie1024 commented on code in PR #558:
URL: https://github.com/apache/iceberg-rust/pull/558#discussion_r1731161779

##########
crates/iceberg/Cargo.toml:
##########
@@ -83,5 +83,6 @@ ctor = { workspace = true }
 iceberg-catalog-memory = { workspace = true }
 iceberg_test_utils = { path = "../test_utils", features = ["tests"] }
 pretty_assertions = { workspace = true }
+rand = "0.8"

Review Comment:
   Move this to workspace?
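   A rough sketch of what that could look like, assuming `rand` gets added to the `[workspace.dependencies]` table in the root `Cargo.toml` (not verified against the current workspace layout):

       # Cargo.toml (workspace root)
       [workspace.dependencies]
       rand = "0.8"

       # crates/iceberg/Cargo.toml, in the existing [dev-dependencies] section
       rand = { workspace = true }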

##########
crates/iceberg/src/expr/visitors/row_group_metrics_evaluator.rs:
##########
@@ -140,21 +140,79 @@ impl<'a> RowGroupMetricsEvaluator<'a> {
             return Ok(None);
         };
 
-        Ok(Some(Datum::try_from_bytes(
-            stats.min_bytes(),
-            primitive_type,
-        )?))
+        if !stats.has_min_max_set() {
+            return Ok(None);
+        }
+
+        Ok(Some(match (primitive_type, stats) {
+            (PrimitiveType::Boolean, Statistics::Boolean(stats)) => Datum::bool(*stats.min()),

Review Comment:
   This may not be a blocker for this PR, but I would suggest putting similar code in the same place; there is already similar code in the [parquet writer](https://github.com/apache/iceberg-rust/blob/b36d1c606695aa06e0d359582eeb1a0080d90bf0/crates/iceberg/src/writer/file_writer/parquet_writer.rs#L240).
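   One possible shape for sharing that logic, as a rough sketch only (the helper name and where it lives are placeholders, and only a few of the arms are shown):

       use parquet::file::statistics::Statistics;

       use crate::spec::{Datum, PrimitiveType};
       use crate::Result;

       /// Convert a parquet column chunk's min statistic into an Iceberg `Datum`,
       /// returning `None` when no usable statistic is present. A `max` variant
       /// (or a min/max flag parameter) would follow the same pattern.
       pub(crate) fn parquet_min_to_datum(
           primitive_type: &PrimitiveType,
           stats: &Statistics,
       ) -> Result<Option<Datum>> {
           if !stats.has_min_max_set() {
               return Ok(None);
           }
           Ok(match (primitive_type, stats) {
               (PrimitiveType::Boolean, Statistics::Boolean(s)) => Some(Datum::bool(*s.min())),
               (PrimitiveType::Int, Statistics::Int32(s)) => Some(Datum::int(*s.min())),
               (PrimitiveType::Long, Statistics::Int64(s)) => Some(Datum::long(*s.min())),
               (PrimitiveType::String, Statistics::ByteArray(s)) => {
                   Some(Datum::string(s.min().as_utf8()?))
               }
               // ...remaining arms, shared between the writer and this evaluator...
               _ => None,
           })
       }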

##########
crates/iceberg/src/expr/visitors/row_group_metrics_evaluator.rs:
##########
@@ -0,0 +1,1927 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! Evaluates Parquet Row Group metrics
+
+use std::collections::HashMap;
+
+use fnv::FnvHashSet;
+use parquet::file::metadata::RowGroupMetaData;
+use parquet::file::statistics::Statistics;
+
+use crate::expr::visitors::bound_predicate_visitor::{visit, BoundPredicateVisitor};
+use crate::expr::{BoundPredicate, BoundReference};
+use crate::spec::{Datum, PrimitiveLiteral, PrimitiveType, Schema};
+use crate::{Error, ErrorKind, Result};
+
+pub(crate) struct RowGroupMetricsEvaluator<'a> {
+    row_group_metadata: &'a RowGroupMetaData,
+    iceberg_field_id_to_parquet_column_index: &'a HashMap<i32, usize>,
+    snapshot_schema: &'a Schema,
+}
+
+const IN_PREDICATE_LIMIT: usize = 200;
+const ROW_GROUP_MIGHT_MATCH: Result<bool> = Ok(true);
+const ROW_GROUP_CANT_MATCH: Result<bool> = Ok(false);
+
+impl<'a> RowGroupMetricsEvaluator<'a> {
+    fn new(
+        row_group_metadata: &'a RowGroupMetaData,
+        field_id_map: &'a HashMap<i32, usize>,
+        snapshot_schema: &'a Schema,
+    ) -> Self {
+        Self {
+            row_group_metadata,
+            iceberg_field_id_to_parquet_column_index: field_id_map,
+            snapshot_schema,
+        }
+    }
+
+    /// Evaluate this `RowGroupMetricsEvaluator`'s filter predicate against the
+    /// provided [`RowGroupMetaData`]'. Used by [`ArrowReader`] to
+    /// see if a Parquet file RowGroup could possibly contain data that matches
+    /// the scan's filter.
+    pub(crate) fn eval(
+        filter: &'a BoundPredicate,
+        row_group_metadata: &'a RowGroupMetaData,
+        field_id_map: &'a HashMap<i32, usize>,
+        snapshot_schema: &'a Schema,
+    ) -> Result<bool> {
+        if row_group_metadata.num_rows() == 0 {
+            return ROW_GROUP_CANT_MATCH;
+        }
+
+        let mut evaluator = Self::new(row_group_metadata, field_id_map, snapshot_schema);
+
+        visit(&mut evaluator, filter)
+    }
+
+    fn stats_for_field_id(&self, field_id: i32) -> Option<&Statistics> {
+        let parquet_column_index = *self
+            .iceberg_field_id_to_parquet_column_index
+            .get(&field_id)?;
+        self.row_group_metadata
+            .column(parquet_column_index)
+            .statistics()
+    }
+
+    fn null_count(&self, field_id: i32) -> Option<u64> {
+        self.stats_for_field_id(field_id)
+            .map(|stats| stats.null_count())
+    }
+
+    fn value_count(&self) -> u64 {
+        self.row_group_metadata.num_rows() as u64
+    }
+
+    fn contains_nulls_only(&self, field_id: i32) -> bool {
+        let null_count = self.null_count(field_id);
+        let value_count = self.value_count();
+
+        null_count == Some(value_count)
+    }
+
+    fn may_contain_null(&self, field_id: i32) -> bool {
+        if let Some(null_count) = self.null_count(field_id) {
+            null_count > 0
+        } else {
+            true
+        }
+    }
+
+    fn stats_and_type_for_field_id(
+        &self,
+        field_id: i32,
+    ) -> Result<Option<(&Statistics, PrimitiveType)>> {
+        let Some(stats) = self.stats_for_field_id(field_id) else {
+            // No statistics for column
+            return Ok(None);
+        };
+
+        let Some(field) = self.snapshot_schema.field_by_id(field_id) else {
+            return Err(Error::new(
+                ErrorKind::Unexpected,
+                format!(
+                    "Could not find a field with id '{}' in the snapshot schema",
+                    &field_id
+                ),
+            ));
+        };
+
+        let Some(primitive_type) = field.field_type.as_primitive_type() else {
+            return Err(Error::new(
+                ErrorKind::Unexpected,
+                format!(
+                    "Could not determine the PrimitiveType for field id '{}'",
+                    &field_id
+                ),
+            ));
+        };
+
+        Ok(Some((stats, primitive_type.clone())))
+    }
+
+    fn min_value(&self, field_id: i32) -> Result<Option<Datum>> {
+        let Some((stats, primitive_type)) = self.stats_and_type_for_field_id(field_id)? else {
+            return Ok(None);
+        };
+
+        if !stats.has_min_max_set() {
+            return Ok(None);
+        }
+
+        Ok(Some(match (primitive_type, stats) {
+            (PrimitiveType::Boolean, Statistics::Boolean(stats)) => Datum::bool(*stats.min()),
+            (PrimitiveType::Int, Statistics::Int32(stats)) => Datum::int(*stats.min()),
+            (PrimitiveType::Date, Statistics::Int32(stats)) => Datum::date(*stats.min()),
+            (PrimitiveType::Long, Statistics::Int64(stats)) => Datum::long(*stats.min()),
+            (PrimitiveType::Time, Statistics::Int64(stats)) => Datum::time_micros(*stats.min())?,
+            (PrimitiveType::Timestamp, Statistics::Int64(stats)) => {
+                Datum::timestamp_micros(*stats.min())
+            }
+            (PrimitiveType::Timestamptz, Statistics::Int64(stats)) => {
+                Datum::timestamptz_micros(*stats.min())
+            }
+            (PrimitiveType::Float, Statistics::Float(stats)) => Datum::float(*stats.min()),
+            (PrimitiveType::Double, Statistics::Double(stats)) => Datum::double(*stats.min()),
+            (PrimitiveType::String, Statistics::ByteArray(stats)) => {
+                Datum::string(stats.min().as_utf8()?)
+            }
+            // TODO:
+            // * Decimal
+            // * Uuid
+            // * Fixed
+            // * Binary
+            (primitive_type, _) => {

Review Comment:
   Should we return `Ok(None)` instead of returning an error?
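   For reference, the fallback arm would then look something like this (just a sketch of the suggestion above):

       // Unsupported physical/logical type combination: treat it as "no usable
       // min statistic" rather than failing the whole scan.
       (_, _) => return Ok(None),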

##########
crates/iceberg/src/expr/visitors/row_group_metrics_evaluator.rs:
##########
@@ -0,0 +1,1927 @@
+            }
+            // TODO:
+            // * Decimal
+            // * Uuid
+            // * Fixed
+            // * Binary
+            (primitive_type, _) => {
+                return Err(Error::new(
+                    ErrorKind::FeatureUnsupported,
+                    format!("Conversion of min value for column of type {} to iceberg type {} is not yet supported", stats.physical_type(), primitive_type)
+                ));
+            }
+        }))
+    }
+
+    fn max_value(&self, field_id: i32) -> Result<Option<Datum>> {
+        let Some((stats, primitive_type)) = self.stats_and_type_for_field_id(field_id)? else {
+            return Ok(None);
+        };
+
+        if !stats.has_min_max_set() {
+            return Ok(None);
+        }
+
+        Ok(Some(match (primitive_type, stats) {

Review Comment:
   Ditto.

-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org