Fokko commented on code in PR #7831:
URL: https://github.com/apache/iceberg/pull/7831#discussion_r1262637285


##########
python/pyiceberg/io/pyarrow.py:
##########
@@ -1013,3 +1025,271 @@ def map_key_partner(self, partner_map: Optional[pa.Array]) -> Optional[pa.Array]
 
     def map_value_partner(self, partner_map: Optional[pa.Array]) -> Optional[pa.Array]:
         return partner_map.items if isinstance(partner_map, pa.MapArray) else None
+
+
+class StatsAggregator:
+    def __init__(self, type_string: str, trunc_length: Optional[int] = None) -> None:
+        self.current_min: Any = None
+        self.current_max: Any = None
+        self.trunc_length = trunc_length
+        self.primitive_type: Optional[PrimitiveType] = None
+
+        if type_string == "BOOLEAN":
+            self.primitive_type = BooleanType()
+        elif type_string == "INT32":
+            self.primitive_type = IntegerType()
+        elif type_string == "INT64":
+            self.primitive_type = LongType()
+        elif type_string == "INT96":
+            raise NotImplementedError("Statistics not implemented for INT96 physical type")
+        elif type_string == "FLOAT":
+            self.primitive_type = FloatType()
+        elif type_string == "DOUBLE":
+            self.primitive_type = DoubleType()
+        elif type_string == "BYTE_ARRAY":
+            self.primitive_type = BinaryType()
+        elif type_string == "FIXED_LEN_BYTE_ARRAY":
+            self.primitive_type = BinaryType()
+        else:
+            raise AssertionError(f"Unknown physical type {type_string}")
+
+    def serialize(self, value: Any) -> bytes:
+        if type(value) == str:
+            value = value.encode()
+        assert self.primitive_type is not None  # appease mypy
+        return to_bytes(self.primitive_type, value)
+
+    def add_min(self, val: Any) -> None:
+        if self.current_min is None:
+            self.current_min = val
+        else:
+            self.current_min = min(val, self.current_min)
+
+    def add_max(self, val: Any) -> None:
+        if self.current_max is None:
+            self.current_max = val
+        else:
+            self.current_max = max(self.current_max, val)
+
+    def get_min(self) -> bytes:
+        return self.serialize(self.current_min)[: self.trunc_length]
+
+    def get_max(self) -> bytes:
+        return self.serialize(self.current_max)[: self.trunc_length]
+
+
+DEFAULT_TRUNCATION_LENGTH = 16
+TRUNCATION_EXPR = r"^truncate\((\d+)\)$"
+
+
+class MetricModeTypes(Enum):
+    TRUNCATE = "truncate"
+    NONE = "none"
+    COUNTS = "counts"
+    FULL = "full"
+
+
+DEFAULT_METRICS_MODE_KEY = "write.metadata.metrics.default"
+COLUMN_METRICS_MODE_KEY = "write.metadata.metrics.column"
+
+
+@dataclass(frozen=True)
+class MetricsMode(Singleton):
+    type: MetricModeTypes
+    length: Optional[int] = None
+
+
+def match_metrics_mode(mode: str) -> MetricsMode:
+    m = re.match(TRUNCATION_EXPR, mode, re.IGNORECASE)
+    if m:
+        length = int(m[1])
+        if length < 1:
+            raise AssertionError("Truncation length must be larger than 0")
+        return MetricsMode(MetricModeTypes.TRUNCATE, int(m[1]))
+    elif re.match("^none$", mode, re.IGNORECASE):
+        return MetricsMode(MetricModeTypes.NONE)
+    elif re.match("^counts$", mode, re.IGNORECASE):
+        return MetricsMode(MetricModeTypes.COUNTS)
+    elif re.match("^full$", mode, re.IGNORECASE):
+        return MetricsMode(MetricModeTypes.FULL)
+    else:
+        raise AssertionError(f"Unsupported metrics mode {mode}")
+
+
+@dataclass(frozen=True)
+class StatisticsCollector:
+    field_id: int
+    iceberg_type: PrimitiveType
+    mode: MetricsMode
+    column_name: str
+
+
+class PyArrowStatisticsCollector(PreOrderSchemaVisitor[List[StatisticsCollector]]):
+    _field_id = 0
+    _schema: Schema
+    _properties: Dict[str, str]
+
+    def __init__(self, schema: Schema, properties: Dict[str, str]):
+        self._schema = schema
+        self._properties = properties
+
+    def schema(self, schema: Schema, struct_result: Callable[[], List[StatisticsCollector]]) -> List[StatisticsCollector]:
+        return struct_result()
+
+    def struct(
+        self, struct: StructType, field_results: List[Callable[[], List[StatisticsCollector]]]
+    ) -> List[StatisticsCollector]:
+        return list(chain(*[result() for result in field_results]))
+
+    def field(self, field: NestedField, field_result: Callable[[], List[StatisticsCollector]]) -> List[StatisticsCollector]:
+        self._field_id = field.field_id
+        result = field_result()
+        return result
+
+    def list(self, list_type: ListType, element_result: Callable[[], List[StatisticsCollector]]) -> List[StatisticsCollector]:
+        self._field_id = list_type.element_id
+        return element_result()
+
+    def map(
+        self,
+        map_type: MapType,
+        key_result: Callable[[], List[StatisticsCollector]],
+        value_result: Callable[[], List[StatisticsCollector]],
+    ) -> List[StatisticsCollector]:
+        self._field_id = map_type.key_id
+        k = key_result()
+        self._field_id = map_type.value_id
+        v = value_result()
+        return k + v
+
+    def primitive(self, primitive: PrimitiveType) -> List[StatisticsCollector]:
+        column_name = self._schema.find_column_name(self._field_id)
+        assert column_name is not None, f"Column for field {self._field_id} not found"
+
+        metrics_mode = MetricsMode(MetricModeTypes.TRUNCATE, DEFAULT_TRUNCATION_LENGTH)
+
+        default_mode = self._properties.get(DEFAULT_METRICS_MODE_KEY)
+        if default_mode:
+            metrics_mode = match_metrics_mode(default_mode)
+
+        col_mode = self._properties.get(f"{COLUMN_METRICS_MODE_KEY}.{column_name}")
+        if col_mode:
+            metrics_mode = match_metrics_mode(col_mode)
+
+        return [StatisticsCollector(field_id=self._field_id, iceberg_type=primitive, mode=metrics_mode, column_name=column_name)]
+
+
+def fill_parquet_file_metadata(
+    df: DataFile,
+    parquet_metadata: pq.FileMetaData,
+    file_size: int,
+    table_metadata: TableMetadata,
+) -> None:
+    """
+    Computes and fills the following fields of the DataFile object.
+
+    - file_format
+    - record_count
+    - file_size_in_bytes
+    - column_sizes
+    - value_counts
+    - null_value_counts
+    - nan_value_counts
+    - lower_bounds
+    - upper_bounds
+    - split_offsets
+
+    Args:
+        df (DataFile): A DataFile object representing the Parquet file for which metadata is to be filled.
+        parquet_metadata (pyarrow.parquet.FileMetaData): A pyarrow metadata object.
+        file_size (int): The total compressed file size cannot be retrieved from the metadata and hence has to
+            be passed here. Depending on the kind of file system and pyarrow library call used, different
+            ways to obtain this value might be appropriate.
+        table_metadata (pyiceberg.table.metadata.TableMetadata): The Iceberg table metadata. It is required to
+            compute the mapping of column position to iceberg schema type id. It's also used to set the mode
+            for column metrics collection.
+    """
+    schema = next(filter(lambda s: s.schema_id == table_metadata.current_schema_id, table_metadata.schemas))
+
+    stats_columns = pre_order_visit(schema, PyArrowStatisticsCollector(schema, table_metadata.properties))
+    assert parquet_metadata.num_columns == len(
+        stats_columns
+    ), f"Number of columns in metadata ({len(stats_columns)}) is different from the number of columns in pyarrow table ({parquet_metadata.num_columns})"
+
+    col_index_2_id = {i: stat.field_id for i, stat in enumerate(stats_columns)}
+
+    column_sizes: Dict[int, int] = {}
+    value_counts: Dict[int, int] = {}
+    split_offsets: List[int] = []
+
+    null_value_counts: Dict[int, int] = {}
+    nan_value_counts: Dict[int, int] = {}
+
+    col_aggs = {}
+
+    for r in range(parquet_metadata.num_row_groups):
+        # References:
+        # https://github.com/apache/iceberg/blob/fc381a81a1fdb8f51a0637ca27cd30673bd7aad3/parquet/src/main/java/org/apache/iceberg/parquet/ParquetUtil.java#L232
+        # https://github.com/apache/parquet-mr/blob/ac29db4611f86a07cc6877b416aa4b183e09b353/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/metadata/ColumnChunkMetaData.java#L184
+
+        row_group = parquet_metadata.row_group(r)
+
+        data_offset = row_group.column(0).data_page_offset
+        dictionary_offset = row_group.column(0).dictionary_page_offset
+
+        if row_group.column(0).has_dictionary_page and dictionary_offset < data_offset:
+            split_offsets.append(dictionary_offset)
+        else:
+            split_offsets.append(data_offset)
+
+        for c in range(parquet_metadata.num_columns):

Review Comment:
   I think we can get rid of the `col_index_2_id` dict altogether:
   ```suggestion
           for idx, stat_col in enumerate(stats_columns):
   ```
   This way we have all the information that we need. 
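   For reference, a rough sketch of how the row-group loop could read with that change (a sketch only; it reuses the names already in this diff and simply drops `col_index_2_id`):
   ```python
   # Sketch: stats_columns comes from the PyArrowStatisticsCollector visit above,
   # and row_group is the pyarrow.parquet row-group metadata for this iteration.
   for idx, stat_col in enumerate(stats_columns):
       column = row_group.column(idx)
       col_id = stat_col.field_id  # field id taken directly from the collector

       column_sizes[col_id] = column_sizes.get(col_id, 0) + column.total_compressed_size

       if stat_col.mode == MetricsMode(MetricModeTypes.NONE):
           continue

       value_counts[col_id] = value_counts.get(col_id, 0) + column.num_values
   ```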



##########
python/pyiceberg/avro/__init__.py:
##########
@@ -16,5 +16,8 @@
 # under the License.
 import struct
 
+STRUCT_BOOL = struct.Struct("?")

Review Comment:
   We can revert the changes in this file, now we use `to_bytes`
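   A minimal sketch of what that looks like on the caller side, assuming `to_bytes` is the helper from `pyiceberg.conversions` used in `StatsAggregator.serialize` above:
   ```python
   from pyiceberg.conversions import to_bytes  # assumed import path for the helper
   from pyiceberg.types import BooleanType, LongType

   # to_bytes already covers the struct packing that STRUCT_BOOL provided,
   # so the extra constant in avro/__init__.py is no longer needed.
   lower_bound = to_bytes(BooleanType(), False)
   upper_bound = to_bytes(LongType(), 42)
   ```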



##########
python/pyiceberg/io/pyarrow.py:
##########
@@ -1013,3 +1025,271 @@ def map_key_partner(self, partner_map: Optional[pa.Array]) -> Optional[pa.Array]
 
     def map_value_partner(self, partner_map: Optional[pa.Array]) -> Optional[pa.Array]:
         return partner_map.items if isinstance(partner_map, pa.MapArray) else None
+
+
+class StatsAggregator:
+    def __init__(self, type_string: str, trunc_length: Optional[int] = None) -> None:
+        self.current_min: Any = None
+        self.current_max: Any = None
+        self.trunc_length = trunc_length
+        self.primitive_type: Optional[PrimitiveType] = None
+
+        if type_string == "BOOLEAN":
+            self.primitive_type = BooleanType()
+        elif type_string == "INT32":
+            self.primitive_type = IntegerType()
+        elif type_string == "INT64":
+            self.primitive_type = LongType()
+        elif type_string == "INT96":
+            raise NotImplementedError("Statistics not implemented for INT96 physical type")
+        elif type_string == "FLOAT":
+            self.primitive_type = FloatType()
+        elif type_string == "DOUBLE":
+            self.primitive_type = DoubleType()
+        elif type_string == "BYTE_ARRAY":
+            self.primitive_type = BinaryType()
+        elif type_string == "FIXED_LEN_BYTE_ARRAY":
+            self.primitive_type = BinaryType()
+        else:
+            raise AssertionError(f"Unknown physical type {type_string}")
+
+    def serialize(self, value: Any) -> bytes:
+        if type(value) == str:
+            value = value.encode()
+        assert self.primitive_type is not None  # appease mypy
+        return to_bytes(self.primitive_type, value)
+
+    def add_min(self, val: Any) -> None:
+        if self.current_min is None:
+            self.current_min = val
+        else:
+            self.current_min = min(val, self.current_min)
+
+    def add_max(self, val: Any) -> None:
+        if self.current_max is None:
+            self.current_max = val
+        else:
+            self.current_max = max(self.current_max, val)
+
+    def get_min(self) -> bytes:
+        return self.serialize(self.current_min)[: self.trunc_length]
+
+    def get_max(self) -> bytes:
+        return self.serialize(self.current_max)[: self.trunc_length]
+
+
+DEFAULT_TRUNCATION_LENGTH = 16
+TRUNCATION_EXPR = r"^truncate\((\d+)\)$"
+
+
+class MetricModeTypes(Enum):
+    TRUNCATE = "truncate"
+    NONE = "none"
+    COUNTS = "counts"
+    FULL = "full"
+
+
+DEFAULT_METRICS_MODE_KEY = "write.metadata.metrics.default"
+COLUMN_METRICS_MODE_KEY = "write.metadata.metrics.column"
+
+
+@dataclass(frozen=True)
+class MetricsMode(Singleton):
+    type: MetricModeTypes
+    length: Optional[int] = None
+
+
+def match_metrics_mode(mode: str) -> MetricsMode:
+    m = re.match(TRUNCATION_EXPR, mode, re.IGNORECASE)
+    if m:
+        length = int(m[1])
+        if length < 1:
+            raise AssertionError("Truncation length must be larger than 0")
+        return MetricsMode(MetricModeTypes.TRUNCATE, int(m[1]))
+    elif re.match("^none$", mode, re.IGNORECASE):
+        return MetricsMode(MetricModeTypes.NONE)
+    elif re.match("^counts$", mode, re.IGNORECASE):
+        return MetricsMode(MetricModeTypes.COUNTS)
+    elif re.match("^full$", mode, re.IGNORECASE):
+        return MetricsMode(MetricModeTypes.FULL)
+    else:
+        raise AssertionError(f"Unsupported metrics mode {mode}")
+
+
+@dataclass(frozen=True)
+class StatisticsCollector:
+    field_id: int
+    iceberg_type: PrimitiveType
+    mode: MetricsMode
+    column_name: str
+
+
+class PyArrowStatisticsCollector(PreOrderSchemaVisitor[List[StatisticsCollector]]):
+    _field_id = 0
+    _schema: Schema
+    _properties: Dict[str, str]
+
+    def __init__(self, schema: Schema, properties: Dict[str, str]):
+        self._schema = schema
+        self._properties = properties
+
+    def schema(self, schema: Schema, struct_result: Callable[[], List[StatisticsCollector]]) -> List[StatisticsCollector]:
+        return struct_result()
+
+    def struct(
+        self, struct: StructType, field_results: List[Callable[[], List[StatisticsCollector]]]
+    ) -> List[StatisticsCollector]:
+        return list(chain(*[result() for result in field_results]))
+
+    def field(self, field: NestedField, field_result: Callable[[], List[StatisticsCollector]]) -> List[StatisticsCollector]:
+        self._field_id = field.field_id
+        result = field_result()
+        return result
+
+    def list(self, list_type: ListType, element_result: Callable[[], List[StatisticsCollector]]) -> List[StatisticsCollector]:
+        self._field_id = list_type.element_id
+        return element_result()
+
+    def map(
+        self,
+        map_type: MapType,
+        key_result: Callable[[], List[StatisticsCollector]],
+        value_result: Callable[[], List[StatisticsCollector]],
+    ) -> List[StatisticsCollector]:
+        self._field_id = map_type.key_id
+        k = key_result()
+        self._field_id = map_type.value_id
+        v = value_result()
+        return k + v
+
+    def primitive(self, primitive: PrimitiveType) -> List[StatisticsCollector]:
+        column_name = self._schema.find_column_name(self._field_id)
+        assert column_name is not None, f"Column for field {self._field_id} not found"
+
+        metrics_mode = MetricsMode(MetricModeTypes.TRUNCATE, DEFAULT_TRUNCATION_LENGTH)
+
+        default_mode = self._properties.get(DEFAULT_METRICS_MODE_KEY)
+        if default_mode:
+            metrics_mode = match_metrics_mode(default_mode)
+
+        col_mode = self._properties.get(f"{COLUMN_METRICS_MODE_KEY}.{column_name}")
+        if col_mode:
+            metrics_mode = match_metrics_mode(col_mode)
+
+        return [StatisticsCollector(field_id=self._field_id, iceberg_type=primitive, mode=metrics_mode, column_name=column_name)]
+
+
+def fill_parquet_file_metadata(
+    df: DataFile,
+    parquet_metadata: pq.FileMetaData,
+    file_size: int,
+    table_metadata: TableMetadata,
+) -> None:
+    """
+    Computes and fills the following fields of the DataFile object.
+
+    - file_format
+    - record_count
+    - file_size_in_bytes
+    - column_sizes
+    - value_counts
+    - null_value_counts
+    - nan_value_counts
+    - lower_bounds
+    - upper_bounds
+    - split_offsets
+
+    Args:
+        df (DataFile): A DataFile object representing the Parquet file for which metadata is to be filled.
+        parquet_metadata (pyarrow.parquet.FileMetaData): A pyarrow metadata object.
+        file_size (int): The total compressed file size cannot be retrieved from the metadata and hence has to
+            be passed here. Depending on the kind of file system and pyarrow library call used, different
+            ways to obtain this value might be appropriate.
+        table_metadata (pyiceberg.table.metadata.TableMetadata): The Iceberg table metadata. It is required to
+            compute the mapping of column position to iceberg schema type id. It's also used to set the mode
+            for column metrics collection.
+    """
+    schema = next(filter(lambda s: s.schema_id == table_metadata.current_schema_id, table_metadata.schemas))
+
+    stats_columns = pre_order_visit(schema, PyArrowStatisticsCollector(schema, table_metadata.properties))
+    assert parquet_metadata.num_columns == len(
+        stats_columns
+    ), f"Number of columns in metadata ({len(stats_columns)}) is different from the number of columns in pyarrow table ({parquet_metadata.num_columns})"
+
+    col_index_2_id = {i: stat.field_id for i, stat in enumerate(stats_columns)}
+
+    column_sizes: Dict[int, int] = {}
+    value_counts: Dict[int, int] = {}
+    split_offsets: List[int] = []
+
+    null_value_counts: Dict[int, int] = {}
+    nan_value_counts: Dict[int, int] = {}
+
+    col_aggs = {}
+
+    for r in range(parquet_metadata.num_row_groups):
+        # References:
+        # https://github.com/apache/iceberg/blob/fc381a81a1fdb8f51a0637ca27cd30673bd7aad3/parquet/src/main/java/org/apache/iceberg/parquet/ParquetUtil.java#L232
+        # https://github.com/apache/parquet-mr/blob/ac29db4611f86a07cc6877b416aa4b183e09b353/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/metadata/ColumnChunkMetaData.java#L184
+
+        row_group = parquet_metadata.row_group(r)
+
+        data_offset = row_group.column(0).data_page_offset
+        dictionary_offset = row_group.column(0).dictionary_page_offset
+
+        if row_group.column(0).has_dictionary_page and dictionary_offset < data_offset:
+            split_offsets.append(dictionary_offset)
+        else:
+            split_offsets.append(data_offset)
+
+        for c in range(parquet_metadata.num_columns):
+            col_id = col_index_2_id[c]
+
+            column = row_group.column(c)
+
+            column_sizes[col_id] = column_sizes.get(col_id, 0) + column.total_compressed_size
+
+            metrics_mode = stats_columns[c].mode
+
+            if metrics_mode == MetricsMode(MetricModeTypes.NONE):
+                continue
+
+            value_counts[col_id] = value_counts.get(col_id, 0) + column.num_values
+
+            if column.is_stats_set:

Review Comment:
   When this isn't set, we probably want to emit a `logger.warn("PyArrow statistics missing when writing file")`
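   A possible shape for that branch, as a sketch only (the logger setup and message are illustrative; `logger.warning` is the non-deprecated spelling of `warn`):
   ```python
   import logging

   logger = logging.getLogger(__name__)

   if column.is_stats_set:
       ...  # existing min/max/null/NaN handling from this PR
   else:
       # Stats can legitimately be absent; surface it rather than failing silently.
       logger.warning("PyArrow statistics missing for column %d when writing file", col_id)
   ```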


