maxdebayser commented on code in PR #7831:
URL: https://github.com/apache/iceberg/pull/7831#discussion_r1285960010


##########
python/pyiceberg/utils/file_stats.py:
##########
@@ -0,0 +1,164 @@
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from pyiceberg.manifest import DataFile, FileFormat
+import pyarrow.parquet as pq
+import pyarrow.compute as pc
+import pyarrow as pa
+import struct
+import datetime
+
+BOUND_TRUNCATED_LENGTH = 16
+
+# Serialization rules: https://iceberg.apache.org/spec/#binary-single-value-serialization
+#
+# Type      Binary serialization
+# boolean   0x00 for false, non-zero byte for true
+# int       Stored as 4-byte little-endian
+# long      Stored as 8-byte little-endian
+# float     Stored as 4-byte little-endian
+# double    Stored as 8-byte little-endian
+# date      Stores days from 1970-01-01 in a 4-byte little-endian int
+# time      Stores microseconds from midnight in an 8-byte little-endian long
+# timestamp without zone   Stores microseconds from 1970-01-01 00:00:00.000000 in an 8-byte little-endian long
+# timestamp with zone      Stores microseconds from 1970-01-01 00:00:00.000000 UTC in an 8-byte little-endian long
+# string    UTF-8 bytes (without length)
+# uuid      16-byte big-endian value, see example in Appendix B
+# fixed(L)  Binary value
+# binary    Binary value (without length)
+#
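+# For illustration (these examples are not part of the spec text):
+#   int 42       -> struct.pack('<i', 42) == b'\x2a\x00\x00\x00'
+#   boolean True -> b'\x01' (any non-zero byte means true)
+#   string "ab"  -> b'ab' (raw UTF-8 bytes, no length prefix)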
+def serialize_to_binary(scalar: pa.Scalar) -> bytes:
+    value = scalar.as_py()
+    if isinstance(scalar, pa.BooleanScalar):
+        return struct.pack('?', value)
+    elif isinstance(scalar, (pa.Int8Scalar, pa.UInt8Scalar,
+                             pa.Int16Scalar, pa.UInt16Scalar,
+                             pa.Int32Scalar, pa.UInt32Scalar)):
+        # Iceberg ints are stored as 4-byte little-endian regardless of the Arrow width
+        return struct.pack('<i', value)
+    elif isinstance(scalar, (pa.Int64Scalar, pa.UInt64Scalar)):
+        # Iceberg longs are stored as 8-byte little-endian
+        return struct.pack('<q', value)
+    elif isinstance(scalar, pa.FloatScalar):
+        return struct.pack('<f', value)
+    elif isinstance(scalar, pa.DoubleScalar):
+        return struct.pack('<d', value)
+    elif isinstance(scalar, pa.StringScalar):
+        return value.encode('utf-8')
+    elif isinstance(scalar, pa.BinaryScalar):
+        return value
+    elif isinstance(scalar, (pa.Date32Scalar, pa.Date64Scalar)):
+        epoch = datetime.date(1970, 1, 1)
+        days = (value - epoch).days
+        return struct.pack('<i', days)
+    elif isinstance(scalar, (pa.Time32Scalar, pa.Time64Scalar)):
+        microseconds = int(value.hour * 60 * 60 * 1e6 +
+                        value.minute * 60 * 1e6 +
+                        value.second * 1e6 +
+                        value.microsecond)
+        return struct.pack('<q', microseconds)
+    elif isinstance(scalar, pa.TimestampScalar):
+        epoch = datetime.datetime(1970, 1, 1)
+        if value.tzinfo is not None:
+            # timestamptz values come back as aware datetimes; compare against a UTC epoch
+            epoch = epoch.replace(tzinfo=datetime.timezone.utc)
+        # integer division avoids float rounding errors for timestamps far from the epoch
+        microseconds = (value - epoch) // datetime.timedelta(microseconds=1)
+        return struct.pack('<q', microseconds)
+    else:
+        raise TypeError('Unsupported type: {}'.format(type(scalar)))
+
+
+def fill_parquet_file_metadata(df: DataFile, file_object: pa.NativeFile, table: pa.Table = None) -> None:
+    """
+    Computes and fills the following fields of the DataFile object:
+
+    - file_format
+    - record_count
+    - file_size_in_bytes
+    - column_sizes
+    - value_counts
+    - null_value_counts
+    - nan_value_counts
+    - lower_bounds
+    - upper_bounds
+    - split_offsets
+    
+    Args:
+        df (DataFile): A DataFile object representing the Parquet file for which metadata is to be filled.
+        file_object (pa.NativeFile): A pyarrow NativeFile object pointing to the location where the
+            Parquet file is stored.
+        table (pa.Table, optional): If the metadata is computed while writing a pyarrow Table to Parquet,
+            the table can be passed to compute the column statistics. If absent, the table will be read
+            from file_object using pyarrow.parquet.read_table.
+    """
+    
+    parquet_file = pq.ParquetFile(file_object)

Review Comment:
   Yes, collecting the statistics while writing has been implemented.
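   As a rough sketch of what that write path can look like (the helper name `write_with_stats` and the in-memory sink are made up here; only `fill_parquet_file_metadata` and its signature come from this PR):
   
   ```python
   import pyarrow as pa
   import pyarrow.parquet as pq
   
   from pyiceberg.manifest import DataFile
   from pyiceberg.utils.file_stats import fill_parquet_file_metadata
   
   
   def write_with_stats(table: pa.Table, data_file: DataFile) -> bytes:
       # Write the table to an in-memory buffer; a real writer would target
       # the table's storage location instead.
       sink = pa.BufferOutputStream()
       pq.write_table(table, sink)
       buf = sink.getvalue()
   
       # Re-open the freshly written bytes as a NativeFile and let the helper
       # fill record_count, column_sizes, bounds, etc. Passing `table` means
       # the column statistics are computed from the in-memory data instead
       # of re-reading the Parquet file.
       fill_parquet_file_metadata(data_file, pa.BufferReader(buf), table)
       return buf.to_pybytes()
   ```
   
   Here the `DataFile` instance is assumed to be constructed elsewhere with the remaining fields (path, partition, etc.).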


