geruh commented on code in PR #614:
URL: https://github.com/apache/iceberg-python/pull/614#discussion_r1611039641


##########
pyiceberg/table/__init__.py:
##########
@@ -3537,6 +3537,106 @@ def update_partitions_map(
             schema=table_schema,
         )
 
+    def files(self, snapshot_id: Optional[int] = None) -> "pa.Table":
+        import pyarrow as pa
+
+        from pyiceberg.io.pyarrow import schema_to_pyarrow
+
+        schema = self.tbl.metadata.schema()
+        readable_metrics_struct = []
+
+        def _readable_metrics_struct(bound_type: PrimitiveType) -> 
pa.StructType:
+            pa_bound_type = schema_to_pyarrow(bound_type)
+            return pa.struct([
+                pa.field("column_size", pa.int64(), nullable=True),
+                pa.field("value_count", pa.int64(), nullable=True),
+                pa.field("null_value_count", pa.int64(), nullable=True),
+                pa.field("nan_value_count", pa.int64(), nullable=True),
+                pa.field("lower_bound", pa_bound_type, nullable=True),
+                pa.field("upper_bound", pa_bound_type, nullable=True),
+            ])
+
+        for field in self.tbl.metadata.schema().fields:
+            readable_metrics_struct.append(
+                pa.field(schema.find_column_name(field.field_id), 
_readable_metrics_struct(field.field_type), nullable=False)
+            )
+
+        files_schema = pa.schema([
+            pa.field('content', pa.int8(), nullable=False),
+            pa.field('file_path', pa.string(), nullable=False),
+            pa.field('file_format', pa.dictionary(pa.int32(), pa.string()), 
nullable=False),
+            pa.field('spec_id', pa.int32(), nullable=False),
+            pa.field('record_count', pa.int64(), nullable=False),
+            pa.field('file_size_in_bytes', pa.int64(), nullable=False),
+            pa.field('column_sizes', pa.map_(pa.int32(), pa.int64()), 
nullable=True),
+            pa.field('value_counts', pa.map_(pa.int32(), pa.int64()), 
nullable=True),
+            pa.field('null_value_counts', pa.map_(pa.int32(), pa.int64()), 
nullable=True),
+            pa.field('nan_value_counts', pa.map_(pa.int32(), pa.int64()), 
nullable=True),
+            pa.field('lower_bounds', pa.map_(pa.int32(), pa.binary()), 
nullable=True),
+            pa.field('upper_bounds', pa.map_(pa.int32(), pa.binary()), 
nullable=True),
+            pa.field('key_metadata', pa.binary(), nullable=True),
+            pa.field('split_offsets', pa.list_(pa.int64()), nullable=True),
+            pa.field('equality_ids', pa.list_(pa.int32()), nullable=True),
+            pa.field('sort_order_id', pa.int32(), nullable=True),
+            pa.field('readable_metrics', pa.struct(readable_metrics_struct), 
nullable=True),
+        ])
+
+        files = []
+
+        snapshot = self._get_snapshot(snapshot_id)

Review Comment:
   I know that we have extended the behavior to provide files for a given 
snapshot, but shouldn't we prioritize matching the behavior of OSS Iceberg? 
Currently, this logic treats both a missing snapshot and an invalid snapshot 
as error cases, raising an exception for each. However, what do you think 
about distinguishing these cases? We could throw an exception for an invalid 
snapshot ID, and, for an empty table (no current snapshot), return a table 
with the correct schema but no rows, as the Java implementation does.
   
   cc: @HonahX @Fokko @amogh-jahagirdar 



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org
For additional commands, e-mail: issues-h...@iceberg.apache.org

Reply via email to