rdblue commented on code in PR #41:
URL: https://github.com/apache/iceberg-python/pull/41#discussion_r1451809069
##########
pyiceberg/io/pyarrow.py:
##########
@@ -1565,13 +1564,54 @@ def fill_parquet_file_metadata(
         del upper_bounds[field_id]
         del null_value_counts[field_id]
-    df.file_format = FileFormat.PARQUET
     df.record_count = parquet_metadata.num_rows
-    df.file_size_in_bytes = file_size
     df.column_sizes = column_sizes
     df.value_counts = value_counts
     df.null_value_counts = null_value_counts
     df.nan_value_counts = nan_value_counts
     df.lower_bounds = lower_bounds
     df.upper_bounds = upper_bounds
     df.split_offsets = split_offsets
+
+
+def write_file(table: Table, tasks: Iterator[WriteTask]) -> Iterator[DataFile]:
+    task = next(tasks)
+
+    try:
+        _ = next(tasks)
+        # If there are more tasks, raise an exception
+        raise ValueError("Only unpartitioned writes are supported: https://github.com/apache/iceberg-python/issues/208")
+    except StopIteration:
+        pass
+
+    df = task.df
+
+    file_path = f'{table.location()}/data/{_generate_datafile_filename("parquet")}'
+    file_schema = schema_to_pyarrow(table.schema())
+
+    collected_metrics: List[pq.FileMetaData] = []
+    fo = table.io.new_output(file_path)
+    with fo.create() as fos:
+        with pq.ParquetWriter(fos, schema=file_schema, version="1.0", metadata_collector=collected_metrics) as writer:
+            writer.write_table(df)
+
+    df = DataFile(
+        content=DataFileContent.DATA,
+        file_path=file_path,
+        file_format=FileFormat.PARQUET,
+        partition=Record(),
+        record_count=len(df),
+        file_size_in_bytes=len(fo),
+        # Just copy these from the table for now
+        sort_order_id=table.sort_order().order_id,
+        spec_id=table.spec().spec_id,
+        equality_ids=table.schema().identifier_field_ids,
+        key_metadata=None,
+    )
+    fill_parquet_file_metadata(
+        df=df,
+        parquet_metadata=collected_metrics[0],
##########
Review Comment:
   So if Arrow decides to write multiple files, there will be one entry per file in this list?
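
[Editor's note: a minimal sketch, not part of the PR, illustrating the metadata_collector behavior the question is about. It assumes pyarrow's documented behavior that a ParquetWriter appends its FileMetaData to the collector list when the writer is closed; the file paths are placeholders. Since write_file above opens exactly one ParquetWriter, collected_metrics would hold a single entry, while one writer per file would yield one entry per file.]

   # Sketch only: one FileMetaData entry is appended per closed writer (per file).
   import pyarrow as pa
   import pyarrow.parquet as pq

   table = pa.table({"id": [1, 2, 3]})
   collected_metrics = []  # receives one pq.FileMetaData per closed writer

   # Placeholder paths for illustration only.
   for path in ("/tmp/example-1.parquet", "/tmp/example-2.parquet"):
       with pq.ParquetWriter(path, schema=table.schema, metadata_collector=collected_metrics) as writer:
           writer.write_table(table)

   print(len(collected_metrics))        # 2: one entry per file written
   print(collected_metrics[0].num_rows) # 3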
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]