rdblue commented on code in PR #41:
URL: https://github.com/apache/iceberg-python/pull/41#discussion_r1427433153
##########
pyiceberg/io/pyarrow.py:
##########
@@ -1565,13 +1564,54 @@ def fill_parquet_file_metadata(
del upper_bounds[field_id]
del null_value_counts[field_id]
- df.file_format = FileFormat.PARQUET
df.record_count = parquet_metadata.num_rows
- df.file_size_in_bytes = file_size
df.column_sizes = column_sizes
df.value_counts = value_counts
df.null_value_counts = null_value_counts
df.nan_value_counts = nan_value_counts
df.lower_bounds = lower_bounds
df.upper_bounds = upper_bounds
df.split_offsets = split_offsets
+
+
+def write_file(table: Table, tasks: Iterator[WriteTask]) -> Iterator[DataFile]:
+ task = next(tasks)
+
+ try:
+ _ = next(tasks)
+ # If there are more tasks, raise an exception
+        raise ValueError("Only unpartitioned writes are supported: https://github.com/apache/iceberg-python/issues/208")
+ except StopIteration:
+ pass
+
+ df = task.df
+
+    file_path = f'{table.location()}/data/{_generate_datafile_filename("parquet")}'
+ file_schema = schema_to_pyarrow(table.schema())
+
+ collected_metrics: List[pq.FileMetaData] = []
+ fo = table.io.new_output(file_path)
+ with fo.create() as fos:
+        with pq.ParquetWriter(fos, schema=file_schema, version="1.0", metadata_collector=collected_metrics) as writer:
+ writer.write_table(df)
+
+ df = DataFile(
Review Comment:
It's confusing that `df` is used for both the input dataframe and the output
data file.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]