rdblue commented on code in PR #41: URL: https://github.com/apache/iceberg-python/pull/41#discussion_r1456502771
########## pyiceberg/table/__init__.py: ########## @@ -1935,3 +2043,184 @@ def _generate_snapshot_id() -> int: snapshot_id = snapshot_id if snapshot_id >= 0 else snapshot_id * -1 return snapshot_id + + +@dataclass(frozen=True) +class WriteTask: + write_uuid: uuid.UUID + task_id: int + df: pa.Table + sort_order_id: Optional[int] = None + + # Later to be extended with partition information + + def generate_data_file_filename(self, extension: str) -> str: + # Mimics the behavior in the Java API: + # https://github.com/apache/iceberg/blob/a582968975dd30ff4917fbbe999f1be903efac02/core/src/main/java/org/apache/iceberg/io/OutputFileFactory.java#L92-L101 + return f"00000-{self.task_id}-{self.write_uuid}.{extension}" + + +def _new_manifest_path(location: str, num: int, commit_uuid: uuid.UUID) -> str: + return f'{location}/metadata/{commit_uuid}-m{num}.avro' + + +def _generate_manifest_list_path(location: str, snapshot_id: int, attempt: int, commit_uuid: uuid.UUID) -> str: + # Mimics the behavior in Java: + # https://github.com/apache/iceberg/blob/c862b9177af8e2d83122220764a056f3b96fd00c/core/src/main/java/org/apache/iceberg/SnapshotProducer.java#L491 + return f'{location}/metadata/snap-{snapshot_id}-{attempt}-{commit_uuid}.avro' + + +def _dataframe_to_data_files(table: Table, df: pa.Table) -> Iterable[DataFile]: + from pyiceberg.io.pyarrow import write_file + + if len(table.spec().fields) > 0: + raise ValueError("Cannot write to partitioned tables") + + if len(table.sort_order().fields) > 0: + raise ValueError("Cannot write to tables with a sort-order") + + write_uuid = uuid.uuid4() + counter = itertools.count(0) + + # This is an iter, so we don't have to materialize everything every time + # This will be more relevant when we start doing partitioned writes + yield from write_file(table, iter([WriteTask(write_uuid, next(counter), df)])) + + +class _MergeAppend: + _operation: Operation + _table: Table + _snapshot_id: int + _parent_snapshot_id: Optional[int] + 
_added_data_files: List[DataFile] + _commit_uuid: uuid.UUID + + def __init__(self, operation: Operation, table: Table, snapshot_id: int) -> None: + self._operation = operation + self._table = table + self._snapshot_id = snapshot_id + # Since we only support the main branch for now + self._parent_snapshot_id = snapshot.snapshot_id if (snapshot := self._table.current_snapshot()) else None + self._added_data_files = [] + self._commit_uuid = uuid.uuid4() + + def append_data_file(self, data_file: DataFile) -> _MergeAppend: + self._added_data_files.append(data_file) + return self + + def _deleted_entries(self) -> List[ManifestEntry]: + """To determine if we need to record any deleted entries. + + With partial overwrites we have to use the predicate to evaluate + which entries are affected. + """ + if self._operation == Operation.OVERWRITE: + if self._parent_snapshot_id is not None: + previous_snapshot = self._table.snapshot_by_id(self._parent_snapshot_id) + if previous_snapshot is None: + # This should never happen since you cannot overwrite an empty table + raise ValueError(f"Could not find the previous snapshot: {self._parent_snapshot_id}") + + executor = ExecutorFactory.get_or_create() + + def _get_entries(manifest: ManifestFile) -> List[ManifestEntry]: + return [ + ManifestEntry( + status=ManifestEntryStatus.DELETED, + snapshot_id=entry.snapshot_id, + data_sequence_number=entry.data_sequence_number, + file_sequence_number=entry.file_sequence_number, + data_file=entry.data_file, + ) + for entry in manifest.fetch_manifest_entry(self._table.io, discard_deleted=True) + ] + + list_of_entries = executor.map(_get_entries, previous_snapshot.manifests(self._table.io)) + return list(chain(*list_of_entries)) + return [] + elif self._operation == Operation.APPEND: + return [] + else: + raise ValueError(f"Not implemented for: {self._operation}") + + def _manifests(self) -> List[ManifestFile]: + manifests = [] + deleted_entries = self._deleted_entries() + + if 
self._added_data_files: + output_file_location = _new_manifest_path(location=self._table.location(), num=0, commit_uuid=self._commit_uuid) + with write_manifest( + format_version=self._table.format_version, + spec=self._table.spec(), + schema=self._table.schema(), + output_file=self._table.io.new_output(output_file_location), + snapshot_id=self._snapshot_id, + ) as writer: + for data_file in self._added_data_files: + writer.add_entry( + ManifestEntry( + status=ManifestEntryStatus.ADDED, + snapshot_id=self._snapshot_id, + data_sequence_number=None, + file_sequence_number=None, + data_file=data_file, + ) + ) + + for delete_entry in deleted_entries: + writer.add_entry(delete_entry) Review Comment: I think this approach works fine, but I want to point out that there are drawbacks to writing the deletes in the same manifest: 1. A reader has to load all of the deletes, even though the files aren't useful. If they are in a separate manifest, readers can filter out manifests that have no EXISTING or ADDED data files. 2. Manifests with no data files can be removed in future append commits. 3. This write is single-threaded. In the Java implementation, we produce a manifest of deleted data files for each existing manifest. That allows us to parallelize the operation. Here's the logic we use to drop manifests that aren't needed on the Java side when producing the new list of manifests: ```java // only keep manifests that have live data files or that were written by this commit Predicate<ManifestFile> shouldKeep = manifest -> manifest.hasAddedFiles() || manifest.hasExistingFiles() || manifest.snapshotId() == snapshotId(); ``` -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. 
To unsubscribe, e-mail: issues-unsubscribe@iceberg.apache.org For queries about this service, please contact Infrastructure at: users@infra.apache.org --------------------------------------------------------------------- To unsubscribe, e-mail: issues-unsubscribe@iceberg.apache.org For additional commands, e-mail: issues-help@iceberg.apache.org