aokolnychyi commented on code in PR #11273:
URL: https://github.com/apache/iceberg/pull/11273#discussion_r1828418948


##########
spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/source/SparkPositionDeltaWrite.java:
##########
@@ -437,20 +474,51 @@ protected PartitioningWriter<InternalRow, DataWriteResult> newDataWriter(
     // use a fanout writer if the input is unordered no matter whether fanout writers are enabled
     // clustered writers assume that the position deletes are already ordered by file and position
     protected PartitioningWriter<PositionDelete<InternalRow>, DeleteWriteResult> newDeleteWriter(
-        Table table, SparkFileWriterFactory writers, OutputFileFactory files, Context context) {
+        Table table,
+        Map<String, DeleteFileSet> rewritableDeletes,
+        SparkFileWriterFactory writers,
+        OutputFileFactory files,
+        Context context) {
 
       FileIO io = table.io();
       boolean inputOrdered = context.inputOrdered();
       long targetFileSize = context.targetDeleteFileSize();
       DeleteGranularity deleteGranularity = context.deleteGranularity();
 
-      if (inputOrdered) {
+      if (inputOrdered && rewritableDeletes == null) {
         return new ClusteredPositionDeleteWriter<>(
             writers, files, io, targetFileSize, deleteGranularity);
       } else {
         return new FanoutPositionOnlyDeleteWriter<>(
-            writers, files, io, targetFileSize, deleteGranularity);
+            writers,
+            files,
+            io,
+            targetFileSize,
+            deleteGranularity,
+            rewritableDeletes != null
+                ? new PreviousDeleteLoader(table, rewritableDeletes)
+                : path -> null /* no previous file scoped deletes */);
+      }
+    }
+  }
+
+  private static class PreviousDeleteLoader implements Function<CharSequence, PositionDeleteIndex> {
+    private final Map<String, DeleteFileSet> rewritableDeletes;
+    private final DeleteLoader deleteLoader;
+
+    PreviousDeleteLoader(Table table, Map<String, DeleteFileSet> rewritableDeletes) {
+      this.rewritableDeletes = rewritableDeletes;
+      this.deleteLoader = new BaseDeleteLoader(deleteFile -> table.io().newInputFile(deleteFile));

Review Comment:
   Can we cross-check whether we need anything extra for encryption? It just has to match what we do in other places that access `BaseDeleteLoader`.
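   
   For reference, one possible shape (just a sketch, assuming the `EncryptingFileIO.combine(table.io(), table.encryption())` pattern used elsewhere for decrypting content files also applies here; the exact wiring should follow whatever the other `BaseDeleteLoader` call sites do):
   
   ```java
   PreviousDeleteLoader(Table table, Map<String, DeleteFileSet> rewritableDeletes) {
     this.rewritableDeletes = rewritableDeletes;
     // Sketch: wrap the table FileIO with the table's EncryptionManager so that
     // previously written (possibly encrypted) delete files are decrypted on reload.
     EncryptingFileIO encryptingIO = EncryptingFileIO.combine(table.io(), table.encryption());
     // Passing the DeleteFile itself (rather than only its path) lets EncryptingFileIO
     // use the file's key metadata when building the decrypting InputFile.
     this.deleteLoader = new BaseDeleteLoader(deleteFile -> encryptingIO.newInputFile(deleteFile));
   }
   ```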



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
