nastra commented on code in PR #13310:
URL: https://github.com/apache/iceberg/pull/13310#discussion_r2192106169


##########
spark/v4.0/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestRowLevelOperationsWithLineage.java:
##########
@@ -0,0 +1,491 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.spark.extensions;
+
+import static org.apache.iceberg.MetadataColumns.schemaWithRowLineage;
+import static org.apache.iceberg.spark.Spark3Util.loadIcebergTable;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assumptions.assumeThat;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import org.apache.iceberg.AppendFiles;
+import org.apache.iceberg.FileFormat;
+import org.apache.iceberg.Files;
+import org.apache.iceberg.MetadataColumns;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.Snapshot;
+import org.apache.iceberg.SnapshotRef;
+import org.apache.iceberg.StructLike;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.TestHelpers;
+import org.apache.iceberg.data.GenericAppenderFactory;
+import org.apache.iceberg.data.GenericRecord;
+import org.apache.iceberg.data.Record;
+import org.apache.iceberg.encryption.EncryptionUtil;
+import org.apache.iceberg.io.DataWriter;
+import org.apache.iceberg.io.OutputFile;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
+import org.apache.iceberg.relocated.com.google.common.collect.Iterables;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.spark.functions.BucketFunction;
+import org.apache.iceberg.types.Types;
+import org.apache.iceberg.util.Pair;
+import org.apache.iceberg.util.PartitionMap;
+import org.apache.spark.sql.catalyst.analysis.NoSuchTableException;
+import org.apache.spark.sql.catalyst.parser.ParseException;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.TestTemplate;
+
+public abstract class TestRowLevelOperationsWithLineage extends 
SparkRowLevelOperationsTestBase {
+  static final Function<StructLike, StructLike> BUCKET_PARTITION_GENERATOR =
+      record ->
+          TestHelpers.Row.of(BucketFunction.BucketInt.invoke(2, record.get(0, 
Integer.class)));
+
+  static final Schema SCHEMA =
+      new Schema(
+          ImmutableList.of(
+              Types.NestedField.required(1, "id", Types.IntegerType.get()),
+              Types.NestedField.required(2, "data", Types.StringType.get()),
+              MetadataColumns.ROW_ID,
+              MetadataColumns.LAST_UPDATED_SEQUENCE_NUMBER));
+
+  static final List<Record> INITIAL_RECORDS =
+      ImmutableList.of(
+          createRecord(SCHEMA, 100, "a", 0L, 1L),
+          createRecord(SCHEMA, 101, "b", 1L, 1L),
+          createRecord(SCHEMA, 102, "c", 2L, 1L),
+          createRecord(SCHEMA, 103, "d", 3L, 1L),
+          createRecord(SCHEMA, 104, "e", 4L, 1L));
+
+  @BeforeAll
+  public static void setupSparkConf() {
+    spark.conf().set("spark.sql.shuffle.partitions", "4");
+  }
+
+  @BeforeEach
+  public void beforeEach() {
+    assumeThat(formatVersion).isGreaterThanOrEqualTo(3);
+    // ToDo: Remove these as row lineage inheritance gets implemented in the 
other readers
+    assumeThat(fileFormat).isEqualTo(FileFormat.PARQUET);

Review Comment:
   maybe worth overriding `parameters()` in `TestRowLevelOperationsWithLineage` and defining a smaller test matrix, wdyt?
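
   Something along these lines, as a rough sketch only (the `@Parameters` columns and values below are placeholders and would need to mirror whatever `SparkRowLevelOperationsTestBase#parameters()` actually declares):

   ```java
   // Sketch: re-declaring parameters() in the subclass lets the parameterized-test
   // extension pick up a reduced matrix for the lineage tests. Column order and
   // values here are illustrative placeholders, not the real base-class matrix.
   @Parameters(name = "catalogName = {0}, fileFormat = {3}")
   public static Object[][] parameters() {
     return new Object[][] {
       {
         "testhadoop",
         SparkCatalog.class.getName(),
         ImmutableMap.of("type", "hadoop"),
         FileFormat.PARQUET,
         true // plus whatever trailing columns the base matrix declares (e.g. formatVersion = 3)
       }
     };
   }
   ```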



##########
spark/v4.0/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/SparkRowLevelOperationsTestBase.java:
##########
@@ -177,7 +177,19 @@ public static Object[][] parameters() {
         SparkCatalog.class.getName(),
         ImmutableMap.of("type", "hadoop"),
         FileFormat.PARQUET,
-        RANDOM.nextBoolean(),
+        true,

Review Comment:
   +1 I think it's easier to just separate this out into its own PR



##########
spark/v4.0/spark/src/main/java/org/apache/iceberg/spark/source/SparkPositionDeltaWrite.java:
##########
@@ -426,17 +428,35 @@ public DeltaWriter<InternalRow> createWriter(int 
partitionId, long taskId) {
               .writeProperties(writeProperties)
               .build();
 
+      Function<InternalRow, InternalRow> extractRowLineage =

Review Comment:
   nit: maybe `rowLineageExtractor` or something along those lines? I only mention this because `extractRowLineage` sounds like a boolean flag



##########
spark/v4.0/spark/src/main/java/org/apache/iceberg/spark/source/SparkPositionDeltaOperation.java:
##########
@@ -98,6 +99,13 @@ public DeltaWriteBuilder newWriteBuilder(LogicalWriteInfo 
info) {
   public NamedReference[] requiredMetadataAttributes() {
     NamedReference specId = Expressions.column(MetadataColumns.SPEC_ID.name());
     NamedReference partition = 
Expressions.column(MetadataColumns.PARTITION_COLUMN_NAME);
+    if (TableUtil.supportsRowLineage(table)) {

Review Comment:
   nit: I'm fine either way, but I think it would be good to align how this is done here (stores named references in an array) with `SparkCopyOnWriteOperation` (which stores named references in a list)
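
   For illustration, the list-based variant could look roughly like this (sketch only; it reuses the metadata columns from the snippet above and assumes `NamedReference` and `Lists` are already imported):

   ```java
   // Sketch of the list-based style (as in SparkCopyOnWriteOperation), returning
   // the same metadata attributes as the array-based code above.
   List<NamedReference> metadataAttributes =
       Lists.newArrayList(
           Expressions.column(MetadataColumns.SPEC_ID.name()),
           Expressions.column(MetadataColumns.PARTITION_COLUMN_NAME));

   if (TableUtil.supportsRowLineage(table)) {
     metadataAttributes.add(Expressions.column(MetadataColumns.ROW_ID.name()));
     metadataAttributes.add(
         Expressions.column(MetadataColumns.LAST_UPDATED_SEQUENCE_NUMBER.name()));
   }

   return metadataAttributes.toArray(new NamedReference[0]);
   ```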



##########
spark/v4.0/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestRowLevelOperationsWithLineage.java:
##########
@@ -0,0 +1,491 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.spark.extensions;
+
+import static org.apache.iceberg.MetadataColumns.schemaWithRowLineage;
+import static org.apache.iceberg.spark.Spark3Util.loadIcebergTable;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assumptions.assumeThat;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import org.apache.iceberg.AppendFiles;
+import org.apache.iceberg.FileFormat;
+import org.apache.iceberg.Files;
+import org.apache.iceberg.MetadataColumns;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.Snapshot;
+import org.apache.iceberg.SnapshotRef;
+import org.apache.iceberg.StructLike;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.TestHelpers;
+import org.apache.iceberg.data.GenericAppenderFactory;
+import org.apache.iceberg.data.GenericRecord;
+import org.apache.iceberg.data.Record;
+import org.apache.iceberg.encryption.EncryptionUtil;
+import org.apache.iceberg.io.DataWriter;
+import org.apache.iceberg.io.OutputFile;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
+import org.apache.iceberg.relocated.com.google.common.collect.Iterables;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.spark.functions.BucketFunction;
+import org.apache.iceberg.types.Types;
+import org.apache.iceberg.util.Pair;
+import org.apache.iceberg.util.PartitionMap;
+import org.apache.spark.sql.catalyst.analysis.NoSuchTableException;
+import org.apache.spark.sql.catalyst.parser.ParseException;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.TestTemplate;
+
+public abstract class TestRowLevelOperationsWithLineage extends 
SparkRowLevelOperationsTestBase {
+  static final Function<StructLike, StructLike> BUCKET_PARTITION_GENERATOR =
+      record ->
+          TestHelpers.Row.of(BucketFunction.BucketInt.invoke(2, record.get(0, 
Integer.class)));
+
+  static final Schema SCHEMA =
+      new Schema(
+          ImmutableList.of(
+              Types.NestedField.required(1, "id", Types.IntegerType.get()),
+              Types.NestedField.required(2, "data", Types.StringType.get()),
+              MetadataColumns.ROW_ID,
+              MetadataColumns.LAST_UPDATED_SEQUENCE_NUMBER));
+
+  static final List<Record> INITIAL_RECORDS =
+      ImmutableList.of(
+          createRecord(SCHEMA, 100, "a", 0L, 1L),
+          createRecord(SCHEMA, 101, "b", 1L, 1L),
+          createRecord(SCHEMA, 102, "c", 2L, 1L),
+          createRecord(SCHEMA, 103, "d", 3L, 1L),
+          createRecord(SCHEMA, 104, "e", 4L, 1L));
+
+  @BeforeAll
+  public static void setupSparkConf() {
+    spark.conf().set("spark.sql.shuffle.partitions", "4");
+  }
+
+  @BeforeEach
+  public void beforeEach() {
+    assumeThat(formatVersion).isGreaterThanOrEqualTo(3);
+    // ToDo: Remove these as row lineage inheritance gets implemented in the 
other readers
+    assumeThat(fileFormat).isEqualTo(FileFormat.PARQUET);
+  }
+
+  @AfterEach
+  public void removeTables() {
+    sql("DROP TABLE IF EXISTS %s", tableName);
+    sql("DROP TABLE IF EXISTS source");
+  }
+
+  @TestTemplate
+  public void testMergeIntoWithBothMatchedAndNonMatched()
+      throws NoSuchTableException, ParseException, IOException {
+    createAndInitTable("id INT, data STRING", null);
+    createBranchIfNeeded();
+    Table table = loadIcebergTable(spark, tableName);
+    appendUnpartitionedRecords(table, INITIAL_RECORDS);
+    createOrReplaceView(
+        "source",
+        "id int, data string",
+        "{ \"id\": 101, \"data\": \"updated_b\" }\n " + "{ \"id\": 200, 
\"data\": \"f\" }\n");
+    sql(
+        "MERGE INTO %s AS t USING source AS s "
+            + "ON t.id == s.id "
+            + "WHEN MATCHED THEN "
+            + "  UPDATE SET t.data = s.data "
+            + "WHEN NOT MATCHED THEN "
+            + "  INSERT *",
+        commitTarget());
+
+    Snapshot updateSnapshot = latestSnapshot(table);
+    long updateSnapshotFirstRowId = updateSnapshot.firstRowId();
+    List<Object[]> allRows = rowsWithLineageAndFilePos();
+    List<Object[]> carriedOverAndUpdatedRows =
+        allRows.stream()
+            .filter(row -> (long) row[3] < updateSnapshotFirstRowId)
+            .collect(Collectors.toList());
+
+    // Project sequence numbers first for easier comparison on the added row
+    assertEquals(
+        "Rows which are carried over or updated should have expected lineage",
+        ImmutableList.of(
+            row(1L, 100, "a", 0L, ANY, ANY),
+            row(updateSnapshot.sequenceNumber(), 101, "updated_b", 1L, ANY, 
ANY),
+            row(1L, 102, "c", 2L, ANY, ANY),
+            row(1L, 103, "d", 3L, ANY, ANY),
+            row(1L, 104, "e", 4L, ANY, ANY)),
+        carriedOverAndUpdatedRows);
+
+    Object[] newRow =
+        Iterables.getOnlyElement(
+            allRows.stream()
+                .filter(row -> (long) row[3] >= updateSnapshotFirstRowId)
+                .collect(Collectors.toList()));
+    assertAddedRowLineage(row(updateSnapshot.sequenceNumber(), 200, "f"), 
newRow);
+  }
+
+  @TestTemplate
+  public void testMergeIntoWithBothMatchedAndNonMatchedPartitioned()
+      throws NoSuchTableException, ParseException, IOException {
+    createAndInitTable("id INT, data STRING", "PARTITIONED BY (bucket(2, 
id))", null);
+    createBranchIfNeeded();
+    Table table = loadIcebergTable(spark, tableName);
+    appendRecords(
+        table, partitionRecords(INITIAL_RECORDS, table.spec(), 
BUCKET_PARTITION_GENERATOR));
+    createOrReplaceView(
+        "source",
+        "id int, data string",
+        "{ \"id\": 101, \"data\": \"updated_b\" }\n " + "{ \"id\": 200, 
\"data\": \"f\" }\n");
+    sql(
+        "MERGE INTO %s AS t USING source AS s "
+            + "ON t.id == s.id "
+            + "WHEN MATCHED THEN "
+            + "  UPDATE SET t.data = s.data "
+            + "WHEN NOT MATCHED THEN "
+            + "  INSERT *",
+        commitTarget());
+
+    Snapshot updateSnapshot = latestSnapshot(table);
+    long updateSnapshotFirstRowId = updateSnapshot.firstRowId();
+    List<Object[]> allRows = rowsWithLineageAndFilePos();
+
+    List<Object[]> carriedOverAndUpdatedRows =
+        allRows.stream()
+            .filter(row -> (long) row[3] < updateSnapshotFirstRowId)
+            .collect(Collectors.toList());
+
+    // Project sequence numbers first for easier comparison on the added row
+    assertEquals(
+        "Rows which are carried over or updated should have expected lineage",
+        ImmutableList.of(
+            row(1L, 100, "a", 0L, ANY, ANY),
+            row(updateSnapshot.sequenceNumber(), 101, "updated_b", 1L, ANY, 
ANY),
+            row(1L, 102, "c", 2L, ANY, ANY),
+            row(1L, 103, "d", 3L, ANY, ANY),
+            row(1L, 104, "e", 4L, ANY, ANY)),
+        carriedOverAndUpdatedRows);
+
+    Object[] newRow =
+        Iterables.getOnlyElement(
+            allRows.stream()
+                .filter(row -> (long) row[3] >= updateSnapshotFirstRowId)
+                .collect(Collectors.toList()));
+    assertAddedRowLineage(row(updateSnapshot.sequenceNumber(), 200, "f"), 
newRow);
+  }
+
+  @TestTemplate
+  public void testMergeIntoWithOnlyNonMatched()
+      throws NoSuchTableException, ParseException, IOException {
+    createAndInitTable("id INT, data string", null);
+    createBranchIfNeeded();
+    Table table = loadIcebergTable(spark, tableName);
+    appendUnpartitionedRecords(table, INITIAL_RECORDS);
+    createOrReplaceView(
+        "source",
+        "id INT, data STRING",
+        "{ \"id\": 101, \"data\": \"updated_b\" }\n " + "{ \"id\": 200, 
\"data\": \"f\" }\n");
+
+    sql(
+        "MERGE INTO %s AS t USING source AS s "
+            + "ON t.id == s.id "
+            + "WHEN NOT MATCHED THEN "
+            + "INSERT *",
+        commitTarget());
+
+    Snapshot updateSnapshot = latestSnapshot(table);
+    long updateSnapshotFirstRowId = updateSnapshot.firstRowId();
+
+    List<Object[]> allRows = rowsWithLineageAndFilePos();
+    List<Object[]> carriedOverAndUpdatedRows =
+        allRows.stream()
+            .filter(row -> (long) row[3] < updateSnapshotFirstRowId)
+            .collect(Collectors.toList());
+
+    // Project sequence numbers first for easier comparison on the added row
+    assertEquals(
+        "Rows which are carried over or updated should have expected lineage",
+        ImmutableList.of(
+            row(1L, 100, "a", 0L, ANY, ANY),
+            row(1L, 101, "b", 1L, ANY, ANY),
+            row(1L, 102, "c", 2L, ANY, ANY),
+            row(1L, 103, "d", 3L, ANY, ANY),
+            row(1L, 104, "e", 4L, ANY, ANY)),
+        carriedOverAndUpdatedRows);
+
+    Object[] newRow =
+        Iterables.getOnlyElement(
+            allRows.stream()
+                .filter(row -> (long) row[3] >= updateSnapshotFirstRowId)
+                .collect(Collectors.toList()));
+    assertAddedRowLineage(row(updateSnapshot.sequenceNumber(), 200, "f"), 
newRow);
+  }
+
+  @TestTemplate
+  public void testMergeIntoWithOnlyMatched()
+      throws IOException, NoSuchTableException, ParseException {
+    createAndInitTable("id INT, data STRING", null);
+    createBranchIfNeeded();
+    Table table = loadIcebergTable(spark, tableName);
+    appendUnpartitionedRecords(table, INITIAL_RECORDS);
+    createOrReplaceView(
+        "source",
+        "id INT, data string",
+        "{ \"id\": 101, \"data\": \"updated_b\" }\n "
+            + "{ \"id\": 102, \"data\": \"updated_c\" }\n");
+
+    sql(
+        "MERGE INTO %s AS t USING source AS s "
+            + "ON t.id == s.id "
+            + "WHEN MATCHED THEN "
+            + "  UPDATE SET t.data = s.data ",
+        commitTarget());
+
+    long updateSequenceNumber = latestSnapshot(table).sequenceNumber();
+    assertEquals(
+        "Rows which are carried over or updated should have expected lineage",
+        ImmutableList.of(
+            row(100, "a", 0L, 1L),
+            row(101, "updated_b", 1L, updateSequenceNumber),
+            row(102, "updated_c", 2L, updateSequenceNumber),
+            row(103, "d", 3L, 1L),
+            row(104, "e", 4L, 1L)),
+        rowsWithLineage());
+  }
+
+  @TestTemplate
+  public void testMergeMatchedDelete() throws NoSuchTableException, 
ParseException, IOException {
+    createAndInitTable("id INT, data STRING", null);
+    createBranchIfNeeded();
+    Table table = loadIcebergTable(spark, tableName);
+    appendUnpartitionedRecords(table, INITIAL_RECORDS);
+    createOrReplaceView(
+        "source",
+        "id INT, data string",
+        "{ \"id\": 101, \"data\": \"delete_101\" }\n "
+            + "{ \"id\": 102, \"data\": \"delete_102\" }\n");
+    sql(
+        "MERGE INTO %s AS t USING source AS s " + "ON t.id == s.id " + "WHEN 
MATCHED THEN DELETE",
+        commitTarget());
+
+    assertEquals(
+        "Rows which are carried over or updated should have expected lineage",
+        ImmutableList.of(row(100, "a", 0L, 1L), row(103, "d", 3L, 1L), 
row(104, "e", 4L, 1L)),
+        rowsWithLineage());
+  }
+
+  @TestTemplate
+  public void testMergeWhenNotMatchedBySource()
+      throws NoSuchTableException, ParseException, IOException {
+    createAndInitTable("id INT, data STRING", null);
+    createBranchIfNeeded();
+    Table table = loadIcebergTable(spark, tableName);
+    appendUnpartitionedRecords(table, INITIAL_RECORDS);
+    createOrReplaceView(
+        "source",
+        "id INT, data STRING",
+        "{ \"id\": 101, \"data\": \"updated_b\" }\n " + "{ \"id\": 200, 
\"data\": \"f\" }\n");
+
+    sql(
+        "MERGE INTO %s AS t USING source AS s ON t.id == s.id"
+            + " WHEN MATCHED THEN UPDATE set t.data = s.data "
+            + "WHEN NOT MATCHED BY SOURCE THEN UPDATE set data = 
'not_matched_by_source'",
+        commitTarget());
+
+    long updateSequenceNumber = latestSnapshot(table).sequenceNumber();
+
+    assertEquals(
+        "Rows which are carried over or updated should have expected lineage",
+        ImmutableList.of(
+            row(100, "not_matched_by_source", 0L, updateSequenceNumber),
+            row(101, "updated_b", 1L, updateSequenceNumber),
+            row(102, "not_matched_by_source", 2L, updateSequenceNumber),
+            row(103, "not_matched_by_source", 3L, updateSequenceNumber),
+            row(104, "not_matched_by_source", 4L, updateSequenceNumber)),
+        rowsWithLineage());
+  }
+
+  @TestTemplate
+  public void testMergeWhenNotMatchedBySourceDelete()
+      throws NoSuchTableException, ParseException, IOException {
+    createAndInitTable("id INT, data STRING", null);
+    createBranchIfNeeded();
+    Table table = loadIcebergTable(spark, tableName);
+    appendUnpartitionedRecords(table, INITIAL_RECORDS);
+    createOrReplaceView(
+        "source",
+        "id INT, data STRING",
+        "{ \"id\": 101, \"data\": \"updated_b\" }\n "
+            + "{ \"id\": 102, \"data\": \"updated_c\" }\n");
+
+    sql(
+        "MERGE INTO %s AS t USING source AS s ON t.id == s.id"
+            + " WHEN MATCHED THEN UPDATE set t.data = s.data "
+            + "WHEN NOT MATCHED BY SOURCE THEN DELETE",
+        commitTarget());
+
+    long updateSequenceNumber = latestSnapshot(table).sequenceNumber();
+    assertEquals(
+        "Rows which are carried over or updated should have expected lineage",
+        ImmutableList.of(
+            row(101, "updated_b", 1L, updateSequenceNumber),
+            row(102, "updated_c", 2L, updateSequenceNumber)),
+        rowsWithLineage());
+  }
+
+  @TestTemplate
+  public void testUpdate() throws NoSuchTableException, ParseException, 
IOException {
+    createAndInitTable("id INT, data STRING", null);
+    createBranchIfNeeded();
+    Table table = loadIcebergTable(spark, tableName);
+    appendUnpartitionedRecords(table, INITIAL_RECORDS);
+
+    sql("UPDATE %s AS t set data = 'updated_b' WHERE id = 101", 
commitTarget());
+    long updateSequenceNumber = latestSnapshot(table).sequenceNumber();
+
+    assertEquals(
+        "Rows which are carried over or updated should have expected lineage",
+        ImmutableList.of(
+            row(100, "a", 0L, 1L),
+            row(101, "updated_b", 1L, updateSequenceNumber),
+            row(102, "c", 2L, 1L),
+            row(103, "d", 3L, 1L),
+            row(104, "e", 4L, 1L)),
+        rowsWithLineage());
+  }
+
+  @TestTemplate
+  public void testDelete() throws NoSuchTableException, ParseException, 
IOException {
+    assumeThat(formatVersion).isGreaterThanOrEqualTo(3);
+    createAndInitTable("id int, data STRING", null);
+    createBranchIfNeeded();
+    Table table = loadIcebergTable(spark, tableName);
+    appendUnpartitionedRecords(table, INITIAL_RECORDS);
+
+    sql("DELETE FROM %s WHERE id = 101", commitTarget());
+
+    assertEquals(
+        "Rows which are carried over or updated should have expected lineage",
+        ImmutableList.of(
+            row(100, "a", 0L, 1L),
+            row(102, "c", 2L, 1L),
+            row(103, "d", 3L, 1L),
+            row(104, "e", 4L, 1L)),
+        rowsWithLineage());
+  }
+
+  private List<Object[]> rowsWithLineageAndFilePos() {
+    return sql(
+        "SELECT s._last_updated_sequence_number, s.id, s.data, s._row_id, 
files.first_row_id, s._pos FROM %s"
+            + " AS s JOIN %s.files AS files ON files.file_path = s._file ORDER 
BY s._row_id",
+        selectTarget(), selectTarget());
+  }
+
+  private List<Object[]> rowsWithLineage() {
+    return sql(
+        "SELECT id, data, _row_id, _last_updated_sequence_number FROM %s ORDER 
BY _row_id",
+        selectTarget());
+  }
+
+  /**
+   * Partitions the provided records based on the spec and partition function
+   *
+   * @return a partitioned map
+   */
+  protected PartitionMap<List<Record>> partitionRecords(
+      List<Record> records,
+      PartitionSpec spec,
+      Function<StructLike, StructLike> partitionGenerator) {
+    PartitionMap<List<Record>> recordsByPartition =
+        PartitionMap.create(Map.of(spec.specId(), spec));
+    for (Record record : records) {
+      StructLike partition = partitionGenerator != null ? 
partitionGenerator.apply(record) : null;
+      List<Record> recordsForPartition = recordsByPartition.get(spec.specId(), 
partition);
+      if (recordsForPartition == null) {
+        recordsForPartition = Lists.newArrayList();
+      }
+
+      recordsForPartition.add(record);
+      recordsByPartition.put(spec.specId(), partition, recordsForPartition);
+    }
+
+    return recordsByPartition;
+  }
+
+  protected void appendUnpartitionedRecords(Table table, List<Record> records) 
throws IOException {
+    appendRecords(table, partitionRecords(records, table.spec(), record -> 
null));
+  }
+
+  // Append unpartitioned records?

Review Comment:
   nit: is this still needed?



##########
spark/v4.0/spark/src/main/java/org/apache/iceberg/spark/source/SparkWriteBuilder.java:
##########
@@ -116,16 +119,36 @@ public WriteBuilder overwrite(Filter[] filters) {
 
   @Override
   public Write build() {
-    // Validate
-    Schema writeSchema = validateOrMergeWriteSchema(table, dsSchema, 
writeConf);
+    // The write schema should only include row lineage in the output if it's 
an overwrite
+    // operation.
+    // In any other case, only null row IDs and sequence numbers would be 
produced which
+    // means the row lineage columns can be excluded from the output files
+    boolean writeIncludesRowLineage = TableUtil.supportsRowLineage(table) && 
overwriteFiles;
+    StructType sparkWriteSchema = dsSchema;
+    if (writeIncludesRowLineage) {
+      sparkWriteSchema = sparkWriteSchema.add("_row_id", LongType$.MODULE$);

Review Comment:
   +1 on using the constant
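
   i.e. roughly this (sketch; I'm assuming `_last_updated_sequence_number` is added the same way further down in this hunk):

   ```java
   // Use the MetadataColumns constants instead of hard-coded column-name literals
   // when extending the Spark write schema for row lineage.
   if (writeIncludesRowLineage) {
     sparkWriteSchema = sparkWriteSchema.add(MetadataColumns.ROW_ID.name(), LongType$.MODULE$);
     sparkWriteSchema =
         sparkWriteSchema.add(
             MetadataColumns.LAST_UPDATED_SEQUENCE_NUMBER.name(), LongType$.MODULE$);
   }
   ```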



##########
spark/v4.0/spark/src/main/java/org/apache/iceberg/spark/source/SparkMetadataColumn.java:
##########
@@ -20,17 +20,40 @@
 
 import org.apache.spark.sql.connector.catalog.MetadataColumn;
 import org.apache.spark.sql.types.DataType;
+import org.apache.spark.sql.types.MetadataBuilder;
 
 public class SparkMetadataColumn implements MetadataColumn {
 
   private final String name;
   private final DataType dataType;
   private final boolean isNullable;
+  private final boolean preserveOnReinsert;
+  private final boolean preserveOnUpdate;
+  private final boolean preserveOnDelete;
 
   public SparkMetadataColumn(String name, DataType dataType, boolean 
isNullable) {
+    this(
+        name,
+        dataType,
+        isNullable,
+        MetadataColumn.PRESERVE_ON_REINSERT_DEFAULT,
+        MetadataColumn.PRESERVE_ON_UPDATE_DEFAULT,
+        MetadataColumn.PRESERVE_ON_DELETE_DEFAULT);
+  }
+
+  public SparkMetadataColumn(
+      String name,
+      DataType dataType,
+      boolean isNullable,

Review Comment:
   +1 to having a builder as I think this is the cleanest approach here
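
   For example, a builder could look roughly like this (sketch only; method names and defaults are illustrative, not a prescription):

   ```java
   // Hypothetical builder for SparkMetadataColumn; keeps the existing constructor
   // but avoids long positional boolean argument lists at call sites.
   public static Builder builder() {
     return new Builder();
   }

   public static class Builder {
     private String name;
     private DataType dataType;
     private boolean isNullable = true;
     private boolean preserveOnReinsert = MetadataColumn.PRESERVE_ON_REINSERT_DEFAULT;
     private boolean preserveOnUpdate = MetadataColumn.PRESERVE_ON_UPDATE_DEFAULT;
     private boolean preserveOnDelete = MetadataColumn.PRESERVE_ON_DELETE_DEFAULT;

     public Builder name(String newName) {
       this.name = newName;
       return this;
     }

     public Builder dataType(DataType newType) {
       this.dataType = newType;
       return this;
     }

     public Builder nullable(boolean newNullable) {
       this.isNullable = newNullable;
       return this;
     }

     public Builder preserveOnReinsert(boolean newValue) {
       this.preserveOnReinsert = newValue;
       return this;
     }

     public Builder preserveOnUpdate(boolean newValue) {
       this.preserveOnUpdate = newValue;
       return this;
     }

     public Builder preserveOnDelete(boolean newValue) {
       this.preserveOnDelete = newValue;
       return this;
     }

     public SparkMetadataColumn build() {
       return new SparkMetadataColumn(
           name, dataType, isNullable, preserveOnReinsert, preserveOnUpdate, preserveOnDelete);
     }
   }
   ```

   Call sites would then read like `SparkMetadataColumn.builder().name(...).dataType(...).preserveOnDelete(false).build()`.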



##########
spark/v4.0/spark/src/main/java/org/apache/iceberg/spark/source/SparkWrite.java:
##########
@@ -699,29 +708,41 @@ public DataWriter<InternalRow> createWriter(int 
partitionId, long taskId, long e
             writeSchema,
             dsSchema,
             targetFileSize,
-            useFanoutWriter);
+            useFanoutWriter,
+            extractRowLineage);
       }
     }
   }
 
   private static class UnpartitionedDataWriter implements 
DataWriter<InternalRow> {
     private final FileWriter<InternalRow, DataWriteResult> delegate;
     private final FileIO io;
+    private final Function<InternalRow, InternalRow> 
extractRowLineageFromMetadata;
 
     private UnpartitionedDataWriter(
         SparkFileWriterFactory writerFactory,
         OutputFileFactory fileFactory,
         FileIO io,
         PartitionSpec spec,
-        long targetFileSize) {
+        long targetFileSize,
+        Function<InternalRow, InternalRow> extractRowLineageFromMetadata) {
       this.delegate =
           new RollingDataWriter<>(writerFactory, fileFactory, io, 
targetFileSize, spec, null);
       this.io = io;
+      this.extractRowLineageFromMetadata = extractRowLineageFromMetadata;
     }
 
     @Override
     public void write(InternalRow record) throws IOException {
-      delegate.write(record);
+      write(null, record);

Review Comment:
   I don't think we can because this API is coming from Spark's `DataWriter`
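
   For reference, this is (roughly) the Spark interface that fixes the single-argument signature, so the two-argument `write` above can only be an internal helper:

   ```java
   // Simplified view of org.apache.spark.sql.connector.write.DataWriter:
   // Spark always calls write(T record), so the existing override has to stay.
   public interface DataWriter<T> extends Closeable {
     void write(T record) throws IOException;

     WriterCommitMessage commit() throws IOException;

     void abort() throws IOException;
   }
   ```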



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

