dramaticlly commented on code in PR #9724: URL: https://github.com/apache/iceberg/pull/9724#discussion_r1716093612
########## spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/RemoveDanglingDeletesSparkAction.java: ##########
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.spark.actions;
+
+import static org.apache.spark.sql.functions.col;
+import static org.apache.spark.sql.functions.min;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.stream.Collectors;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.DeleteFile;
+import org.apache.iceberg.MetadataTableType;
+import org.apache.iceberg.Partitioning;
+import org.apache.iceberg.RewriteFiles;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.actions.RemoveDanglingDeleteFiles;
+import org.apache.iceberg.actions.RemoveDanglingDeleteFilesActionResult;
+import org.apache.iceberg.spark.JobGroupInfo;
+import org.apache.iceberg.spark.SparkDeleteFile;
+import org.apache.iceberg.types.Types;
+import org.apache.spark.sql.Column;
+import org.apache.spark.sql.Dataset;
+import org.apache.spark.sql.Row;
+import org.apache.spark.sql.SparkSession;
+import org.apache.spark.sql.types.StructType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * An action that removes dangling delete files from the current snapshot. A delete file is
+ * dangling if its deletes no longer apply to any non-expired data file.
+ *
+ * <p>The following dangling delete files are removed:
+ *
+ * <ul>
+ *   <li>Position delete files with a data sequence number less than that of any data file in the
+ *       same partition
+ *   <li>Equality delete files with a data sequence number less than or equal to that of any data
+ *       file in the same partition
+ * </ul>
+ */
+class RemoveDanglingDeletesSparkAction
+    extends BaseSnapshotUpdateSparkAction<RemoveDanglingDeletesSparkAction>
+    implements RemoveDanglingDeleteFiles {
+
+  private static final Logger LOG = LoggerFactory.getLogger(RemoveDanglingDeletesSparkAction.class);
+  private final Table table;
+
+  protected RemoveDanglingDeletesSparkAction(SparkSession spark, Table table) {
+    super(spark.cloneSession());
+    this.table = table;
+  }
+
+  @Override
+  protected RemoveDanglingDeletesSparkAction self() {
+    return this;
+  }
+
+  public Result execute() {
+    if (table.specs().size() == 1 && table.spec().isUnpartitioned()) {
+      // ManifestFilterManager already performs this table-wide delete on each commit
+      return new RemoveDanglingDeleteFilesActionResult(Collections.emptyList());
+    }
+
+    String desc = String.format("Removing dangling deletes in %s", table.name());
+    JobGroupInfo info = newJobGroupInfo("REMOVE-DELETES", desc);
+    return withJobGroupInfo(info, this::doExecute);
+  }
+
+  Result doExecute() {
+    RewriteFiles rewriteFiles = table.newRewrite();
+    List<DeleteFile> danglingDeletes = findDanglingDeletes();
+    for (DeleteFile deleteFile : danglingDeletes) {
+      LOG.debug("Removing dangling delete file {}", deleteFile.path());
+      rewriteFiles.deleteFile(deleteFile);
+    }
+
+    if (!danglingDeletes.isEmpty()) {
+      commit(rewriteFiles);
+    }
+
+    return new RemoveDanglingDeleteFilesActionResult(danglingDeletes);
+  }
+
+  /**
+   * Dangling delete files can be identified with the following steps:
+   *
+   * <p>1. Group the live data entries table by partition spec ID and partition value, aggregating
+   * to compute the min data sequence number per group
+   *
+   * <p>2. Left join the live delete entries table on the grouped partition spec ID and partition
+   * value to account for partition evolution
+   *
+   * <p>3. Filter to identify dangling deletes that can be discarded, comparing each delete file's
+   * data sequence number in a single predicate that accounts for both position and equality
+   * deletes
+   *
+   * <p>4. Collect the result rows to the driver and use {@link SparkDeleteFile SparkDeleteFile} to
+   * wrap the rows as valid delete files
+   */
+  private List<DeleteFile> findDanglingDeletes() {
+    Dataset<Row> minSequenceNumberByPartition =
+        loadMetadataTable(table, MetadataTableType.ENTRIES)
+            .filter(" data_file.content == 0 AND status < 2")

Review Comment:
   sounds good!

########## api/src/main/java/org/apache/iceberg/actions/RewriteDataFiles.java: ##########
@@ -106,6 +106,19 @@ public interface RewriteDataFiles
 
   boolean USE_STARTING_SEQUENCE_NUMBER_DEFAULT = true;
 
+  /**
+   * Remove dangling delete files from the current snapshot after compaction. A delete file is
+   * considered dangling if it does not apply to any non-expired data file.

Review Comment:
   makes sense! Updated to live data files
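For context, here is a minimal usage sketch of the option this hunk documents, assuming the `REMOVE_DANGLING_DELETES` constant the PR adds to `RewriteDataFiles`; an illustration, not code from the PR:

```java
import org.apache.iceberg.Table;
import org.apache.iceberg.actions.RewriteDataFiles;
import org.apache.iceberg.spark.actions.SparkActions;
import org.apache.spark.sql.SparkSession;

class RewriteWithDanglingDeleteRemoval {
  // Compacts data files, then drops any delete files left dangling by the rewrite.
  static RewriteDataFiles.Result compact(SparkSession spark, Table table) {
    return SparkActions.get(spark)
        .rewriteDataFiles(table)
        .option(RewriteDataFiles.REMOVE_DANGLING_DELETES, "true")
        .execute();
  }
}
```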
########## spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/RemoveDanglingDeletesSparkAction.java: ##########
@@ -0,0 +1,175 @@ (same file as above; quoting only the relevant lines)
+  private List<DeleteFile> findDanglingDeletes() {
+    Dataset<Row> minSequenceNumberByPartition =
+        loadMetadataTable(table, MetadataTableType.ENTRIES)
+            .filter(" data_file.content == 0 AND status < 2")
+            .selectExpr(
+                "data_file.partition as partition",
+                "data_file.spec_id as spec_id",
+                "sequence_number")
+            .groupBy("partition", "spec_id")
+            .agg(min("sequence_number"))
+            .toDF("grouped_partition", "grouped_spec_id", "min_data_sequence_number");
+
+    Dataset<Row> deleteEntries =
+        loadMetadataTable(table, MetadataTableType.ENTRIES)
+            .filter(" data_file.content != 0 AND status < 2");
+
+    Column joinCond =
+        deleteEntries
+            .col("data_file.spec_id")
+            .equalTo(minSequenceNumberByPartition.col("grouped_spec_id"))
+            .and(
+                deleteEntries
+                    .col("data_file.partition")
+                    .equalTo(minSequenceNumberByPartition.col("grouped_partition")));
+
+    Column filterCondition =
+        col("min_data_sequence_number")
+            .isNull()

Review Comment:
   yeah, this accounts for the corner case where all of the data files in a partition went through partition evolution and only the delete files are stuck with the old spec, resulting in a null `min_data_sequence_number`. Let me add a comment here as well
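To make that corner case concrete, here is a minimal sketch (not the PR's exact code) of how the complete predicate could combine the null check with the sequence-number rules from the class javadoc; the content values 1 and 2 are Iceberg's codes for position and equality deletes:

```java
// A delete entry is dangling when its partition group has no live data files
// at all (null min from the left join), or when its data sequence number can
// no longer apply to any live data file in the group.
Column filterCondition =
    col("min_data_sequence_number")
        .isNull() // no live data files remain under this (spec_id, partition) group
        .or(
            col("data_file.content")
                .equalTo(1) // position deletes: dangling if strictly below the min
                .and(col("sequence_number").lt(col("min_data_sequence_number"))))
        .or(
            col("data_file.content")
                .equalTo(2) // equality deletes: dangling if at or below the min
                .and(col("sequence_number").leq(col("min_data_sequence_number"))));
```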
########## spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/RemoveDanglingDeletesSparkAction.java: ##########
@@ -0,0 +1,175 @@ (same file as above; quoting only the relevant lines)
+  Result doExecute() {
+    RewriteFiles rewriteFiles = table.newRewrite();
+    List<DeleteFile> danglingDeletes = findDanglingDeletes();
+    for (DeleteFile deleteFile : danglingDeletes) {
+      LOG.debug("Removing dangling delete file {}", deleteFile.path());
+      rewriteFiles.deleteFile(deleteFile);
+    }
+
+    if (!danglingDeletes.isEmpty()) {
+      commit(rewriteFiles);
+    }
+
+    return new RemoveDanglingDeleteFilesActionResult(danglingDeletes);

Review Comment:
   I actually used the count in RewriteDataFiles.Result but kept the actual collection here, as it makes it easier to verify in tests that the right files get removed. Would switching from List to Iterable help in terms of memory?

########## spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteDataFilesSparkAction.java: ##########
@@ -82,8 +83,9 @@ public class RewriteDataFilesSparkAction
           PARTIAL_PROGRESS_MAX_FAILED_COMMITS,
           TARGET_FILE_SIZE_BYTES,
           USE_STARTING_SEQUENCE_NUMBER,
-          REWRITE_JOB_ORDER,
-          OUTPUT_SPEC_ID);
+          OUTPUT_SPEC_ID,

Review Comment:
   my bad, this is probably a rebase error; I am appending to the bottom now
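After the rebase fix described above, the option set presumably ends up like this sketch, with `REWRITE_JOB_ORDER` restored and the new options appended last; the `VALID_OPTIONS` field name and the elided earlier entries are assumptions reconstructed from the diff context, not the final code:

```java
private static final Set<String> VALID_OPTIONS =
    ImmutableSet.of(
        // ... earlier options unchanged ...
        PARTIAL_PROGRESS_MAX_FAILED_COMMITS,
        TARGET_FILE_SIZE_BYTES,
        USE_STARTING_SEQUENCE_NUMBER,
        REWRITE_JOB_ORDER,
        OUTPUT_SPEC_ID,
        REMOVE_DANGLING_DELETES); // new option appended at the bottom
```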
########## spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/RemoveDanglingDeletesSparkAction.java: ##########
@@ -0,0 +1,175 @@ (same file as above; quoting only the relevant lines)
+    Column joinCond =
+        deleteEntries
+            .col("data_file.spec_id")
+            .equalTo(minSequenceNumberByPartition.col("grouped_spec_id"))
+            .and(
+                deleteEntries
+                    .col("data_file.partition")
+                    .equalTo(minSequenceNumberByPartition.col("grouped_partition")));

Review Comment:
   updated to `joinOnPartition`
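Connecting the pieces, a minimal sketch of the method's tail under stated assumptions: the renamed `joinOnPartition` condition from this thread, a left join so unmatched delete entries keep a null `min_data_sequence_number`, and a hypothetical `wrapAsDeleteFile` helper standing in for the `SparkDeleteFile` wrapping described in step 4 of the javadoc:

```java
// Left join keeps delete entries whose partition group has no live data files.
Dataset<Row> joined =
    deleteEntries.join(minSequenceNumberByPartition, joinOnPartition, "left");

// Collect the dangling delete rows to the driver and wrap them as DeleteFiles.
List<DeleteFile> danglingDeletes =
    joined
        .filter(filterCondition) // see the predicate sketch above
        .select("data_file.*") // keep only the delete file struct fields
        .collectAsList()
        .stream()
        .map(row -> wrapAsDeleteFile(row)) // hypothetical: Row -> DeleteFile via SparkDeleteFile
        .collect(Collectors.toList());
```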
########## spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/RemoveDanglingDeletesSparkAction.java: ##########
@@ -0,0 +1,175 @@ (same file as above; quoting only the relevant lines)
+    Column filterCondition =
+        col("min_data_sequence_number")
+            .isNull()

Review Comment:
   I used `danglingDeletes` below to represent the collection of deletes to be purged; let me come up with a more meaningful name
########## spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/RemoveDanglingDeletesSparkAction.java: ##########
@@ -0,0 +1,175 @@ (same file as above; quoting only the relevant lines)
+    Dataset<Row> deleteEntries =
+        loadMetadataTable(table, MetadataTableType.ENTRIES)
+            .filter(" data_file.content != 0 AND status < 2");

Review Comment:
   yeah, `status < 2` selects live entries, but `data_file.content != 0` is specific to delete entries, as opposed to the data file entries above. Let me add a comment as well
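For readers of the thread, a short reference sketch of the numeric values behind these filters; the status codes come from Iceberg's `ManifestEntry.Status` enum and the content codes from `FileContent`, while the variable names are just illustrative:

```java
// status: 0 = EXISTING, 1 = ADDED, 2 = DELETED, so "status < 2" keeps live entries.
// data_file.content: 0 = DATA, 1 = POSITION_DELETES, 2 = EQUALITY_DELETES.
Dataset<Row> liveDataEntries =
    loadMetadataTable(table, MetadataTableType.ENTRIES)
        .filter("data_file.content == 0 AND status < 2"); // live data files only
Dataset<Row> liveDeleteEntries =
    loadMetadataTable(table, MetadataTableType.ENTRIES)
        .filter("data_file.content != 0 AND status < 2"); // live position/equality deletes
```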
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org
For additional commands, e-mail: issues-h...@iceberg.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org