aokolnychyi commented on code in PR #6365: URL: https://github.com/apache/iceberg/pull/6365#discussion_r1083026645
########## core/src/main/java/org/apache/iceberg/BasePositionDeletesScanTask.java: ########## @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iceberg; + +import org.apache.iceberg.expressions.ResidualEvaluator; + +/** Base implememntation of {@link PositionDeletesScanTask} */ +class BasePositionDeletesScanTask extends BaseContentScanTask<PositionDeletesScanTask, DeleteFile> + implements PositionDeletesScanTask, SplittableScanTask<PositionDeletesScanTask> { + + BasePositionDeletesScanTask( + DeleteFile file, String schemaString, String specString, ResidualEvaluator evaluator) { + super(file, schemaString, specString, evaluator); + } + + @Override + protected BasePositionDeletesScanTask self() { + return this; + } + + @Override + protected PositionDeletesScanTask newSplitTask( + PositionDeletesScanTask parentTask, long offset, long length) { + return new SplitPositionDeletesScanTask(parentTask, offset, length); Review Comment: You could make `SplitPositionDeletesScanTask` a private nested class in this file, like we have in other tasks, but it is up to you.
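For illustration, a minimal sketch of that pattern, with the split type delegating to its parent task. The delegated method set below is assumed from the ContentScanTask surface (file(), spec(), start(), length(), residual()) and is illustrative rather than the PR's exact code:

    // Hypothetical nested split task; field and method names are assumptions.
    private static class SplitPositionDeletesScanTask implements PositionDeletesScanTask {
      private final PositionDeletesScanTask parentTask;
      private final long offset;
      private final long length;

      SplitPositionDeletesScanTask(PositionDeletesScanTask parentTask, long offset, long length) {
        this.parentTask = parentTask;
        this.offset = offset;
        this.length = length;
      }

      @Override
      public DeleteFile file() {
        return parentTask.file(); // same underlying delete file
      }

      @Override
      public PartitionSpec spec() {
        return parentTask.spec();
      }

      @Override
      public long start() {
        return offset; // byte offset where this split begins
      }

      @Override
      public long length() {
        return length; // number of bytes this split covers
      }

      @Override
      public org.apache.iceberg.expressions.Expression residual() {
        return parentTask.residual();
      }
    }

Keeping it nested keeps the split type an implementation detail of this file, which is how the other task classes handle their split variants.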
########## core/src/main/java/org/apache/iceberg/PositionDeletesTable.java: ########## @@ -0,0 +1,221 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iceberg; + +import com.github.benmanes.caffeine.cache.Caffeine; +import com.github.benmanes.caffeine.cache.LoadingCache; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; +import org.apache.iceberg.expressions.Expressions; +import org.apache.iceberg.expressions.ManifestEvaluator; +import org.apache.iceberg.expressions.ResidualEvaluator; +import org.apache.iceberg.io.CloseableIterable; +import org.apache.iceberg.relocated.com.google.common.collect.Sets; +import org.apache.iceberg.types.TypeUtil; +import org.apache.iceberg.types.Types; +import org.apache.iceberg.util.ParallelIterable; +import org.apache.iceberg.util.TableScanUtil; + +/** + * A {@link Table} implementation whose {@link Scan} provides {@link PositionDeletesScanTask}, for + * reading of position delete files. + */ +public class PositionDeletesTable extends BaseMetadataTable { + + private final Schema schema; + + PositionDeletesTable(Table table) { + super(table, table.name() + ".position_deletes"); + this.schema = calculateSchema(); + } + + PositionDeletesTable(Table table, String name) { + super(table, name); + this.schema = calculateSchema(); + } + + @Override + MetadataTableType metadataTableType() { + return MetadataTableType.POSITION_DELETES; + } + + @Override + public TableScan newScan() { + throw new UnsupportedOperationException( + "Cannot create TableScan from table of type POSITION_DELETES"); + } + + @Override + public BatchScan newBatchScan() { + return new PositionDeletesBatchScan(table(), schema()); + } + + @Override + public Schema schema() { + return schema; + } + + private Schema calculateSchema() { + Types.StructType partitionType = Partitioning.partitionType(table()); + Schema result = + new Schema( + MetadataColumns.DELETE_FILE_PATH, + MetadataColumns.DELETE_FILE_POS, + Types.NestedField.optional( + MetadataColumns.DELETE_FILE_ROW_FIELD_ID, + "row", + table().schema().asStruct(), + MetadataColumns.DELETE_FILE_ROW_DOC), + Types.NestedField.required( + MetadataColumns.PARTITION_COLUMN_ID, + "partition", + partitionType, + "Partition that position delete row belongs to"), + Types.NestedField.required( + MetadataColumns.SPEC_ID_COLUMN_ID, + "spec_id", + Types.IntegerType.get(), + MetadataColumns.SPEC_ID_COLUMN_DOC), + Types.NestedField.required( + MetadataColumns.FILE_PATH_COLUMN_ID, + "delete_file_path", + Types.StringType.get(), + MetadataColumns.FILE_PATH_COLUMN_DOC)); + + if (partitionType.fields().size() > 0) { + return result; + } else { + // avoid returning an empty struct, which is not always supported.
+ // instead, drop the partition field + return TypeUtil.selectNot(result, Sets.newHashSet(MetadataColumns.PARTITION_COLUMN_ID)); + } + } + + public static class PositionDeletesBatchScan + extends SnapshotScan<BatchScan, ScanTask, ScanTaskGroup<ScanTask>> implements BatchScan { + + protected PositionDeletesBatchScan(Table table, Schema schema) { + super(table, schema, new TableScanContext()); + } + + protected PositionDeletesBatchScan(Table table, Schema schema, TableScanContext context) { + super(table, schema, context); + } + + @Override + protected PositionDeletesBatchScan newRefinedScan( + Table newTable, Schema newSchema, TableScanContext newContext) { + return new PositionDeletesBatchScan(newTable, newSchema, newContext); + } + + @Override + public CloseableIterable<ScanTaskGroup<ScanTask>> planTasks() { + return TableScanUtil.planTaskGroups( + planFiles(), targetSplitSize(), splitLookback(), splitOpenFileCost()); + } + + @Override + protected List<String> scanColumns() { + return context().returnColumnStats() ? DELETE_SCAN_WITH_STATS_COLUMNS : DELETE_SCAN_COLUMNS; + } + + @Override + protected CloseableIterable<ScanTask> doPlanFiles() { + String schemaString = SchemaParser.toJson(tableSchema()); + + // prepare transformed partition specs and caches + Map<Integer, PartitionSpec> transformedSpecs = + table().specs().values().stream() + .map(spec -> transformSpec(tableSchema(), spec)) + .collect(Collectors.toMap(PartitionSpec::specId, spec -> spec)); + + LoadingCache<Integer, ResidualEvaluator> residualCache = + partitionCacheOf( + transformedSpecs, + spec -> + ResidualEvaluator.of( + spec, + shouldIgnoreResiduals() ? Expressions.alwaysTrue() : filter(), + isCaseSensitive())); + + LoadingCache<Integer, String> specStringCache = + partitionCacheOf(transformedSpecs, PartitionSpecParser::toJson); + + LoadingCache<Integer, ManifestEvaluator> evalCache = + partitionCacheOf( + transformedSpecs, + spec -> ManifestEvaluator.forRowFilter(filter(), spec, isCaseSensitive())); + + // iterate through delete manifests + CloseableIterable<ManifestFile> deleteManifests = + CloseableIterable.withNoopClose(snapshot().deleteManifests(table().io())); + CloseableIterable<ManifestFile> filteredManifests = + CloseableIterable.filter( + deleteManifests, + manifest -> evalCache.get(manifest.partitionSpecId()).eval(manifest)); + Iterable<CloseableIterable<ScanTask>> results = + CloseableIterable.transform( + filteredManifests, + manifest -> { Review Comment: Would this logic read all manifests on the driver and suffer from the issue fixed by #5206? I think we did a trick for regular planning to make sure manifests were read in parallel. One way to check is to log the thread name before reading and see whether different threads read the manifests. I'd test this as-is and see if it needs changes.
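For illustration, a minimal sketch of that trick under these assumptions: `results` is the Iterable<CloseableIterable<ScanTask>> built above, and ParallelIterable/ThreadPools behave here as they do for regular planning (the file already imports org.apache.iceberg.util.ParallelIterable, which suggests this is the intent). Handing the per-manifest iterables to a worker pool keeps manifest reads off the calling thread:

    import java.util.concurrent.ExecutorService;
    import org.apache.iceberg.io.CloseableIterable;
    import org.apache.iceberg.util.ParallelIterable;
    import org.apache.iceberg.util.ThreadPools;

    // Inside doPlanFiles(), after building `results`: ParallelIterable pulls
    // each per-manifest iterable on the worker pool, so manifests are read
    // concurrently instead of serially on the driver.
    ExecutorService workerPool = ThreadPools.getWorkerPool();
    CloseableIterable<ScanTask> tasks = new ParallelIterable<>(results, workerPool);

To verify, a temporary log line inside the manifest-reading lambda (e.g. LOG.debug("Reading manifest {} in {}", manifest.path(), Thread.currentThread().getName()), assuming an slf4j LOG) should show different worker-thread names.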
########## core/src/main/java/org/apache/iceberg/SnapshotScan.java: ########## @@ -0,0 +1,166 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iceberg; + +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import org.apache.iceberg.events.Listeners; +import org.apache.iceberg.events.ScanEvent; +import org.apache.iceberg.expressions.ExpressionUtil; +import org.apache.iceberg.io.CloseableIterable; +import org.apache.iceberg.metrics.DefaultMetricsContext; +import org.apache.iceberg.metrics.ImmutableScanReport; +import org.apache.iceberg.metrics.ScanMetrics; +import org.apache.iceberg.metrics.ScanMetricsResult; +import org.apache.iceberg.metrics.ScanReport; +import org.apache.iceberg.metrics.Timer; +import org.apache.iceberg.relocated.com.google.common.base.MoreObjects; +import org.apache.iceberg.relocated.com.google.common.base.Preconditions; +import org.apache.iceberg.relocated.com.google.common.collect.Lists; +import org.apache.iceberg.relocated.com.google.common.collect.Maps; +import org.apache.iceberg.types.TypeUtil; +import org.apache.iceberg.util.DateTimeUtil; +import org.apache.iceberg.util.SnapshotUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This is a common base class to share code between different BaseScan implementations that handle + * scans of a particular snapshot. + * + * @param <ThisT> actual BaseScan implementation class type + * @param <T> type of ScanTask returned + * @param <G> type of ScanTaskGroup returned + */ +public abstract class SnapshotScan<ThisT, T extends ScanTask, G extends ScanTaskGroup<T>> + extends BaseScan<ThisT, T, G> { + + private static final Logger LOG = LoggerFactory.getLogger(SnapshotScan.class); + + private ScanMetrics scanMetrics; + + protected SnapshotScan(Table table, Schema schema, TableScanContext context) { + super(table, schema, context); + } + + protected Long snapshotId() { + return context().snapshotId(); + } + + protected abstract CloseableIterable<T> doPlanFiles(); + + protected ScanMetrics scanMetrics() { + if (scanMetrics == null) { + this.scanMetrics = ScanMetrics.of(new DefaultMetricsContext()); + } + + return scanMetrics; + } + + @Override + public Table table() { + return super.table(); + } + + public ThisT useSnapshot(long scanSnapshotId) { + Preconditions.checkArgument( + snapshotId() == null, "Cannot override snapshot, already set snapshot id=%s", snapshotId()); + Preconditions.checkArgument( + table().snapshot(scanSnapshotId) != null, + "Cannot find snapshot with ID %s", + scanSnapshotId); + TableScanContext newContext = context().useSnapshotId(scanSnapshotId); Review Comment: nit: Redundant variable? I think the call below fits on one line now, so we can get rid of this var. You could keep the temp var too, up to you; just make it consistent here and below.
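For illustration, the inlined form (assuming the method ends by handing the refined context to newRefinedScan(table(), schema(), ...), which is truncated in the diff above):

    public ThisT useSnapshot(long scanSnapshotId) {
      Preconditions.checkArgument(
          snapshotId() == null, "Cannot override snapshot, already set snapshot id=%s", snapshotId());
      Preconditions.checkArgument(
          table().snapshot(scanSnapshotId) != null,
          "Cannot find snapshot with ID %s",
          scanSnapshotId);
      // no temp var: refine the context and pass it straight through
      return newRefinedScan(table(), schema(), context().useSnapshotId(scanSnapshotId));
    }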
########## core/src/main/java/org/apache/iceberg/BasePositionDeletesScanTask.java: ########## @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iceberg; + +import org.apache.iceberg.expressions.ResidualEvaluator; + +/** Base implememntation of {@link PositionDeletesScanTask} */ Review Comment: Typo `implememntation` -> `implementation`? ########## core/src/main/java/org/apache/iceberg/MetadataColumns.java: ########## @@ -30,12 +30,11 @@ public class MetadataColumns { private MetadataColumns() {} // IDs Integer.MAX_VALUE - (1-100) are used for metadata columns + public static final int FILE_PATH_COLUMN_ID = Integer.MAX_VALUE - 1; Review Comment: +1 to this solution ########## core/src/main/java/org/apache/iceberg/PositionDeletesTable.java: ########## @@ -0,0 +1,221 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iceberg; + +import com.github.benmanes.caffeine.cache.Caffeine; +import com.github.benmanes.caffeine.cache.LoadingCache; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; +import org.apache.iceberg.expressions.Expressions; +import org.apache.iceberg.expressions.ManifestEvaluator; +import org.apache.iceberg.expressions.ResidualEvaluator; +import org.apache.iceberg.io.CloseableIterable; +import org.apache.iceberg.relocated.com.google.common.collect.Sets; +import org.apache.iceberg.types.TypeUtil; +import org.apache.iceberg.types.Types; +import org.apache.iceberg.util.ParallelIterable; +import org.apache.iceberg.util.TableScanUtil; + +/** + * A {@link Table} implementation whose {@link Scan} provides {@link PositionDeletesScanTask}, for + * reading of position delete files.
+ */ +public class PositionDeletesTable extends BaseMetadataTable { + + private final Schema schema; + + PositionDeletesTable(Table table) { + super(table, table.name() + ".position_deletes"); + this.schema = calculateSchema(); + } + + PositionDeletesTable(Table table, String name) { + super(table, name); + this.schema = calculateSchema(); + } + + @Override + MetadataTableType metadataTableType() { + return MetadataTableType.POSITION_DELETES; + } + + @Override + public TableScan newScan() { + throw new UnsupportedOperationException( + "Cannot create TableScan from table of type POSITION_DELETES"); + } + + @Override + public BatchScan newBatchScan() { + return new PositionDeletesBatchScan(table(), schema()); + } + + @Override + public Schema schema() { + return schema; + } + + private Schema calculateSchema() { + Types.StructType partitionType = Partitioning.partitionType(table()); + Schema result = + new Schema( + MetadataColumns.DELETE_FILE_PATH, + MetadataColumns.DELETE_FILE_POS, + Types.NestedField.optional( + MetadataColumns.DELETE_FILE_ROW_FIELD_ID, + "row", + table().schema().asStruct(), Review Comment: Once we add support to engines, we will have to test schema evolution. ########## core/src/main/java/org/apache/iceberg/PositionDeletesTable.java: ########## @@ -0,0 +1,221 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iceberg; + +import com.github.benmanes.caffeine.cache.Caffeine; +import com.github.benmanes.caffeine.cache.LoadingCache; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; +import org.apache.iceberg.expressions.Expressions; +import org.apache.iceberg.expressions.ManifestEvaluator; +import org.apache.iceberg.expressions.ResidualEvaluator; +import org.apache.iceberg.io.CloseableIterable; +import org.apache.iceberg.relocated.com.google.common.collect.Sets; +import org.apache.iceberg.types.TypeUtil; +import org.apache.iceberg.types.Types; +import org.apache.iceberg.util.ParallelIterable; +import org.apache.iceberg.util.TableScanUtil; + +/** + * A {@link Table} implementation whose {@link Scan} provides {@link PositionDeletesScanTask}, for + * reading of position delete files. 
+ */ +public class PositionDeletesTable extends BaseMetadataTable { + + private final Schema schema; + + PositionDeletesTable(Table table) { + super(table, table.name() + ".position_deletes"); + this.schema = calculateSchema(); + } + + PositionDeletesTable(Table table, String name) { + super(table, name); + this.schema = calculateSchema(); + } + + @Override + MetadataTableType metadataTableType() { + return MetadataTableType.POSITION_DELETES; + } + + @Override + public TableScan newScan() { + throw new UnsupportedOperationException( + "Cannot create TableScan from table of type POSITION_DELETES"); + } + + @Override + public BatchScan newBatchScan() { + return new PositionDeletesBatchScan(table(), schema()); + } + + @Override + public Schema schema() { + return schema; + } + + private Schema calculateSchema() { + Types.StructType partitionType = Partitioning.partitionType(table()); + Schema result = + new Schema( + MetadataColumns.DELETE_FILE_PATH, + MetadataColumns.DELETE_FILE_POS, + Types.NestedField.optional( + MetadataColumns.DELETE_FILE_ROW_FIELD_ID, + "row", Review Comment: Shall we use `MetadataColumns.DELETE_FILE_ROW_FIELD_NAME`? ########## core/src/main/java/org/apache/iceberg/util/PartitionUtil.java: ########## @@ -91,7 +91,7 @@ private PartitionUtil() {} } // adapts the provided partition data to match the table partition type - private static StructLike coercePartition( + public static StructLike coercePartition( Review Comment: Do we still need this as public? ########## core/src/main/java/org/apache/iceberg/PositionDeletesTable.java: ########## @@ -0,0 +1,221 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iceberg; + +import com.github.benmanes.caffeine.cache.Caffeine; +import com.github.benmanes.caffeine.cache.LoadingCache; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; +import org.apache.iceberg.expressions.Expressions; +import org.apache.iceberg.expressions.ManifestEvaluator; +import org.apache.iceberg.expressions.ResidualEvaluator; +import org.apache.iceberg.io.CloseableIterable; +import org.apache.iceberg.relocated.com.google.common.collect.Sets; +import org.apache.iceberg.types.TypeUtil; +import org.apache.iceberg.types.Types; +import org.apache.iceberg.util.ParallelIterable; +import org.apache.iceberg.util.TableScanUtil; + +/** + * A {@link Table} implementation whose {@link Scan} provides {@link PositionDeletesScanTask}, for + * reading of position delete files. 
+ */ +public class PositionDeletesTable extends BaseMetadataTable { + + private final Schema schema; + + PositionDeletesTable(Table table) { + super(table, table.name() + ".position_deletes"); + this.schema = calculateSchema(); + } + + PositionDeletesTable(Table table, String name) { + super(table, name); + this.schema = calculateSchema(); + } + + @Override + MetadataTableType metadataTableType() { + return MetadataTableType.POSITION_DELETES; + } + + @Override + public TableScan newScan() { + throw new UnsupportedOperationException( + "Cannot create TableScan from table of type POSITION_DELETES"); + } + + @Override + public BatchScan newBatchScan() { + return new PositionDeletesBatchScan(table(), schema()); + } + + @Override + public Schema schema() { + return schema; + } + + private Schema calculateSchema() { + Types.StructType partitionType = Partitioning.partitionType(table()); + Schema result = + new Schema( + MetadataColumns.DELETE_FILE_PATH, + MetadataColumns.DELETE_FILE_POS, + Types.NestedField.optional( + MetadataColumns.DELETE_FILE_ROW_FIELD_ID, + "row", + table().schema().asStruct(), + MetadataColumns.DELETE_FILE_ROW_DOC), + Types.NestedField.required( + MetadataColumns.PARTITION_COLUMN_ID, + "partition", + partitionType, + "Partition that position delete row belongs to"), + Types.NestedField.required( + MetadataColumns.SPEC_ID_COLUMN_ID, + "spec_id", + Types.IntegerType.get(), + MetadataColumns.SPEC_ID_COLUMN_DOC), + Types.NestedField.required( + MetadataColumns.FILE_PATH_COLUMN_ID, + "delete_file_path", + Types.StringType.get(), + MetadataColumns.FILE_PATH_COLUMN_DOC)); + + if (partitionType.fields().size() > 0) { + return result; + } else { + // avoid returning an empty struct, which is not always supported. + // instead, drop the partition field + return TypeUtil.selectNot(result, Sets.newHashSet(MetadataColumns.PARTITION_COLUMN_ID)); + } + } + + public static class PositionDeletesBatchScan + extends SnapshotScan<BatchScan, ScanTask, ScanTaskGroup<ScanTask>> implements BatchScan { + + protected PositionDeletesBatchScan(Table table, Schema schema) { + super(table, schema, new TableScanContext()); + } + + protected PositionDeletesBatchScan(Table table, Schema schema, TableScanContext context) { + super(table, schema, context); + } + + @Override + protected PositionDeletesBatchScan newRefinedScan( + Table newTable, Schema newSchema, TableScanContext newContext) { + return new PositionDeletesBatchScan(newTable, newSchema, newContext); + } + + @Override + public CloseableIterable<ScanTaskGroup<ScanTask>> planTasks() { + return TableScanUtil.planTaskGroups( + planFiles(), targetSplitSize(), splitLookback(), splitOpenFileCost()); + } + + @Override + protected List<String> scanColumns() { + return context().returnColumnStats() ? DELETE_SCAN_WITH_STATS_COLUMNS : DELETE_SCAN_COLUMNS; + } + + @Override + protected CloseableIterable<ScanTask> doPlanFiles() { + String schemaString = SchemaParser.toJson(tableSchema()); + + // prepare transformed partition specs and caches + Map<Integer, PartitionSpec> transformedSpecs = + table().specs().values().stream() + .map(spec -> transformSpec(tableSchema(), spec)) + .collect(Collectors.toMap(PartitionSpec::specId, spec -> spec)); + + LoadingCache<Integer, ResidualEvaluator> residualCache = + partitionCacheOf( + transformedSpecs, + spec -> + ResidualEvaluator.of( + spec, + shouldIgnoreResiduals() ? 
Expressions.alwaysTrue() : filter(), + isCaseSensitive())); + + LoadingCache<Integer, String> specStringCache = + partitionCacheOf(transformedSpecs, PartitionSpecParser::toJson); + + LoadingCache<Integer, ManifestEvaluator> evalCache = + partitionCacheOf( + transformedSpecs, + spec -> ManifestEvaluator.forRowFilter(filter(), spec, isCaseSensitive())); + + // iterate through delete manifests + CloseableIterable<ManifestFile> deleteManifests = + CloseableIterable.withNoopClose(snapshot().deleteManifests(table().io())); + CloseableIterable<ManifestFile> filteredManifests = + CloseableIterable.filter( + deleteManifests, + manifest -> evalCache.get(manifest.partitionSpecId()).eval(manifest)); + Iterable<CloseableIterable<ScanTask>> results = + CloseableIterable.transform( + filteredManifests, + manifest -> { + // Filter partitions + CloseableIterable<ManifestEntry<DeleteFile>> deleteFileEntries = + ManifestFiles.readDeleteManifest(manifest, tableOps().io(), transformedSpecs) Review Comment: nit: Shall we use `table().io()` as `tableOps()` has been deprecated? ########## core/src/main/java/org/apache/iceberg/PositionDeletesTable.java: ########## @@ -0,0 +1,372 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.iceberg; + +import static org.apache.iceberg.MetadataColumns.POSITION_DELETE_TABLE_FILE_PATH; +import static org.apache.iceberg.MetadataColumns.POSITION_DELETE_TABLE_PARTITION_FIELD_ID; +import static org.apache.iceberg.MetadataColumns.POSITION_DELETE_TABLE_SPEC_ID; + +import java.util.Map; +import java.util.function.BiFunction; +import java.util.stream.Collectors; +import org.apache.iceberg.expressions.Expression; +import org.apache.iceberg.expressions.Expressions; +import org.apache.iceberg.expressions.ResidualEvaluator; +import org.apache.iceberg.io.CloseableIterable; +import org.apache.iceberg.relocated.com.google.common.base.MoreObjects; +import org.apache.iceberg.relocated.com.google.common.collect.Maps; +import org.apache.iceberg.relocated.com.google.common.collect.Sets; +import org.apache.iceberg.types.Type; +import org.apache.iceberg.types.TypeUtil; +import org.apache.iceberg.types.Types; +import org.apache.iceberg.util.Pair; +import org.apache.iceberg.util.ParallelIterable; +import org.apache.iceberg.util.PartitionUtil; +import org.apache.iceberg.util.TableScanUtil; + +public class PositionDeletesTable extends BaseMetadataTable { + + private final Schema schema; + + PositionDeletesTable(TableOperations ops, Table table) { + super(ops, table, table.name() + ".position_deletes"); + this.schema = calculateSchema(); + } + + PositionDeletesTable(TableOperations ops, Table table, String name) { + super(ops, table, name); + this.schema = calculateSchema(); + } + + @Override + MetadataTableType metadataTableType() { + return MetadataTableType.POSITION_DELETES; + } + + @Override + public TableScan newScan() { + throw new UnsupportedOperationException( + "Cannot create TableScan from table of type POSITION_DELETES"); + } + + @Override + public BatchScan newBatchScan() { + return new PositionDeletesTableScan(operations(), table(), schema()); + } + + @Override + public Schema schema() { + return schema; + } + + private Schema calculateSchema() { + Types.StructType partitionType = Partitioning.partitionType(table()); + Schema result = + new Schema( + MetadataColumns.DELETE_FILE_PATH, + MetadataColumns.DELETE_FILE_POS, + Types.NestedField.optional( + MetadataColumns.DELETE_FILE_ROW_FIELD_ID, + "row", + table().schema().asStruct(), + MetadataColumns.DELETE_FILE_ROW_DOC), + Types.NestedField.required( + POSITION_DELETE_TABLE_PARTITION_FIELD_ID, + "partition", + partitionType, + "Partition that position delete row belongs to"), + Types.NestedField.required( + POSITION_DELETE_TABLE_SPEC_ID, + "spec_id", + Types.IntegerType.get(), + "Spec ID of the file that the position delete row belongs to"), + Types.NestedField.required( + POSITION_DELETE_TABLE_FILE_PATH, + "delete_file_path", + Types.StringType.get(), + "Spec ID of the file that the position delete row belongs to")); + + if (partitionType.fields().size() > 0) { + return result; + } else { + // avoid returning an empty struct, which is not always supported. 
instead, drop the partition + // field + return TypeUtil.selectNot(result, Sets.newHashSet(POSITION_DELETE_TABLE_PARTITION_FIELD_ID)); + } + } + + public static class PositionDeletesTableScan + extends SnapshotScan<BatchScan, ScanTask, ScanTaskGroup<ScanTask>> implements BatchScan { + + protected PositionDeletesTableScan(TableOperations ops, Table table, Schema schema) { + super(ops, table, schema, new TableScanContext()); + } + + protected PositionDeletesTableScan( + TableOperations ops, Table table, Schema schema, TableScanContext context) { + super(ops, table, schema, context); + } + + @Override + protected PositionDeletesTableScan newRefinedScan( + TableOperations newOps, Table newTable, Schema newSchema, TableScanContext newContext) { + return new PositionDeletesTableScan(newOps, newTable, newSchema, newContext); + } + + @Override + public CloseableIterable<ScanTaskGroup<ScanTask>> planTasks() { + CloseableIterable<ScanTask> scanTasks = planFiles(); + return TableScanUtil.planTaskGroups( + scanTasks, targetSplitSize(), splitLookback(), splitOpenFileCost()); + } + + @Override + protected CloseableIterable<ScanTask> doPlanFiles() { + Expression rowFilter = context().rowFilter(); + String schemaString = SchemaParser.toJson(tableSchema()); + + Map<Integer, PartitionSpec> transformedSpecs = + table().specs().entrySet().stream() + .map( + e -> + Pair.of( + e.getKey(), BaseMetadataTable.transformSpec(tableSchema(), e.getValue()))) + .collect(Collectors.toMap(Pair::first, Pair::second)); + + CloseableIterable<ManifestFile> deleteManifests = + CloseableIterable.withNoopClose(snapshot().deleteManifests(tableOps().io())); + CloseableIterable<CloseableIterable<ScanTask>> results = + CloseableIterable.transform( + deleteManifests, + m -> { + // Filter partitions + CloseableIterable<ManifestEntry<DeleteFile>> deleteFileEntries = + ManifestFiles.readDeleteManifest(m, tableOps().io(), transformedSpecs) + .caseSensitive(isCaseSensitive()) + .filterRows(rowFilter) + .liveEntries(); + + // Filter delete file type + CloseableIterable<ManifestEntry<DeleteFile>> positionDeleteEntries = + CloseableIterable.filter( + deleteFileEntries, + entry -> entry.file().content().equals(FileContent.POSITION_DELETES)); + + Types.StructType partitionType = Partitioning.partitionType(table()); + + return CloseableIterable.transform( Review Comment: Yep, looks correct.
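For reference, since the diff truncates at that transform, a sketch of the per-entry mapping it presumably performs, built only from names already in scope above (`transformedSpecs`, `rowFilter`, `schemaString`, `positionDeleteEntries`) and the BasePositionDeletesScanTask constructor quoted earlier; the exact body is an assumption:

    // Hypothetical completion of the truncated transform: one scan task per
    // live position-delete entry, with spec and residuals resolved by spec ID.
    return CloseableIterable.transform(
        positionDeleteEntries,
        entry -> {
          PartitionSpec spec = transformedSpecs.get(entry.file().specId());
          String specString = PartitionSpecParser.toJson(spec);
          ResidualEvaluator residuals = ResidualEvaluator.of(spec, rowFilter, isCaseSensitive());
          // copy() detaches the file from the reused manifest entry
          return new BasePositionDeletesScanTask(
              entry.file().copy(), schemaString, specString, residuals);
        });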