szehon-ho commented on code in PR #6365:
URL: https://github.com/apache/iceberg/pull/6365#discussion_r1042897403


##########
core/src/main/java/org/apache/iceberg/PositionDeletesTable.java:
##########
@@ -0,0 +1,366 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg;
+
+import static 
org.apache.iceberg.MetadataColumns.POSITION_DELETE_TABLE_FILE_PATH;
+import static 
org.apache.iceberg.MetadataColumns.POSITION_DELETE_TABLE_PARTITION_FIELD_ID;
+import static org.apache.iceberg.MetadataColumns.POSITION_DELETE_TABLE_SPEC_ID;
+
+import java.util.Map;
+import java.util.function.BiFunction;
+import java.util.stream.Collectors;
+import org.apache.iceberg.expressions.Expression;
+import org.apache.iceberg.expressions.Expressions;
+import org.apache.iceberg.expressions.ResidualEvaluator;
+import org.apache.iceberg.io.CloseableIterable;
+import org.apache.iceberg.relocated.com.google.common.base.MoreObjects;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.relocated.com.google.common.collect.Sets;
+import org.apache.iceberg.types.Type;
+import org.apache.iceberg.types.TypeUtil;
+import org.apache.iceberg.types.Types;
+import org.apache.iceberg.util.Pair;
+import org.apache.iceberg.util.ParallelIterable;
+import org.apache.iceberg.util.PartitionUtil;
+
+public class PositionDeletesTable extends BaseTable {
+
  // Underlying data table whose position delete files this metadata table exposes.
  private final Table table;

  PositionDeletesTable(TableOperations ops, Table table) {
    // Default metadata-table name: the base table's name plus a ".position_deletes" suffix.
    super(ops, table.name() + ".position_deletes");
    this.table = table;
  }
+
  /** Creates a position-deletes metadata table registered under an explicit name. */
  PositionDeletesTable(TableOperations ops, Table table, String name) {
    super(ops, name);
    this.table = table;
  }
+
  /** Returns the underlying data table that this metadata table wraps. */
  protected Table table() {
    return table;
  }
+
  @Override
  public TableScan newScan() {
    // Position deletes must be planned through newBatchScan(); the TableScan API
    // cannot represent delete-file scan tasks.
    throw new UnsupportedOperationException(
        "Cannot create TableScan from table of type POSITION_DELETES");
  }
+
  /** Returns a batch scan over the base table's position delete files. */
  @Override
  public BatchScan newBatchScan() {
    return new PositionDeletesTableScan(operations(), table(), schema());
  }
+
  @Override
  public Schema schema() {
    // Build the schema using the unified partition type across all of the table's specs.
    return PositionDeletesTable.schema(table(), Partitioning.partitionType(table()));
  }
+
+  public static Schema schema(Table table, Types.StructType partitionType) {
+    Schema result =
+        new Schema(
+            MetadataColumns.DELETE_FILE_PATH,
+            MetadataColumns.DELETE_FILE_POS,
+            Types.NestedField.optional(
+                MetadataColumns.DELETE_FILE_ROW_FIELD_ID,
+                "row",
+                table.schema().asStruct(),
+                MetadataColumns.DELETE_FILE_ROW_DOC),
+            Types.NestedField.required(
+                POSITION_DELETE_TABLE_PARTITION_FIELD_ID,
+                "partition",
+                partitionType,
+                "Partition that position delete row belongs to"),
+            Types.NestedField.required(
+                POSITION_DELETE_TABLE_SPEC_ID,
+                "spec_id",
+                Types.IntegerType.get(),
+                "Spec ID of the file that the position delete row belongs to"),
+            Types.NestedField.required(
+                POSITION_DELETE_TABLE_FILE_PATH,
+                "delete_file_path",
+                Types.StringType.get(),
+                "Spec ID of the file that the position delete row belongs 
to"));
+
+    if (partitionType.fields().size() > 0) {
+      return result;
+    } else {
+      // avoid returning an empty struct, which is not always supported. 
instead, drop the partition
+      // field
+      return TypeUtil.selectNot(result, 
Sets.newHashSet(POSITION_DELETE_TABLE_PARTITION_FIELD_ID));
+    }
+  }
+
+  public static class PositionDeletesTableScan
+      extends AbstractTableScan<
+          BatchScan, org.apache.iceberg.ScanTask, 
ScanTaskGroup<org.apache.iceberg.ScanTask>>
+      implements BatchScan {
+
+    protected PositionDeletesTableScan(TableOperations ops, Table table, 
Schema schema) {
+      super(ops, table, schema, new TableScanContext());
+    }
+
+    protected PositionDeletesTableScan(
+        TableOperations ops, Table table, Schema schema, TableScanContext 
context) {
+      super(ops, table, schema, context);
+    }
+
+    @Override
+    protected PositionDeletesTableScan newRefinedScan(
+        TableOperations newOps, Table newTable, Schema newSchema, 
TableScanContext newContext) {
+      return new PositionDeletesTableScan(newOps, newTable, newSchema, 
newContext);
+    }
+
+    @Override
+    protected CloseableIterable<org.apache.iceberg.ScanTask> doPlanFiles() {
+      Expression rowFilter = context().rowFilter();
+      String schemaString = SchemaParser.toJson(tableSchema());
+
+      Map<Integer, PartitionSpec> transformedSpecs =
+          table().specs().entrySet().stream()
+              .map(
+                  e ->
+                      Pair.of(
+                          e.getKey(), 
BaseMetadataTable.transformSpec(tableSchema(), e.getValue())))
+              .collect(Collectors.toMap(Pair::first, Pair::second));
+
+      CloseableIterable<ManifestFile> deleteManifests =
+          
CloseableIterable.withNoopClose(snapshot().deleteManifests(tableOps().io()));
+      CloseableIterable<CloseableIterable<org.apache.iceberg.ScanTask>> 
results =
+          CloseableIterable.transform(
+              deleteManifests,
+              m -> {
+                // Filter partitions
+                CloseableIterable<ManifestEntry<DeleteFile>> deleteFileEntries 
=
+                    ManifestFiles.readDeleteManifest(m, tableOps().io(), 
transformedSpecs)
+                        .caseSensitive(isCaseSensitive())
+                        .filterRows(rowFilter)
+                        .liveEntries();
+
+                // Filter delete file type
+                CloseableIterable<ManifestEntry<DeleteFile>> 
positionDeleteEntries =
+                    CloseableIterable.filter(
+                        deleteFileEntries,
+                        entry -> 
entry.file().content().equals(FileContent.POSITION_DELETES));
+
+                Types.StructType partitionType = 
Partitioning.partitionType(table());
+
+                return CloseableIterable.transform(
+                    positionDeleteEntries,
+                    entry -> {
+                      PartitionSpec spec = 
transformedSpecs.get(entry.file().specId());
+                      String specString = PartitionSpecParser.toJson(spec);
+                      return new ScanTask(
+                          entry.file(),
+                          schemaString,
+                          specString,
+                          ResidualEvaluator.of(
+                              spec, Expressions.alwaysTrue(), 
context().caseSensitive()),
+                          partitionType);
+                    });
+              });
+
+      return new ParallelIterable<>(results, planExecutor());
+    }
+  }
+
  /** Scan task for position delete files */
  public static class ScanTask extends BaseContentScanTask<ScanTask, DeleteFile> {

    // NOTE(review): these mirror arguments already forwarded to the superclass constructor;
    // presumably BaseContentScanTask does not expose them with the required visibility — confirm.
    private final String schemaString;
    private final String specString;
    private final ResidualEvaluator evaluator;
    // Unified partition type across all of the base table's specs; may be null.
    private final Types.StructType partitionType;

    public ScanTask(
        DeleteFile file,
        String schemaString,
        String specString,
        ResidualEvaluator evaluator,
        Types.StructType partitionType) {
      super(file, schemaString, specString, evaluator);
      this.schemaString = schemaString;
      this.specString = specString;
      this.evaluator = evaluator;
      this.partitionType = partitionType;
    }

    @Override
    public long sizeBytes() {
      // A position delete file is read in full, so its size is its content length.
      return length();
    }

    String schemaString() {
      return schemaString;
    }

    String specString() {
      return specString;
    }

    ResidualEvaluator evaluator() {
      return evaluator;
    }

    Types.StructType partitionType() {
      return partitionType;
    }

    @Override
    protected ScanTask self() {
      return this;
    }

    @Override
    protected ScanTask newSplitTask(ScanTask parentTask, long offset, long length) {
      return new SplitScanTask(parentTask, offset, length);
    }

    /**
     * Utility method to get constant values of rows to be scanned by this task. The following
     * columns are constants of each task: spec_id, partition, delete_file_path.
     *
     * @return a map of column id to constant values returned by this task type
     */
    public Map<Integer, ?> constantsMap() {
      // Identity conversion: constants are returned as plain Iceberg-typed objects.
      return constantsMap((type, constant) -> constant);
    }

    /**
     * Utility method to get constant values of rows to be scanned by this task. The following
     * columns are constants of each task: spec_id, partition, delete_file_path.
     *
     * @param convertConstant callback to convert from an Iceberg typed object to an engine specific
     *     object
     * @return a map of column id to constant values returned by this task type
     */
    public Map<Integer, ?> constantsMap(BiFunction<Type, Object, Object> convertConstant) {
      Map<Integer, Object> idToConstant = Maps.newHashMap();
      StructLike partitionData = file().partition();

      // add delete_file_path constant column
      idToConstant.put(
          POSITION_DELETE_TABLE_FILE_PATH,
          convertConstant.apply(Types.StringType.get(), file().path()));

      // add spec_id constant column
      idToConstant.put(
          POSITION_DELETE_TABLE_SPEC_ID,
          convertConstant.apply(Types.IntegerType.get(), file().specId()));

      // add partition constant column
      if (partitionType != null) {
        if (partitionType.fields().size() > 0) {
          // Coerce this file's spec-specific partition data to the table-wide partition type
          // before converting it for the engine.
          StructLike coercedPartition =
              PartitionUtil.coercePartition(partitionType, spec(), partitionData);
          idToConstant.put(
              POSITION_DELETE_TABLE_PARTITION_FIELD_ID,
              convertConstant.apply(partitionType, coercedPartition));
        } else {
          // use null as some query engines may not be able to handle empty structs
          idToConstant.put(POSITION_DELETE_TABLE_PARTITION_FIELD_ID, null);
        }
      }

      return idToConstant;
    }
  }
+
+  public static class SplitScanTask extends ScanTask
+      implements ContentScanTask<DeleteFile>, MergeableScanTask<SplitScanTask> 
{
+
+    private final ScanTask parentTask;
+    private final long offset;
+    private final long len;
+
+    protected SplitScanTask(ScanTask parentTask, long offset, long length) {
+      super(

Review Comment:
   Note: SplitScanTask needs to be an instance of ScanTask because 
ScanTask.newSplitTask needs to return ScanTask to satisfy the type.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org
For additional commands, e-mail: issues-h...@iceberg.apache.org

Reply via email to