szehon-ho commented on code in PR #6365:
URL: https://github.com/apache/iceberg/pull/6365#discussion_r1052778929


##########
core/src/main/java/org/apache/iceberg/PositionDeletesTable.java:
##########
@@ -0,0 +1,366 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg;
+
+import static 
org.apache.iceberg.MetadataColumns.POSITION_DELETE_TABLE_FILE_PATH;
+import static 
org.apache.iceberg.MetadataColumns.POSITION_DELETE_TABLE_PARTITION_FIELD_ID;
+import static org.apache.iceberg.MetadataColumns.POSITION_DELETE_TABLE_SPEC_ID;
+
+import java.util.Map;
+import java.util.function.BiFunction;
+import java.util.stream.Collectors;
+import org.apache.iceberg.expressions.Expression;
+import org.apache.iceberg.expressions.Expressions;
+import org.apache.iceberg.expressions.ResidualEvaluator;
+import org.apache.iceberg.io.CloseableIterable;
+import org.apache.iceberg.relocated.com.google.common.base.MoreObjects;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.relocated.com.google.common.collect.Sets;
+import org.apache.iceberg.types.Type;
+import org.apache.iceberg.types.TypeUtil;
+import org.apache.iceberg.types.Types;
+import org.apache.iceberg.util.Pair;
+import org.apache.iceberg.util.ParallelIterable;
+import org.apache.iceberg.util.PartitionUtil;
+
+public class PositionDeletesTable extends BaseTable {
+
+  private final Table table;
+
+  PositionDeletesTable(TableOperations ops, Table table) {
+    super(ops, table.name() + ".position_deletes");
+    this.table = table;
+  }
+
+  PositionDeletesTable(TableOperations ops, Table table, String name) {
+    super(ops, name);
+    this.table = table;
+  }
+
+  protected Table table() {
+    return table;
+  }
+
+  @Override
+  public TableScan newScan() {
+    throw new UnsupportedOperationException(
+        "Cannot create TableScan from table of type POSITION_DELETES");
+  }
+
+  @Override
+  public BatchScan newBatchScan() {
+    return new PositionDeletesTableScan(operations(), table(), schema());
+  }
+
+  @Override
+  public Schema schema() {
+    return PositionDeletesTable.schema(table(), 
Partitioning.partitionType(table()));
+  }
+
+  public static Schema schema(Table table, Types.StructType partitionType) {
+    Schema result =
+        new Schema(
+            MetadataColumns.DELETE_FILE_PATH,
+            MetadataColumns.DELETE_FILE_POS,
+            Types.NestedField.optional(
+                MetadataColumns.DELETE_FILE_ROW_FIELD_ID,
+                "row",
+                table.schema().asStruct(),
+                MetadataColumns.DELETE_FILE_ROW_DOC),
+            Types.NestedField.required(
+                POSITION_DELETE_TABLE_PARTITION_FIELD_ID,
+                "partition",
+                partitionType,
+                "Partition that position delete row belongs to"),
+            Types.NestedField.required(
+                POSITION_DELETE_TABLE_SPEC_ID,
+                "spec_id",
+                Types.IntegerType.get(),
+                "Spec ID of the file that the position delete row belongs to"),
+            Types.NestedField.required(
+                POSITION_DELETE_TABLE_FILE_PATH,
+                "delete_file_path",
+                Types.StringType.get(),
+                "Path of the delete file that the position delete row belongs to"));
+
+    if (partitionType.fields().size() > 0) {
+      return result;
+    } else {
+      // avoid returning an empty struct, which is not always supported. 
instead, drop the partition
+      // field
+      return TypeUtil.selectNot(result, 
Sets.newHashSet(POSITION_DELETE_TABLE_PARTITION_FIELD_ID));
+    }
+  }
+
+  public static class PositionDeletesTableScan
+      extends AbstractTableScan<
+          BatchScan, org.apache.iceberg.ScanTask, 
ScanTaskGroup<org.apache.iceberg.ScanTask>>
+      implements BatchScan {
+
+    protected PositionDeletesTableScan(TableOperations ops, Table table, 
Schema schema) {
+      super(ops, table, schema, new TableScanContext());
+    }
+
+    protected PositionDeletesTableScan(
+        TableOperations ops, Table table, Schema schema, TableScanContext 
context) {
+      super(ops, table, schema, context);
+    }
+
+    @Override
+    protected PositionDeletesTableScan newRefinedScan(
+        TableOperations newOps, Table newTable, Schema newSchema, 
TableScanContext newContext) {
+      return new PositionDeletesTableScan(newOps, newTable, newSchema, 
newContext);
+    }
+
+    @Override
+    protected CloseableIterable<org.apache.iceberg.ScanTask> doPlanFiles() {
+      Expression rowFilter = context().rowFilter();
+      String schemaString = SchemaParser.toJson(tableSchema());
+
+      Map<Integer, PartitionSpec> transformedSpecs =
+          table().specs().entrySet().stream()
+              .map(
+                  e ->
+                      Pair.of(
+                          e.getKey(), 
BaseMetadataTable.transformSpec(tableSchema(), e.getValue())))
+              .collect(Collectors.toMap(Pair::first, Pair::second));
+
+      CloseableIterable<ManifestFile> deleteManifests =

Review Comment:
   I think it should be done — transformSpec does this, and 
TestMetadataTableScan should also test this path.



##########
core/src/test/java/org/apache/iceberg/TestSplitPlanning.java:
##########
@@ -234,6 +234,22 @@ public void testSplitPlanningWithOffsetsUnableToSplit() {
         "We should still only get 2 tasks per file", 32, 
Iterables.size(scan.planTasks()));
   }
 
+  @Test
+  public void testBasicSplitPlanningDeleteFiles() {

Review Comment:
   I think not, but I could be wrong — I don't see it being set in the 
DeleteFile builder:  
https://github.com/apache/iceberg/blob/master/core/src/main/java/org/apache/iceberg/FileMetadata.java#L38



##########
core/src/test/java/org/apache/iceberg/hadoop/TestStaticTable.java:
##########
@@ -71,11 +71,20 @@ public void testCannotDoIncrementalScanOnMetadataTable() {
 
     for (MetadataTableType type : MetadataTableType.values()) {
       Table staticTable = getStaticTable(type);
-      AssertHelpers.assertThrows(
-          "Static tables do not support incremental scans",
-          UnsupportedOperationException.class,
-          String.format("Cannot incrementally scan table of type %s", type),
-          () -> staticTable.newScan().appendsAfter(1));
+
+      if (type.equals(MetadataTableType.POSITION_DELETES)) {
+        AssertHelpers.assertThrows(
+            "POSITION_DELETES table does not support TableScan",

Review Comment:
   So I think with the new interface adoption, PositionDeletesTable is 
implementing newBatchScan() instead of newScan(), and throwing an exception for 
the latter. (As TableScan is bound to FileScanTask.)



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org
For additional commands, e-mail: issues-h...@iceberg.apache.org

Reply via email to