lirui-apache commented on code in PR #10176:
URL: https://github.com/apache/iceberg/pull/10176#discussion_r1705560828


##########
data/src/main/java/org/apache/iceberg/data/PartitionStatsWriterUtil.java:
##########
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.data;
+
+import static org.apache.iceberg.TableProperties.DEFAULT_FILE_FORMAT;
+import static org.apache.iceberg.TableProperties.DEFAULT_FILE_FORMAT_DEFAULT;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.util.Iterator;
+import org.apache.iceberg.FileFormat;
+import org.apache.iceberg.HasTableOperations;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.PartitionStatsUtil;
+import org.apache.iceberg.Partitioning;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.data.parquet.GenericParquetReaders;
+import org.apache.iceberg.encryption.EncryptedFiles;
+import org.apache.iceberg.encryption.EncryptionKeyMetadata;
+import org.apache.iceberg.io.CloseableIterable;
+import org.apache.iceberg.io.DataWriter;
+import org.apache.iceberg.io.FileWriterFactory;
+import org.apache.iceberg.io.InputFile;
+import org.apache.iceberg.io.OutputFile;
+import org.apache.iceberg.parquet.Parquet;
+
+public final class PartitionStatsWriterUtil {
+
+  private PartitionStatsWriterUtil() {}
+
+  public static OutputFile newPartitionStatsFile(Table table, long snapshotId) {
+    FileFormat fileFormat =
+        FileFormat.fromString(
+            table.properties().getOrDefault(DEFAULT_FILE_FORMAT, DEFAULT_FILE_FORMAT_DEFAULT));
+    return table
+        .io()
+        .newOutputFile(
+            ((HasTableOperations) table)
+                .operations()
+                .metadataFileLocation(
+                    fileFormat.addExtension(String.format("partition-stats-%d", snapshotId))));
+  }
+
+  public static void writePartitionStatsFile(
+      Table table, Iterator<Record> records, OutputFile outputFile) {
+    Schema dataSchema = PartitionStatsUtil.schema(Partitioning.partitionType(table));
+    FileFormat fileFormat =

Review Comment:
   The reader only supports Parquet at the moment. Why do we make the writer support different formats?
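   For reference, a minimal sketch of what pinning the writer to Parquet could look like, reusing the `Parquet.writeData` and `GenericParquetWriter` APIs the data module already provides; the method name and exact signature here are illustrative, not part of this PR:

   ```java
   import java.io.IOException;
   import java.io.UncheckedIOException;
   import java.util.Iterator;
   import org.apache.iceberg.PartitionSpec;
   import org.apache.iceberg.Schema;
   import org.apache.iceberg.data.Record;
   import org.apache.iceberg.data.parquet.GenericParquetWriter;
   import org.apache.iceberg.io.DataWriter;
   import org.apache.iceberg.io.OutputFile;
   import org.apache.iceberg.parquet.Parquet;

   // Hypothetical Parquet-only variant: no FileFormat lookup, the writer is
   // pinned to the single format the reader currently understands.
   static void writeParquetStatsFile(
       Schema dataSchema, Iterator<Record> records, OutputFile outputFile) {
     try (DataWriter<Record> writer =
         Parquet.writeData(outputFile)
             .schema(dataSchema)
             .createWriterFunc(GenericParquetWriter::buildWriter)
             .withSpec(PartitionSpec.unpartitioned()) // the stats file itself is unpartitioned
             .overwrite()
             .build()) {
       records.forEachRemaining(writer::write);
     } catch (IOException e) {
       throw new UncheckedIOException(e);
     }
   }
   ```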



##########
data/src/main/java/org/apache/iceberg/data/PartitionStatsGenerator.java:
##########
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.data;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import org.apache.iceberg.ImmutableGenericPartitionStatisticsFile;
+import org.apache.iceberg.ManifestFile;
+import org.apache.iceberg.PartitionStatisticsFile;
+import org.apache.iceberg.PartitionStatsUtil;
+import org.apache.iceberg.Partitioning;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.Snapshot;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.io.CloseableIterable;
+import org.apache.iceberg.io.OutputFile;
+import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
+import org.apache.iceberg.relocated.com.google.common.collect.Iterators;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.types.Comparators;
+import org.apache.iceberg.types.Types;
+import org.apache.iceberg.util.SnapshotUtil;
+import org.apache.iceberg.util.Tasks;
+import org.apache.iceberg.util.ThreadPools;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class PartitionStatsGenerator {
+  private static final Logger LOG = LoggerFactory.getLogger(PartitionStatsGenerator.class);
+
+  private final Table table;
+  private String branch;
+
+  public PartitionStatsGenerator(Table table) {
+    this.table = table;
+  }
+
+  public PartitionStatsGenerator(Table table, String branch) {
+    this.table = table;
+    this.branch = branch;
+  }
+
+  /**
+   * Computes the partition stats for the current snapshot and writes it into the metadata folder.
+   *
+   * @return {@link PartitionStatisticsFile} for the latest snapshot id or null if table doesn't
+   *     have any snapshot.
+   */
+  public PartitionStatisticsFile generate() {
+    Snapshot currentSnapshot = SnapshotUtil.latestSnapshot(table, branch);
+    if (currentSnapshot == null) {
+      Preconditions.checkArgument(
+          branch == null, "Couldn't find the snapshot for the branch %s", branch);
+      return null;
+    }
+
+    Types.StructType partitionType = Partitioning.partitionType(table);
+    // Map of partitionData, partition-stats-entry per partitionData.
+    Map<Record, Record> partitionEntryMap = Maps.newConcurrentMap();
+
+    Schema dataSchema = PartitionStatsUtil.schema(partitionType);
+    List<ManifestFile> manifestFiles = currentSnapshot.allManifests(table.io());
+    Tasks.foreach(manifestFiles)
+        .stopOnFailure()
+        .executeWith(ThreadPools.getWorkerPool())
+        .onFailure(
+            (file, thrown) ->
+                LOG.warn(
+                    "Failed to compute the partition stats for the manifest 
file: {}",
+                    file.path(),
+                    thrown))
+        .run(
+            manifest -> {
+              try (CloseableIterable<Record> entries =
+                  PartitionStatsUtil.fromManifest(table, manifest, dataSchema)) {
+                entries.forEach(
+                    entry ->
+                        partitionEntryMap.compute(

Review Comment:
   nit: I think it's more natural to use `merge` here than `compute`
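   For comparison, a short sketch of both shapes; `key` and `entry` stand in for the partition key and per-entry stats record whose expressions are truncated in the diff above:

   ```java
   // with merge: the first occurrence is inserted as-is, later occurrences
   // are folded into the existing record by the remapping function
   partitionEntryMap.merge(
       key,
       entry,
       (existing, incoming) -> {
         PartitionStatsUtil.updateRecord(existing, incoming);
         return existing;
       });

   // with compute: the first-occurrence case needs an explicit null check
   partitionEntryMap.compute(
       key,
       (ignored, existing) -> {
         if (existing == null) {
           return entry;
         }

         PartitionStatsUtil.updateRecord(existing, entry);
         return existing;
       });
   ```

   Since `partitionEntryMap` is a concurrent map, both calls are applied atomically per key.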



##########
core/src/main/java/org/apache/iceberg/PartitionStatsUtil.java:
##########
@@ -0,0 +1,213 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg;
+
+import java.util.List;
+import java.util.Map;
+import org.apache.iceberg.data.GenericRecord;
+import org.apache.iceberg.data.Record;
+import org.apache.iceberg.io.CloseableIterable;
+import org.apache.iceberg.types.Types;
+import org.apache.iceberg.util.PartitionUtil;
+
+public class PartitionStatsUtil {
+
+  private PartitionStatsUtil() {}
+
+  public enum Column {
+    PARTITION_DATA,
+    SPEC_ID,
+    DATA_RECORD_COUNT,
+    DATA_FILE_COUNT,
+    DATA_FILE_SIZE_IN_BYTES,
+    POSITION_DELETE_RECORD_COUNT,
+    POSITION_DELETE_FILE_COUNT,
+    EQUALITY_DELETE_RECORD_COUNT,
+    EQUALITY_DELETE_FILE_COUNT,
+    TOTAL_RECORD_COUNT,
+    LAST_UPDATED_AT,
+    LAST_UPDATED_SNAPSHOT_ID
+  }
+
+  public static Schema schema(Types.StructType partitionType) {
+    if (partitionType.fields().isEmpty()) {
+      throw new IllegalArgumentException("getting schema for an unpartitioned table");
+    }
+
+    return new Schema(
+        Types.NestedField.required(1, Column.PARTITION_DATA.name(), partitionType),
+        Types.NestedField.required(2, Column.SPEC_ID.name(), Types.IntegerType.get()),
+        Types.NestedField.required(3, Column.DATA_RECORD_COUNT.name(), Types.LongType.get()),
+        Types.NestedField.required(4, Column.DATA_FILE_COUNT.name(), Types.IntegerType.get()),
+        Types.NestedField.required(5, Column.DATA_FILE_SIZE_IN_BYTES.name(), Types.LongType.get()),
+        Types.NestedField.optional(
+            6, Column.POSITION_DELETE_RECORD_COUNT.name(), Types.LongType.get()),
+        Types.NestedField.optional(
+            7, Column.POSITION_DELETE_FILE_COUNT.name(), Types.IntegerType.get()),
+        Types.NestedField.optional(
+            8, Column.EQUALITY_DELETE_RECORD_COUNT.name(), Types.LongType.get()),
+        Types.NestedField.optional(
+            9, Column.EQUALITY_DELETE_FILE_COUNT.name(), Types.IntegerType.get()),
+        Types.NestedField.optional(10, Column.TOTAL_RECORD_COUNT.name(), Types.LongType.get()),
+        Types.NestedField.optional(11, Column.LAST_UPDATED_AT.name(), Types.LongType.get()),
+        Types.NestedField.optional(
+            12, Column.LAST_UPDATED_SNAPSHOT_ID.name(), Types.LongType.get()));
+  }
+
+  public static CloseableIterable<Record> fromManifest(
+      Table table, ManifestFile manifest, Schema recordSchema) {
+    CloseableIterable<? extends ManifestEntry<? extends ContentFile<?>>> entries =
+        CloseableIterable.transform(
+            ManifestFiles.open(manifest, table.io(), table.specs())
+                .select(scanColumns(manifest.content())) // don't select stats columns
+                .liveEntries(),
+            t ->
+                (ManifestEntry<? extends ContentFile<?>>)
+                    // defensive copy of manifest entry without stats columns
+                    t.copyWithoutStats());
+
+    return CloseableIterable.transform(
+        entries, entry -> fromManifestEntry(entry, table, recordSchema));
+  }
+
+  public static void updateRecord(Record toUpdate, Record fromRecord) {
+    toUpdate.set(
+        Column.SPEC_ID.ordinal(),
+        Math.max(
+            (int) toUpdate.get(Column.SPEC_ID.ordinal()),
+            (int) fromRecord.get(Column.SPEC_ID.ordinal())));
+    incrementLong(toUpdate, fromRecord, Column.DATA_RECORD_COUNT);
+    incrementInt(toUpdate, fromRecord, Column.DATA_FILE_COUNT);
+    incrementLong(toUpdate, fromRecord, Column.DATA_FILE_SIZE_IN_BYTES);
+    checkAndIncrementLong(toUpdate, fromRecord, Column.POSITION_DELETE_RECORD_COUNT);
+    checkAndIncrementInt(toUpdate, fromRecord, Column.POSITION_DELETE_FILE_COUNT);
+    checkAndIncrementLong(toUpdate, fromRecord, Column.EQUALITY_DELETE_RECORD_COUNT);
+    checkAndIncrementInt(toUpdate, fromRecord, Column.EQUALITY_DELETE_FILE_COUNT);
+    checkAndIncrementLong(toUpdate, fromRecord, Column.TOTAL_RECORD_COUNT);
+    if (toUpdate.get(Column.LAST_UPDATED_AT.ordinal()) != null
+        && fromRecord.get(Column.LAST_UPDATED_AT.ordinal()) != null
+        && ((long) toUpdate.get(Column.LAST_UPDATED_AT.ordinal())
+            < (long) fromRecord.get(Column.LAST_UPDATED_AT.ordinal()))) {
+      toUpdate.set(
+          Column.LAST_UPDATED_AT.ordinal(), fromRecord.get(Column.LAST_UPDATED_AT.ordinal()));
+      toUpdate.set(
+          Column.LAST_UPDATED_SNAPSHOT_ID.ordinal(),
+          fromRecord.get(Column.LAST_UPDATED_SNAPSHOT_ID.ordinal()));
+    }
+  }
+
+  public static Record partitionDataToRecord(

Review Comment:
   Not sure whether the conversion should happen here, or just before the data is fed to the writers. Does it make any difference whether the comparators/map operate on internal or generic types?
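   If it helps frame the question, a sketch of the "convert late" option under the assumption that the internal representation is kept until write time: key the in-memory map on the internal partition struct with Iceberg's `StructLikeMap` (field-by-field key equality for the given struct type), and only build generic `Record`s when feeding the writer. Note that `StructLikeMap`, unlike the `Maps.newConcurrentMap()` used above, is not thread-safe, so the per-manifest tasks would need external synchronization:

   ```java
   import java.util.Map;
   import org.apache.iceberg.StructLike;
   import org.apache.iceberg.data.Record;
   import org.apache.iceberg.types.Types;
   import org.apache.iceberg.util.StructLikeMap;

   // Accumulate stats keyed by the internal partition struct; comparators and
   // merging then operate on internal types, and the generic conversion is
   // deferred to the point where records are handed to the file writer.
   static Map<StructLike, Record> newStatsMap(Types.StructType partitionType) {
     return StructLikeMap.create(partitionType);
   }
   ```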



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

