aokolnychyi commented on code in PR #11146: URL: https://github.com/apache/iceberg/pull/11146#discussion_r1772724334
########## core/src/main/java/org/apache/iceberg/PartitionStatsUtil.java: ########## @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iceberg; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Collection; +import java.util.Comparator; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import org.apache.iceberg.relocated.com.google.common.base.Preconditions; +import org.apache.iceberg.relocated.com.google.common.collect.Lists; +import org.apache.iceberg.types.Comparators; +import org.apache.iceberg.types.Types.StructType; +import org.apache.iceberg.util.PartitionUtil; +import org.apache.iceberg.util.StructLikeMap; +import org.apache.iceberg.util.ThreadPools; + +public class PartitionStatsUtil { + + private PartitionStatsUtil() {} + + /** + * Computes the partition stats for the given snapshot of the table. + * + * @param table the table for which partition stats to be computed. + * @param snapshot the snapshot for which partition stats is computed. 
+ * @return the collection of {@link PartitionStats} + */ + public static Collection<PartitionStats> computeStats(Table table, Snapshot snapshot) { + Preconditions.checkArgument(table != null, "table cannot be null"); + Preconditions.checkArgument(snapshot != null, "snapshot cannot be null"); + + StructType partitionType = Partitioning.partitionType(table); + if (partitionType.fields().isEmpty()) { + throw new UnsupportedOperationException( + "Computing partition stats for an unpartitioned table"); + } + + List<ManifestFile> manifests = snapshot.allManifests(table.io()); + + ExecutorService executorService = ThreadPools.getWorkerPool(); + List<Future<StructLikeMap<PartitionStats>>> futures = Lists.newArrayList(); + manifests.forEach( + manifest -> { + Future<StructLikeMap<PartitionStats>> future = + executorService.submit(() -> collectStats(table, manifest, partitionType)); + futures.add(future); + }); + + StructLikeMap<PartitionStats> statsMap = StructLikeMap.create(partitionType); + for (Future<StructLikeMap<PartitionStats>> future : futures) { + try { + future + .get() + .forEach( + (key, value) -> + statsMap.merge( + key, + value, + (existingEntry, newEntry) -> { + existingEntry.appendStats(newEntry); + return existingEntry; + })); + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException(e); + } + } + + return statsMap.values(); + } + + /** + * Sorts the {@link PartitionStats} based on the partition data. + * + * @param stats collection of {@link PartitionStats} which needs to be sorted. + * @param partitionType unified partition schema. 
+ * @return the list of {@link PartitionStats} + */ + public static List<PartitionStats> sortStats( + Collection<PartitionStats> stats, StructType partitionType) { + List<PartitionStats> entries = Lists.newArrayList(stats.iterator()); + entries.sort( + Comparator.comparing(PartitionStats::partition, Comparators.forType(partitionType))); + return entries; + } + + private static StructLikeMap<PartitionStats> collectStats( + Table table, ManifestFile manifest, StructType partitionType) { + try (ManifestReader<?> reader = openManifest(table, manifest)) { + StructLikeMap<PartitionStats> statsMap = StructLikeMap.create(partitionType); + + for (ManifestEntry<?> entry : reader.entries()) { + ContentFile<?> file = entry.file(); + PartitionSpec spec = table.specs().get(file.specId()); + PartitionData key = Review Comment: I think I understand why it starts to fail if we use the existing `coercePartition` method. It fails because the entry object is reused, meaning the `partition` we project is mutable. Take a look [here](https://github.com/apache/iceberg/blob/72fd9ab9f44cfb3e4b08f849cf7b5132f84b9936/core/src/main/java/org/apache/iceberg/util/TableScanUtil.java#L156) at how we solve it in `TableScanUtil`. ########## core/src/jmh/java/org/apache/iceberg/PartitionStatsUtilBenchmark.java: ########## @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iceberg; + +import static org.apache.iceberg.types.Types.NestedField.optional; +import static org.apache.iceberg.types.Types.NestedField.required; +import static org.assertj.core.api.Assertions.assertThat; + +import java.io.IOException; +import java.util.Collection; +import java.util.concurrent.TimeUnit; +import java.util.stream.IntStream; +import org.apache.iceberg.hadoop.HadoopTables; +import org.apache.iceberg.types.Types; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.TearDown; +import org.openjdk.jmh.annotations.Threads; +import org.openjdk.jmh.annotations.Timeout; +import org.openjdk.jmh.annotations.Warmup; + +@Fork(1) +@State(Scope.Benchmark) +@Warmup(iterations = 2) +@Measurement(iterations = 5) +@Timeout(time = 1000, timeUnit = TimeUnit.HOURS) +@BenchmarkMode(Mode.SingleShotTime) +public class PartitionStatsUtilBenchmark { + + private static final Schema SCHEMA = + new Schema( + required(1, "c1", Types.IntegerType.get()), + optional(2, "c2", Types.StringType.get()), + optional(3, "c3", Types.StringType.get())); + + private static final PartitionSpec SPEC = PartitionSpec.builderFor(SCHEMA).identity("c1").build(); + + // Create 10k manifests + private static final int 
MANIFEST_COUNTER = 10000; + + // each manifest with 100 partition values + private static final int PARTITION_PER_MANIFEST = 100; + + // 20 data files per partition, which results in 2k data files per manifest + private static final int DATA_FILES_PER_PARTITION_COUNT = 20; + + private static final HadoopTables TABLES = new HadoopTables(); + + private static final String TABLE_IDENT = "tbl"; + + private Table table; + + @Setup + public void setupBenchmark() { + table = TABLES.create(SCHEMA, SPEC, TABLE_IDENT); + + IntStream.range(0, MANIFEST_COUNTER) + .forEach( Review Comment: I am personally not a big fan of how Spotless formats `forEach` closures. I think it would be easier to understand with simple for loops. That said, it is a personal preference. You can do what you like. ########## core/src/main/java/org/apache/iceberg/PartitionStats.java: ########## @@ -0,0 +1,249 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.iceberg; + +public class PartitionStats implements StructLike { + + private static final int STATS_COUNT = 12; + + private PartitionData partition; + private int specId; + private long dataRecordCount; + private int dataFileCount; + private long totalDataFileSizeInBytes; + private long positionDeleteRecordCount; + private int positionDeleteFileCount; + private long equalityDeleteRecordCount; + private int equalityDeleteFileCount; + private long totalRecordCount; + private Long lastUpdatedAt; // null by default + private Long lastUpdatedSnapshotId; // null by default + + public PartitionStats(PartitionData partition, int specId) { + this.partition = partition; + this.specId = specId; + } + + public PartitionData partition() { + return partition; + } + + public int specId() { + return specId; + } + + public long dataRecordCount() { + return dataRecordCount; + } + + public int dataFileCount() { + return dataFileCount; + } + + public long totalDataFileSizeInBytes() { + return totalDataFileSizeInBytes; + } + + public long positionDeleteRecordCount() { + return positionDeleteRecordCount; + } + + public int positionDeleteFileCount() { + return positionDeleteFileCount; + } + + public long equalityDeleteRecordCount() { + return equalityDeleteRecordCount; + } + + public int equalityDeleteFileCount() { + return equalityDeleteFileCount; + } + + public long totalRecordCount() { + return totalRecordCount; + } + + public Long lastUpdatedAt() { + return lastUpdatedAt; + } + + public Long lastUpdatedSnapshotId() { + return lastUpdatedSnapshotId; + } + + /** + * Updates the partition stats from the data/delete file. + * + * @param file the ContentFile from the manifest entry. + * @param snapshot the snapshot corresponding to the live entry. 
+ */ + public void liveEntry(ContentFile<?> file, Snapshot snapshot) { + this.specId = Math.max(this.specId, file.specId()); + + switch (file.content()) { + case DATA: + this.dataRecordCount += file.recordCount(); + this.dataFileCount += 1; + this.totalDataFileSizeInBytes += file.fileSizeInBytes(); + break; + case POSITION_DELETES: + this.positionDeleteRecordCount += file.recordCount(); + this.positionDeleteFileCount += 1; + break; + case EQUALITY_DELETES: + this.equalityDeleteRecordCount += file.recordCount(); + this.equalityDeleteFileCount += 1; + break; + default: + throw new UnsupportedOperationException("Unsupported file content type: " + file.content()); + } + + if (snapshot != null) { + updateSnapshotInfo(snapshot.snapshotId(), snapshot.timestampMillis()); + } + + // Note: Not computing the `TOTAL_RECORD_COUNT` for now as it needs scanning the data. + } + + /** + * Updates the modified time and snapshot ID for the deleted manifest entry. + * + * @param snapshot the snapshot corresponding to the deleted manifest entry. + */ + public void deletedEntry(Snapshot snapshot) { + if (snapshot != null) { + updateSnapshotInfo(snapshot.snapshotId(), snapshot.timestampMillis()); + } + } + + /** + * Appends statistics from given entry to current entry. + * + * @param entry the entry from which statistics will be sourced. + */ + public void appendStats(PartitionStats entry) { + this.specId = Math.max(specId, entry.specId); Review Comment: We can't actually combine stats across spec IDs even if the coerced key happens to be the same. Partitions that belong to different specs aren't equivalent. For instance, we use `PartitionMap` for assigning deletes to ensure that not only the partition keys match but also specs. If we have `p1 = 10` in spec 1 and `p1 = 10 and p2 = null` in spec 2, these partitions are different and their stats can't be combined. Does this make sense? If so, we may need to use `PartitionMap` instead of `StructLikeMap`. 
########## core/src/main/java/org/apache/iceberg/PartitionStats.java: ########## @@ -0,0 +1,253 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iceberg; + +import org.apache.iceberg.relocated.com.google.common.base.Preconditions; + +public class PartitionStats implements StructLike { + + private static final int STATS_COUNT = 12; + + private StructLike partition; + private int specId; + private long dataRecordCount; + private int dataFileCount; + private long totalDataFileSizeInBytes; + private long positionDeleteRecordCount; + private int positionDeleteFileCount; + private long equalityDeleteRecordCount; + private int equalityDeleteFileCount; + private long totalRecordCount; + private Long lastUpdatedAt; // null by default + private Long lastUpdatedSnapshotId; // null by default + + public PartitionStats(StructLike partition, int specId) { + this.partition = partition; + this.specId = specId; + } + + public StructLike partition() { + return partition; + } + + public int specId() { + return specId; + } + + public long dataRecordCount() { + return dataRecordCount; + } + + public int dataFileCount() { + return dataFileCount; + } + + public long totalDataFileSizeInBytes() { + return 
totalDataFileSizeInBytes; + } + + public long positionDeleteRecordCount() { + return positionDeleteRecordCount; + } + + public int positionDeleteFileCount() { + return positionDeleteFileCount; + } + + public long equalityDeleteRecordCount() { + return equalityDeleteRecordCount; + } + + public int equalityDeleteFileCount() { + return equalityDeleteFileCount; + } + + public long totalRecordCount() { + return totalRecordCount; + } + + public Long lastUpdatedAt() { + return lastUpdatedAt; + } + + public Long lastUpdatedSnapshotId() { + return lastUpdatedSnapshotId; + } + + /** + * Updates the partition stats from the data/delete file. + * + * @param file the ContentFile from the manifest entry. Review Comment: Question: Do we need `@link` here? ########## core/src/main/java/org/apache/iceberg/PartitionStatsUtil.java: ########## @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.iceberg; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Collection; +import java.util.Comparator; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import org.apache.iceberg.relocated.com.google.common.base.Preconditions; +import org.apache.iceberg.relocated.com.google.common.collect.Lists; +import org.apache.iceberg.types.Comparators; +import org.apache.iceberg.types.Types.StructType; +import org.apache.iceberg.util.PartitionUtil; +import org.apache.iceberg.util.StructLikeMap; +import org.apache.iceberg.util.ThreadPools; + +public class PartitionStatsUtil { + + private PartitionStatsUtil() {} + + /** + * Computes the partition stats for the given snapshot of the table. + * + * @param table the table for which partition stats to be computed. + * @param snapshot the snapshot for which partition stats is computed. + * @return the collection of {@link PartitionStats} + */ + public static Collection<PartitionStats> computeStats(Table table, Snapshot snapshot) { + Preconditions.checkArgument(table != null, "table cannot be null"); + Preconditions.checkArgument(snapshot != null, "snapshot cannot be null"); + + StructType partitionType = Partitioning.partitionType(table); + if (partitionType.fields().isEmpty()) { + throw new UnsupportedOperationException( + "Computing partition stats for an unpartitioned table"); + } + + List<ManifestFile> manifests = snapshot.allManifests(table.io()); + + ExecutorService executorService = ThreadPools.getWorkerPool(); Review Comment: Will something like below work too? 
``` List<ManifestFile> manifests = snapshot.allManifests(table.io()); Queue<PartitionMap<PartitionStats>> statsByManifest = Queues.newConcurrentLinkedQueue(); Tasks.foreach(manifests) .stopOnFailure() .throwFailureWhenFinished() .executeWith(ThreadPools.getWorkerPool()) .run(manifest -> statsByManifest.add(computeStats(table, manifest, partitionType))); PartitionMap<PartitionStats> stats = merge(table, statsByManifest); return stats.values(); ``` -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org For queries about this service, please contact Infrastructure at: us...@infra.apache.org --------------------------------------------------------------------- To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org For additional commands, e-mail: issues-h...@iceberg.apache.org