SourabhBadhya commented on code in PR #14435:
URL: https://github.com/apache/iceberg/pull/14435#discussion_r2642601711
##########
parquet/src/main/java/org/apache/iceberg/parquet/ParquetFileMerger.java:
##########
@@ -0,0 +1,571 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.parquet;
+
+import static java.util.Collections.emptyMap;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.Map;
+import java.util.function.LongUnaryOperator;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.DataFiles;
+import org.apache.iceberg.FileFormat;
+import org.apache.iceberg.MetadataColumns;
+import org.apache.iceberg.Metrics;
+import org.apache.iceberg.MetricsConfig;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.StructLike;
+import org.apache.iceberg.hadoop.HadoopOutputFile;
+import org.apache.iceberg.io.FileIO;
+import org.apache.iceberg.io.InputFile;
+import org.apache.iceberg.io.OutputFile;
+import org.apache.iceberg.io.SeekableInputStream;
+import org.apache.iceberg.relocated.com.google.common.annotations.VisibleForTesting;
+import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.types.Conversions;
+import org.apache.iceberg.types.Types.LongType;
+import org.apache.parquet.bytes.BytesInput;
+import org.apache.parquet.bytes.BytesUtils;
+import org.apache.parquet.bytes.DirectByteBufferAllocator;
+import org.apache.parquet.bytes.HeapByteBufferAllocator;
+import org.apache.parquet.column.ColumnDescriptor;
+import org.apache.parquet.column.Encoding;
+import org.apache.parquet.column.ParquetProperties;
+import org.apache.parquet.column.statistics.Statistics;
+import org.apache.parquet.column.values.ValuesWriter;
+import org.apache.parquet.column.values.delta.DeltaBinaryPackingValuesWriterForLong;
+import org.apache.parquet.hadoop.CodecFactory;
+import org.apache.parquet.hadoop.ParquetFileReader;
+import org.apache.parquet.hadoop.ParquetFileWriter;
+import org.apache.parquet.hadoop.ParquetOutputFormat;
+import org.apache.parquet.hadoop.metadata.BlockMetaData;
+import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
+import org.apache.parquet.hadoop.metadata.CompressionCodecName;
+import org.apache.parquet.io.DelegatingSeekableInputStream;
+import org.apache.parquet.schema.MessageType;
+import org.apache.parquet.schema.PrimitiveType;
+import org.apache.parquet.schema.Type;
+import org.apache.parquet.schema.Types;
+
+/**
+ * Utility class for performing strict schema validation and merging of Parquet files at the
+ * row-group level.
+ */
+public class ParquetFileMerger {
+  // Default buffer sizes for DeltaBinaryPackingValuesWriter
+  private static final int DEFAULT_INITIAL_BUFFER_SIZE = 64 * 1024; // 64KB
+  private static final int DEFAULT_PAGE_SIZE_FOR_ENCODING = 64 * 1024; // 64KB
+  private static final PrimitiveType ROW_ID_TYPE =
+      Types.required(PrimitiveType.PrimitiveTypeName.INT64)
+          .id(MetadataColumns.ROW_ID.fieldId())
+          .named(MetadataColumns.ROW_ID.name());
+
+  private static final PrimitiveType LAST_UPDATED_SEQUENCE_NUMBER_TYPE =
+      Types.required(PrimitiveType.PrimitiveTypeName.INT64)
+          .id(MetadataColumns.LAST_UPDATED_SEQUENCE_NUMBER.fieldId())
+          .named(MetadataColumns.LAST_UPDATED_SEQUENCE_NUMBER.name());
+
+  private static final ColumnDescriptor ROW_ID_DESCRIPTOR =
+      new ColumnDescriptor(new String[] {MetadataColumns.ROW_ID.name()}, ROW_ID_TYPE, 0, 0);
+  private static final ColumnDescriptor LAST_UPDATED_SEQUENCE_NUMBER_DESCRIPTOR =
+      new ColumnDescriptor(
+          new String[] {MetadataColumns.LAST_UPDATED_SEQUENCE_NUMBER.name()},
+          LAST_UPDATED_SEQUENCE_NUMBER_TYPE,
+          0,
+          0);
+
+  private ParquetFileMerger() {
+    // Utility class - prevent instantiation
+  }
+
+  /**
+   * Checks whether the provided DataFiles are eligible for merging and, if successful, returns
+   * the common Parquet schema.
+   *
+   * <p>This method validates:
+   *
+   * <ul>
+   *   <li>All files must have compatible schemas (identical {@link MessageType})
+   *   <li>Files must not be encrypted
+   *   <li>Files must not have associated delete files or delete vectors
+   *   <li>All files have the same partition spec
+   *   <li>Table must not have a sort order (including z-ordered tables)
+   *   <li>No files exceed the target output size (not splitting large files)
+   * </ul>
+   *
+   * <p>This validation is useful for compaction operations in Spark, Flink, or other engines
+   * that need to ensure files can be safely merged. The returned MessageType can be passed to
+   * {@link #binaryMerge} to avoid re-reading the schema.
+   *
+   * @param dataFiles List of DataFiles to validate
+   * @param fileIO FileIO to use for reading files
+   * @param targetOutputSize Maximum size for output file (files larger than this cannot be merged)
+   * @return MessageType schema if files can be merged, null otherwise
+   */
+  public static MessageType canMergeAndGetSchema(
+      List<DataFile> dataFiles, FileIO fileIO, long targetOutputSize) {
+    Preconditions.checkArgument(
+        dataFiles != null && !dataFiles.isEmpty(), "dataFiles cannot be null or empty");
+
+    // Single loop to check partition spec consistency, file sizes, and build InputFile list
+    int firstSpecId = dataFiles.get(0).specId();
+    List<InputFile> inputFiles = Lists.newArrayListWithCapacity(dataFiles.size());
+    for (DataFile dataFile : dataFiles) {
+      if (dataFile.specId() != firstSpecId) {
+        return null;
+      }
+
+      if (dataFile.fileSizeInBytes() > targetOutputSize) {
+        return null;
+      }
+
+      inputFiles.add(fileIO.newInputFile(dataFile.location()));
+    }
+
+    return canMergeAndGetSchema(inputFiles);
+  }
+
+  private static MessageType readSchema(InputFile inputFile) throws IOException {
+    ParquetFileReader reader = ParquetFileReader.open(ParquetIO.file(inputFile));

Review Comment:
   Can we inline the reader within the try-with-resources block?
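   For illustration, a minimal sketch of the inlined form this suggests. The hunk is truncated
   right after the `open` call, so the `getFileMetaData().getSchema()` return below is an
   assumption about what `readSchema` does with the reader, not the PR's actual body:

   ```java
   private static MessageType readSchema(InputFile inputFile) throws IOException {
     // Declaring the reader inside try-with-resources guarantees it is closed
     // even if reading the footer throws, without a separate try/finally.
     try (ParquetFileReader reader = ParquetFileReader.open(ParquetIO.file(inputFile))) {
       return reader.getFileMetaData().getSchema();
     }
   }
   ```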
-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
