shangxinli commented on code in PR #14435:
URL: https://github.com/apache/iceberg/pull/14435#discussion_r2565628127


##########
parquet/src/main/java/org/apache/iceberg/parquet/ParquetFileMerger.java:
##########
@@ -0,0 +1,498 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.parquet;
+
+import static java.util.Collections.emptyMap;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import org.apache.iceberg.MetadataColumns;
+import org.apache.iceberg.encryption.EncryptedOutputFile;
+import org.apache.iceberg.io.InputFile;
+import org.apache.iceberg.io.OutputFile;
+import org.apache.iceberg.io.SeekableInputStream;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.parquet.bytes.BytesInput;
+import org.apache.parquet.bytes.HeapByteBufferAllocator;
+import org.apache.parquet.column.ColumnDescriptor;
+import org.apache.parquet.column.Encoding;
+import org.apache.parquet.column.ParquetProperties;
+import org.apache.parquet.column.statistics.LongStatistics;
+import org.apache.parquet.column.values.ValuesWriter;
+import org.apache.parquet.column.values.delta.DeltaBinaryPackingValuesWriterForLong;
+import org.apache.parquet.hadoop.ParquetFileReader;
+import org.apache.parquet.hadoop.ParquetFileWriter;
+import org.apache.parquet.hadoop.metadata.BlockMetaData;
+import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
+import org.apache.parquet.hadoop.metadata.CompressionCodecName;
+import org.apache.parquet.io.DelegatingSeekableInputStream;
+import org.apache.parquet.schema.MessageType;
+import org.apache.parquet.schema.PrimitiveType;
+import org.apache.parquet.schema.Type;
+import org.apache.parquet.schema.Types;
+
+/**
+ * Utility class for performing strict schema validation and merging of Parquet files at the
+ * row-group level.
+ *
+ * <p>This class ensures that all input files have identical Parquet schemas before merging. The
+ * merge operation is performed by copying row groups directly without
+ * serialization/deserialization, providing significant performance benefits over traditional
+ * read-rewrite approaches.
+ *
+ * <p>This class works with any Iceberg FileIO implementation (HadoopFileIO, S3FileIO, GCSFileIO,
+ * etc.), making it cloud-agnostic.
+ *
+ * <p>TODO: Encrypted tables are not supported
+ *
+ * <p>Key features:
+ *
+ * <ul>
+ *   <li>Row group merging without deserialization using {@link ParquetFileWriter#appendFile}
+ *   <li>Strict schema validation - all files must have identical {@link MessageType}
+ *   <li>Metadata merging for Iceberg-specific footer data
+ *   <li>Works with any FileIO implementation (local, S3, GCS, Azure, etc.)
+ * </ul>
+ *
+ * <p>Restrictions:
+ *
+ * <ul>
+ *   <li>All files must have compatible schemas (identical {@link MessageType})
+ *   <li>Files must not be encrypted
+ *   <li>Files must not have associated delete files or delete vectors
+ *   <li>Table must not have a sort order (including z-ordered tables)
+ * </ul>
+ *
+ * <p>Typical usage:
+ *
+ * <pre>
+ * ValidationResult result = ParquetFileMerger.readAndValidateSchema(inputFiles);
+ * if (result != null) {
+ *   ParquetFileMerger.mergeFiles(
+ *       inputFiles, encryptedOutputFile, result.schema(), firstRowIds,
+ *       rowGroupSize, columnIndexTruncateLength, result.metadata());
+ * }
+ * </pre>
+ */
+public class ParquetFileMerger {
+  // Default buffer sizes for DeltaBinaryPackingValuesWriter
+  private static final int DEFAULT_INITIAL_BUFFER_SIZE = 64 * 1024; // 64KB
+  private static final int DEFAULT_PAGE_SIZE_FOR_ENCODING = 64 * 1024; // 64KB
+
+  private ParquetFileMerger() {
+    // Utility class - prevent instantiation
+  }
+
+  /**
+   * Validates that all input files can be merged.
+   *
+   * <p>This method validates that:
+   *
+   * <ul>
+   *   <li>All files are valid Parquet format (detected by reading the Parquet footer)
+   *   <li>All files have identical schemas
+   *   <li>Files are not encrypted (detected by ParquetCryptoRuntimeException)
+   *   <li>If a physical _row_id column exists, all values are non-null
+   * </ul>
+   *
+   * <p>This method works with any Iceberg FileIO implementation (S3FileIO, GCSFileIO, etc.).
+   *
+   * @param inputFiles List of Iceberg input files to validate
+   * @return true if files can be merged, false otherwise
+   */
+  public static boolean canMerge(List<InputFile> inputFiles) {
+    try {
+      if (inputFiles == null || inputFiles.isEmpty()) {
+        return false;
+      }
+
+      // Read schema from the first file
+      MessageType firstSchema = readSchema(inputFiles.get(0));
+
+      // Check if schema has a physical _row_id column
+      boolean hasRowIdColumn = firstSchema.containsField(MetadataColumns.ROW_ID.name());
+
+      // Validate all files have the same schema
+      for (int i = 1; i < inputFiles.size(); i++) {
+        MessageType currentSchema = readSchema(inputFiles.get(i));
+
+        if (!firstSchema.equals(currentSchema)) {
+          return false;
+        }
+      }
+
+      // If there's a physical _row_id column, validate no nulls
+      if (hasRowIdColumn) {
+        validateRowIdColumnHasNoNulls(inputFiles);
+      }
+
+      return true;
+    } catch (RuntimeException | IOException e) {
+      // Returns false for:
+      // - Non-Parquet files (IOException when reading Parquet footer)
+      // - Encrypted files (ParquetCryptoRuntimeException extends RuntimeException)
+      // - Files with null _row_id values (IllegalArgumentException from
+      // validateRowIdColumnHasNoNulls)
+      // - Any other validation failures
+      return false;
+    }
+  }
+
+  /**
+   * Reads the Parquet schema from an Iceberg InputFile.
+   *
+   * @param inputFile Iceberg input file to read schema from
+   * @return MessageType schema of the Parquet file
+   * @throws IOException if reading fails
+   */
+  public static MessageType readSchema(InputFile inputFile) throws IOException {
+    // Use try-with-resources so the reader's underlying stream is not leaked
+    try (ParquetFileReader reader = ParquetFileReader.open(ParquetIO.file(inputFile))) {
+      return reader.getFooter().getFileMetaData().getSchema();
+    }
+  }
+
+  /**
+   * Reads the Parquet metadata (key-value pairs) from an Iceberg InputFile.
+   *
+   * @param inputFile Iceberg input file to read metadata from
+   * @return Map of key-value metadata from the Parquet file
+   * @throws IOException if reading fails
+   */
+  public static Map<String, String> readMetadata(InputFile inputFile) throws IOException {
+    // Use try-with-resources so the reader's underlying stream is not leaked
+    try (ParquetFileReader reader = ParquetFileReader.open(ParquetIO.file(inputFile))) {
+      return reader.getFooter().getFileMetaData().getKeyValueMetaData();
+    }
+  }
+
+  /**
+   * Validates that all _row_id values are non-null in the input files.
+   *
+   * <p>When files already have a physical _row_id column and row lineage processing is in
+   * effect, a binary merge cannot backfill null _row_id values. This method ensures all
+   * _row_id values are present.
+   *
+   * @param inputFiles List of input files to validate
+   * @throws IllegalArgumentException if any _row_id column contains null values
+   * @throws IOException if reading file metadata fails
+   */
+  private static void validateRowIdColumnHasNoNulls(List<InputFile> inputFiles)
+      throws IOException {
+    for (InputFile inputFile : inputFiles) {
+      try (ParquetFileReader reader = ParquetFileReader.open(ParquetIO.file(inputFile))) {
+        List<BlockMetaData> rowGroups = reader.getFooter().getBlocks();
+
+        for (BlockMetaData rowGroup : rowGroups) {
+          for (ColumnChunkMetaData columnChunk : rowGroup.getColumns()) {
+            // Check if this is the _row_id column
+            if (columnChunk.getPath().toDotString().equals(MetadataColumns.ROW_ID.name())) {
+              org.apache.parquet.column.statistics.Statistics<?> stats =
+                  columnChunk.getStatistics();
+              if (stats != null && stats.getNumNulls() > 0) {
+                throw new IllegalArgumentException(
+                    String.format(
+                        Locale.ROOT,
+                        "File %s contains null values in _row_id column (row 
group has %d nulls). "
+                            + "Cannot merge files with null _row_id values 
using binary copy.",
+                        inputFile.location(),
+                        stats.getNumNulls()));
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+
+  /** Internal method to merge files when schema is already known. */
+  private static void mergeFilesWithSchema(
+      List<InputFile> inputFiles,
+      OutputFile outputFile,
+      MessageType schema,
+      long rowGroupSize,
+      int columnIndexTruncateLength,
+      Map<String, String> extraMetadata)
+      throws IOException {
+    try (ParquetFileWriter writer =
+        new ParquetFileWriter(
+            ParquetIO.file(outputFile),
+            schema,
+            ParquetFileWriter.Mode.CREATE,
+            rowGroupSize,
+            0, // maxPaddingSize - hardcoded to 0 (same as ParquetWriter)
+            columnIndexTruncateLength,
+            ParquetProperties.DEFAULT_STATISTICS_TRUNCATE_LENGTH,
+            ParquetProperties.DEFAULT_PAGE_WRITE_CHECKSUM_ENABLED)) {
+
+      writer.start();
+      for (InputFile inputFile : inputFiles) {
+        writer.appendFile(ParquetIO.file(inputFile));
+      }
+
+      if (extraMetadata != null && !extraMetadata.isEmpty()) {
+        writer.end(extraMetadata);
+      } else {
+        writer.end(emptyMap());
+      }
+    }
+  }
+
+  /** Internal method to merge files with row IDs when base schema is already known. */
+  private static void mergeFilesWithRowIdsAndSchema(
+      List<InputFile> inputFiles,
+      OutputFile outputFile,
+      List<Long> firstRowIds,
+      MessageType baseSchema,
+      long rowGroupSize,
+      int columnIndexTruncateLength,
+      Map<String, String> extraMetadata)
+      throws IOException {
+    // Extend schema to include _row_id column
+    MessageType extendedSchema = addRowIdColumn(baseSchema);

Review Comment:
   Good catch. I should have added it earlier. 
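
   For context, here is a minimal sketch of what the truncated pieces might look like. Both `addRowIdColumn` and the delta-packed row-id encoding below are inferred from the imports and the call at the end of this hunk, not copied from the PR, so treat them as assumptions:

   ```java
   // Hypothetical sketch, not the PR's actual code: extend the base schema with a
   // physical _row_id column. Assumes an optional INT64 carrying Iceberg's
   // reserved field id for _row_id.
   private static MessageType addRowIdColumn(MessageType baseSchema) {
     List<Type> fields = Lists.newArrayList(baseSchema.getFields());
     fields.add(
         Types.optional(PrimitiveType.PrimitiveTypeName.INT64)
             .id(MetadataColumns.ROW_ID.fieldId())
             .named(MetadataColumns.ROW_ID.name()));
     return new MessageType(baseSchema.getName(), fields);
   }

   // Guessing from the imports: synthesized row ids would be delta-binary-packed
   // using the buffer defaults declared at the top of the class.
   private static BytesInput encodeRowIds(long firstRowId, long rowCount) {
     ValuesWriter writer =
         new DeltaBinaryPackingValuesWriterForLong(
             DEFAULT_INITIAL_BUFFER_SIZE,
             DEFAULT_PAGE_SIZE_FOR_ENCODING,
             new HeapByteBufferAllocator());
     for (long i = 0; i < rowCount; i++) {
       writer.writeLong(firstRowId + i);
     }
     return writer.getBytes();
   }
   ```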



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

