parthchandra commented on code in PR #13786:
URL: https://github.com/apache/iceberg/pull/13786#discussion_r2593988417


##########
parquet/src/main/java/org/apache/iceberg/parquet/Parquet.java:
##########
@@ -127,6 +127,7 @@
 
 public class Parquet {
   private static final Logger LOG = LoggerFactory.getLogger(Parquet.class);
+  private static final String VECTORIZED_READER_FACTORY =
+      "read.parquet.vectorized-reader.factory";

Review Comment:
   I've added a bunch of tests for this. For the Comet-specific code, the plan is to add a diff file that can be applied to switch the reader for all of the existing tests, plus a CI pipeline that verifies nothing is broken.
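
   For context, the constant above is the table read property that selects the vectorized reader factory by class name. A minimal, hypothetical sketch of opting in through that property (the factory class name below is made up; the real wiring depends on how the PR plumbs the property through):

```java
import java.util.Map;

public class ReaderFactoryPropertyExample {
  public static void main(String[] args) {
    // Hypothetical: name a class implementing VectorizedParquetReaderFactory via
    // the read property; loadReaderFactory() would load it reflectively and fall
    // back to the built-in vectorized reader if loading fails.
    Map<String, String> readProperties =
        Map.of(
            "read.parquet.vectorized-reader.factory",
            "org.example.MyVectorizedParquetReaderFactory");
    System.out.println(readProperties);
  }
}
```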



##########
parquet/src/main/java/org/apache/iceberg/parquet/ReadConf.java:
##########
@@ -60,7 +60,7 @@ class ReadConf<T> {
   private final List<Map<ColumnPath, ColumnChunkMetaData>> columnChunkMetaDataForRowGroups;
 
   @SuppressWarnings("unchecked")
-  ReadConf(
+  public ReadConf(

Review Comment:
   I would rather not rename an existing class; keeping the name reduces confusion about the scope of the changes.



##########
parquet/src/main/java/org/apache/iceberg/parquet/Parquet.java:
##########
@@ -1537,6 +1589,27 @@ public <D> CloseableIterable<D> build() {
     }
   }
 
+  private static VectorizedParquetReaderFactory loadReaderFactory(String className) {
+    try {
+      Class<?> factoryClass = Class.forName(className);
+      if (!VectorizedParquetReaderFactory.class.isAssignableFrom(factoryClass)) {
+        LOG.warn("Class {} does not implement VectorizedParquetReaderFactory interface", className);
+        return null;

Review Comment:
   We lose a little bit of helpful text in the error, but I made the change.
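
   The snippet above is cut off after the type check; a minimal sketch of how a reflective loader like this typically completes, assuming a no-arg constructor on the factory class (not necessarily the PR's exact code):

```java
private static VectorizedParquetReaderFactory loadReaderFactory(String className) {
  try {
    Class<?> factoryClass = Class.forName(className);
    if (!VectorizedParquetReaderFactory.class.isAssignableFrom(factoryClass)) {
      LOG.warn("Class {} does not implement VectorizedParquetReaderFactory interface", className);
      return null;
    }
    // instantiate through the no-arg constructor; any reflective failure falls
    // back to the built-in reader by returning null
    return (VectorizedParquetReaderFactory) factoryClass.getDeclaredConstructor().newInstance();
  } catch (ReflectiveOperationException e) {
    LOG.warn("Cannot load vectorized reader factory {}", className, e);
    return null;
  }
}
```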



##########
spark/v4.0/spark/src/main/java/org/apache/iceberg/spark/parquet/CometVectorizedParquetReader.java:
##########
@@ -0,0 +1,352 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.spark.parquet;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.function.Function;
+import org.apache.comet.parquet.FileReader;
+import org.apache.comet.parquet.ParquetColumnSpec;
+import org.apache.comet.parquet.ReadOptions;
+import org.apache.comet.parquet.RowGroupReader;
+import org.apache.comet.parquet.WrappedInputFile;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.exceptions.RuntimeIOException;
+import org.apache.iceberg.expressions.Expression;
+import org.apache.iceberg.expressions.Expressions;
+import org.apache.iceberg.io.CloseableGroup;
+import org.apache.iceberg.io.CloseableIterable;
+import org.apache.iceberg.io.CloseableIterator;
+import org.apache.iceberg.io.InputFile;
+import org.apache.iceberg.mapping.NameMapping;
+import org.apache.iceberg.parquet.ReadConf;
+import org.apache.iceberg.parquet.VectorizedReader;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.util.ByteBuffers;
+import org.apache.parquet.ParquetReadOptions;
+import org.apache.parquet.column.ColumnDescriptor;
+import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
+import org.apache.parquet.hadoop.metadata.ColumnPath;
+import org.apache.parquet.schema.MessageType;
+
+public class CometVectorizedParquetReader<T> extends CloseableGroup
+    implements CloseableIterable<T> {
+  private final InputFile input;
+  private final ParquetReadOptions options;
+  private final Schema expectedSchema;
+  private final Function<MessageType, VectorizedReader<?>> batchReaderFunc;
+  private final Expression filter;
+  private final boolean reuseContainers;
+  private final boolean caseSensitive;
+  private final int batchSize;
+  private final NameMapping nameMapping;
+  private final Map<String, String> properties;
+  private final Long start;
+  private final Long length;
+  private final ByteBuffer fileEncryptionKey;
+  private final ByteBuffer fileAADPrefix;
+
+  private CometVectorizedParquetReader(
+      InputFile input,
+      Schema expectedSchema,
+      ParquetReadOptions options,
+      Function<MessageType, VectorizedReader<?>> readerFunc,
+      NameMapping nameMapping,
+      Expression filter,
+      boolean reuseContainers,
+      boolean caseSensitive,
+      int maxRecordsPerBatch,
+      Map<String, String> properties,
+      Long start,
+      Long length,
+      ByteBuffer fileEncryptionKey,
+      ByteBuffer fileAADPrefix) {
+    this.input = input;
+    this.expectedSchema = expectedSchema;
+    this.options = options;
+    this.batchReaderFunc = readerFunc;
+    // replace alwaysTrue with null to avoid extra work evaluating a trivial filter
+    this.filter = filter == Expressions.alwaysTrue() ? null : filter;
+    this.reuseContainers = reuseContainers;
+    this.caseSensitive = caseSensitive;
+    this.batchSize = maxRecordsPerBatch;
+    this.nameMapping = nameMapping;
+    this.properties = properties;
+    this.start = start;
+    this.length = length;
+    this.fileEncryptionKey = fileEncryptionKey;
+    this.fileAADPrefix = fileAADPrefix;
+  }
+
+  public static Builder builder(
+      InputFile file,
+      Schema schema,
+      ParquetReadOptions options,
+      Function<MessageType, VectorizedReader<?>> batchedReaderFunc) {
+    return new Builder(file, schema, options, batchedReaderFunc);
+  }
+
+  public static class Builder {
+    private final InputFile file;
+    private final Schema schema;
+    private final ParquetReadOptions options;
+    private final Function<MessageType, VectorizedReader<?>> batchedReaderFunc;
+    private NameMapping nameMapping = null;
+    private Expression filter = null;
+    private boolean reuseContainers = false;
+    private boolean caseSensitive = true;
+    private int maxRecordsPerBatch = 10000;
+    private Map<String, String> properties = null;
+    private Long start = null;
+    private Long length = null;
+    private ByteBuffer fileEncryptionKey = null;
+    private ByteBuffer fileAADPrefix = null;
+
+    private Builder(
+        InputFile file,
+        Schema schema,
+        ParquetReadOptions options,
+        Function<MessageType, VectorizedReader<?>> batchedReaderFunc) {
+      this.file = file;
+      this.schema = schema;
+      this.options = options;
+      this.batchedReaderFunc = batchedReaderFunc;
+    }
+
+    public Builder nameMapping(NameMapping mapping) {
+      this.nameMapping = mapping;
+      return this;
+    }
+
+    public Builder filter(Expression filterExpr) {
+      this.filter = filterExpr;
+      return this;
+    }
+
+    public Builder reuseContainers(boolean reuse) {
+      this.reuseContainers = reuse;
+      return this;
+    }
+
+    public Builder caseSensitive(boolean sensitive) {
+      this.caseSensitive = sensitive;
+      return this;
+    }
+
+    public Builder maxRecordsPerBatch(int maxRecords) {
+      this.maxRecordsPerBatch = maxRecords;
+      return this;
+    }
+
+    public Builder properties(Map<String, String> props) {
+      this.properties = props;
+      return this;
+    }
+
+    public Builder split(Long splitStart, Long splitLength) {
+      this.start = splitStart;
+      this.length = splitLength;
+      return this;
+    }
+
+    public Builder encryption(ByteBuffer encryptionKey, ByteBuffer aadPrefix) {
+      this.fileEncryptionKey = encryptionKey;
+      this.fileAADPrefix = aadPrefix;
+      return this;
+    }
+
+    public <T> CometVectorizedParquetReader<T> build() {
+      return new CometVectorizedParquetReader<>(
+          file,
+          schema,
+          options,
+          batchedReaderFunc,
+          nameMapping,
+          filter,
+          reuseContainers,
+          caseSensitive,
+          maxRecordsPerBatch,
+          properties,
+          start,
+          length,
+          fileEncryptionKey,
+          fileAADPrefix);
+    }
+  }
+
+  private ReadConf conf = null;
+
+  private ReadConf init() {
+    if (conf == null) {

Review Comment:
   I don't think it needs to be; this is pretty much what is done in VectorizedParquetReader.
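
   For reference, a rough sketch of how the lazily built conf is consumed in VectorizedParquetReader, which this mirrors: every iterator() call goes through init(), so the ReadConf is built at most once per reader instance and the reader is not expected to be shared across threads (names below are illustrative, not the PR's exact code):

```java
@Override
public CloseableIterator<T> iterator() {
  // init() builds ReadConf on first use and reuses the cached copy afterwards
  FileIterator<T> iter = new FileIterator<>(init());
  addCloseable(iter);
  return iter;
}
```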



##########
spark/v4.0/spark/src/main/java/org/apache/iceberg/spark/parquet/CometVectorizedParquetReader.java:
##########
@@ -0,0 +1,352 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.spark.parquet;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.function.Function;
+import org.apache.comet.parquet.FileReader;
+import org.apache.comet.parquet.ParquetColumnSpec;
+import org.apache.comet.parquet.ReadOptions;
+import org.apache.comet.parquet.RowGroupReader;
+import org.apache.comet.parquet.WrappedInputFile;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.exceptions.RuntimeIOException;
+import org.apache.iceberg.expressions.Expression;
+import org.apache.iceberg.expressions.Expressions;
+import org.apache.iceberg.io.CloseableGroup;
+import org.apache.iceberg.io.CloseableIterable;
+import org.apache.iceberg.io.CloseableIterator;
+import org.apache.iceberg.io.InputFile;
+import org.apache.iceberg.mapping.NameMapping;
+import org.apache.iceberg.parquet.ReadConf;
+import org.apache.iceberg.parquet.VectorizedReader;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.util.ByteBuffers;
+import org.apache.parquet.ParquetReadOptions;
+import org.apache.parquet.column.ColumnDescriptor;
+import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
+import org.apache.parquet.hadoop.metadata.ColumnPath;
+import org.apache.parquet.schema.MessageType;
+
+public class CometVectorizedParquetReader<T> extends CloseableGroup
+    implements CloseableIterable<T> {
+  private final InputFile input;
+  private final ParquetReadOptions options;
+  private final Schema expectedSchema;
+  private final Function<MessageType, VectorizedReader<?>> batchReaderFunc;
+  private final Expression filter;
+  private final boolean reuseContainers;
+  private final boolean caseSensitive;
+  private final int batchSize;
+  private final NameMapping nameMapping;
+  private final Map<String, String> properties;
+  private final Long start;
+  private final Long length;
+  private final ByteBuffer fileEncryptionKey;
+  private final ByteBuffer fileAADPrefix;
+
+  private CometVectorizedParquetReader(
+      InputFile input,
+      Schema expectedSchema,
+      ParquetReadOptions options,
+      Function<MessageType, VectorizedReader<?>> readerFunc,
+      NameMapping nameMapping,
+      Expression filter,
+      boolean reuseContainers,
+      boolean caseSensitive,
+      int maxRecordsPerBatch,
+      Map<String, String> properties,
+      Long start,
+      Long length,
+      ByteBuffer fileEncryptionKey,
+      ByteBuffer fileAADPrefix) {
+    this.input = input;
+    this.expectedSchema = expectedSchema;
+    this.options = options;
+    this.batchReaderFunc = readerFunc;
+    // replace alwaysTrue with null to avoid extra work evaluating a trivial filter
+    this.filter = filter == Expressions.alwaysTrue() ? null : filter;
+    this.reuseContainers = reuseContainers;
+    this.caseSensitive = caseSensitive;
+    this.batchSize = maxRecordsPerBatch;
+    this.nameMapping = nameMapping;
+    this.properties = properties;
+    this.start = start;
+    this.length = length;
+    this.fileEncryptionKey = fileEncryptionKey;
+    this.fileAADPrefix = fileAADPrefix;
+  }
+
+  public static Builder builder(
+      InputFile file,
+      Schema schema,
+      ParquetReadOptions options,
+      Function<MessageType, VectorizedReader<?>> batchedReaderFunc) {
+    return new Builder(file, schema, options, batchedReaderFunc);
+  }
+
+  public static class Builder {
+    private final InputFile file;
+    private final Schema schema;
+    private final ParquetReadOptions options;
+    private final Function<MessageType, VectorizedReader<?>> batchedReaderFunc;
+    private NameMapping nameMapping = null;
+    private Expression filter = null;
+    private boolean reuseContainers = false;
+    private boolean caseSensitive = true;
+    private int maxRecordsPerBatch = 10000;
+    private Map<String, String> properties = null;
+    private Long start = null;
+    private Long length = null;
+    private ByteBuffer fileEncryptionKey = null;
+    private ByteBuffer fileAADPrefix = null;
+
+    private Builder(
+        InputFile file,
+        Schema schema,
+        ParquetReadOptions options,
+        Function<MessageType, VectorizedReader<?>> batchedReaderFunc) {
+      this.file = file;
+      this.schema = schema;
+      this.options = options;
+      this.batchedReaderFunc = batchedReaderFunc;
+    }
+
+    public Builder nameMapping(NameMapping mapping) {
+      this.nameMapping = mapping;
+      return this;
+    }
+
+    public Builder filter(Expression filterExpr) {
+      this.filter = filterExpr;
+      return this;
+    }
+
+    public Builder reuseContainers(boolean reuse) {
+      this.reuseContainers = reuse;
+      return this;
+    }
+
+    public Builder caseSensitive(boolean sensitive) {
+      this.caseSensitive = sensitive;
+      return this;
+    }
+
+    public Builder maxRecordsPerBatch(int maxRecords) {
+      this.maxRecordsPerBatch = maxRecords;
+      return this;
+    }
+
+    public Builder properties(Map<String, String> props) {
+      this.properties = props;
+      return this;
+    }
+
+    public Builder split(Long splitStart, Long splitLength) {
+      this.start = splitStart;
+      this.length = splitLength;
+      return this;
+    }
+
+    public Builder encryption(ByteBuffer encryptionKey, ByteBuffer aadPrefix) {
+      this.fileEncryptionKey = encryptionKey;
+      this.fileAADPrefix = aadPrefix;
+      return this;
+    }
+
+    public <T> CometVectorizedParquetReader<T> build() {
+      return new CometVectorizedParquetReader<>(
+          file,
+          schema,
+          options,
+          batchedReaderFunc,
+          nameMapping,
+          filter,
+          reuseContainers,
+          caseSensitive,
+          maxRecordsPerBatch,
+          properties,
+          start,
+          length,
+          fileEncryptionKey,
+          fileAADPrefix);
+    }
+  }
+
+  private ReadConf conf = null;
+
+  private ReadConf init() {
+    if (conf == null) {
+      ReadConf readConf =
+          new ReadConf(
+              input,
+              options,
+              expectedSchema,
+              filter,
+              null,
+              batchReaderFunc,
+              nameMapping,
+              reuseContainers,
+              caseSensitive,
+              batchSize);
+      this.conf = readConf.copy();

Review Comment:
   TBH, I don't know myself. I based it on what is done in VectorizedParquetReader.
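
   For what it's worth, the idiom in VectorizedParquetReader appears to be: hand the freshly built ReadConf (with its already-open file reader) to the first caller, and cache a copy without that reader so later iterators re-open the file themselves. A sketch of that reading (an interpretation, not the PR's exact code):

```java
private ReadConf init() {
  if (conf == null) {
    ReadConf readConf =
        new ReadConf(
            input, options, expectedSchema, filter, null,
            batchReaderFunc, nameMapping, reuseContainers, caseSensitive, batchSize);
    // cache a copy that drops the open reader; the original (with its reader)
    // goes to the first caller so the already-opened file is not wasted
    this.conf = readConf.copy();
    return readConf;
  }
  return conf;
}
```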



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

