aokolnychyi commented on code in PR #9841:
URL: https://github.com/apache/iceberg/pull/9841#discussion_r1933112611
##########
spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/data/vectorized/CometColumnarBatchReader.java:
##########
@@ -0,0 +1,198 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.spark.data.vectorized;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.util.List;
+import java.util.Map;
+import org.apache.comet.parquet.AbstractColumnReader;
+import org.apache.comet.parquet.BatchReader;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.data.DeleteFilter;
+import org.apache.iceberg.parquet.VectorizedReader;
+import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
+import org.apache.iceberg.spark.SparkSchemaUtil;
+import org.apache.iceberg.util.Pair;
+import org.apache.parquet.column.page.PageReadStore;
+import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
+import org.apache.parquet.hadoop.metadata.ColumnPath;
+import org.apache.spark.sql.catalyst.InternalRow;
+import org.apache.spark.sql.vectorized.ColumnVector;
+import org.apache.spark.sql.vectorized.ColumnarBatch;
+
+/**
+ * {@link VectorizedReader} that returns Spark's {@link ColumnarBatch} to support Spark's vectorized
+ * read path. The {@link ColumnarBatch} returned is created by passing in the Arrow vectors
+ * populated via delegated read calls to {@link CometColumnReader VectorReader(s)}.
+ */
+@SuppressWarnings("checkstyle:VisibilityModifier")
+class CometColumnarBatchReader implements VectorizedReader<ColumnarBatch> {
+
+  private final CometColumnReader[] readers;
+  private final boolean hasIsDeletedColumn;
+  private DeleteFilter<InternalRow> deletes = null;
+  private long rowStartPosInBatch = 0;
+  // The delegated batch reader on Comet side
+  private final BatchReader delegate;
+
+  CometColumnarBatchReader(List<VectorizedReader<?>> readers, Schema schema) {
+    this.readers =
+        readers.stream().map(CometColumnReader.class::cast).toArray(CometColumnReader[]::new);
+    this.hasIsDeletedColumn =
+        readers.stream().anyMatch(reader -> reader instanceof CometDeleteColumnReader);
+
+    AbstractColumnReader[] abstractColumnReaders = new AbstractColumnReader[readers.size()];
+    delegate = new BatchReader(abstractColumnReaders);
+    delegate.setSparkSchema(SparkSchemaUtil.convert(schema));
+  }
+
+  @Override
+  public void setRowGroupInfo(
+      PageReadStore pageStore, Map<ColumnPath, ColumnChunkMetaData> metaData, long rowPosition) {
+    setRowGroupInfo(pageStore, metaData);
+  }
+
+  @Override
+  public void setRowGroupInfo(
+      PageReadStore pageStore, Map<ColumnPath, ColumnChunkMetaData> metaData) {
+    for (int i = 0; i < readers.length; i++) {
+      try {
+        if (!(readers[i] instanceof CometConstantColumnReader)
+            && !(readers[i] instanceof CometPositionColumnReader)
+            && !(readers[i] instanceof CometDeleteColumnReader)) {
+          readers[i].reset();
+          readers[i].setPageReader(pageStore.getPageReader(readers[i].getDescriptor()));
+        }
+      } catch (IOException e) {
+        throw new UncheckedIOException("Failed to setRowGroupInfo for Comet vectorization", e);
+      }
+    }
+
+    for (int i = 0; i < readers.length; i++) {
+      delegate.getColumnReaders()[i] = this.readers[i].getDelegate();
+    }
+
+    this.rowStartPosInBatch =
+        pageStore
+            .getRowIndexOffset()
+            .orElseThrow(
+                () ->
+                    new IllegalArgumentException(
+                        "PageReadStore does not contain row index offset"));
+  }
+
+  public void setDeleteFilter(DeleteFilter<InternalRow> deleteFilter) {
+    this.deletes = deleteFilter;
+  }
+
+  @Override
+  public final ColumnarBatch read(ColumnarBatch reuse, int numRowsToRead) {
+    ColumnarBatch columnarBatch = new ColumnBatchLoader(numRowsToRead).loadDataToColumnBatch();
+    rowStartPosInBatch += numRowsToRead;
+    return columnarBatch;
+  }
+
+  @Override
+  public void setBatchSize(int batchSize) {
+    for (CometColumnReader reader : readers) {
+      if (reader != null) {
+        reader.setBatchSize(batchSize);
+      }
+    }
+  }
+
+  @Override
+  public void close() {
+    for (CometColumnReader reader : readers) {
+      if (reader != null) {
+        reader.close();
+      }
+    }
+  }
+
+  private class ColumnBatchLoader {
+    private final int batchSize;
+
+    ColumnBatchLoader(int numRowsToRead) {
+      Preconditions.checkArgument(
+          numRowsToRead > 0, "Invalid number of rows to read: %s", numRowsToRead);
+      this.batchSize = numRowsToRead;
+    }
+
+    ColumnarBatch loadDataToColumnBatch() {
+      ColumnVector[] vectors = readDataToColumnVectors();
+      int numLiveRows = batchSize;
+      if (hasIsDeletedColumn) {

Review Comment:
   What about an extra empty line to match the built-in reader and separate the if block?
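   For concreteness, a minimal sketch of the spacing being suggested (the body of the if block is elided here, as in the diff above; "the built-in reader" presumably refers to Iceberg's existing non-Comet ColumnarBatchReader in the same package):

       // Hypothetical illustration of the suggested formatting, not the PR's actual code:
       ColumnarBatch loadDataToColumnBatch() {
         ColumnVector[] vectors = readDataToColumnVectors();
         int numLiveRows = batchSize;

         // the blank line above separates the setup from the delete-handling block
         if (hasIsDeletedColumn) {
           // ... delete handling continues as in the PR
         }
         // ...
       }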
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use
the URL above to go to the specific comment.
To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org
For additional commands, e-mail: issues-h...@iceberg.apache.org