huaxingao commented on code in PR #11551:
URL: https://github.com/apache/iceberg/pull/11551#discussion_r1865249456
##########
spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/TestSparkReaderDeletes.java:
##########
@@ -622,6 +623,50 @@ public void testPosDeletesOnParquetFileWithMultipleRowGroups() throws IOException
     assertThat(rowSet(tblName, tbl, "*")).hasSize(193);
   }
+  @TestTemplate
+  public void testEqualityDeleteWithDifferentScanAndDeleteColumns() throws IOException {
+    assumeThat(format).isEqualTo(FileFormat.PARQUET);
+    initDateTable();
+
+    Schema deleteRowSchema = dateTable.schema().select("dt");
+    Record dataDelete = GenericRecord.create(deleteRowSchema);
+    List<Record> dataDeletes =
+        Lists.newArrayList(
+            dataDelete.copy("dt", LocalDate.parse("2021-09-01")),
+            dataDelete.copy("dt", LocalDate.parse("2021-09-02")),
+            dataDelete.copy("dt", LocalDate.parse("2021-09-03")));
+
+    DeleteFile eqDeletes =
+        FileHelpers.writeDeleteFile(
+            dateTable,
+            Files.localOutput(File.createTempFile("junit", null, temp.toFile())),
+            TestHelpers.Row.of(0),
+            dataDeletes.subList(0, 3),
+            deleteRowSchema);
+
+    dateTable.newRowDelta().addDeletes(eqDeletes).commit();
+
+    CloseableIterable<CombinedScanTask> tasks =
+        TableScanUtil.planTasks(
+            dateTable.newScan().planFiles(),
+            TableProperties.METADATA_SPLIT_SIZE_DEFAULT,
+            TableProperties.SPLIT_LOOKBACK_DEFAULT,
+            TableProperties.SPLIT_OPEN_FILE_COST_DEFAULT);
+
+    for (CombinedScanTask task : tasks) {
+      try (BatchDataReader reader =
+          new BatchDataReader(
+              // expected column is id, while the equality filter column is dt
+              dateTable, task, dateTable.schema(), dateTable.schema().select("id"), false, 7)) {
+        while (reader.next()) {
+          ColumnarBatch columnarBatch = reader.get();
+          int numOfCols = columnarBatch.numCols();
+          assertThat(numOfCols).as("Number of columns").isEqualTo(1);
Review Comment:
Added. Thanks!
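
For anyone following along, here is a minimal standalone sketch of the projection mismatch this test exercises: the scan projects only `id`, while the equality delete is keyed on `dt`, so the reader must fetch `dt` internally and trim it off before returning the batch. The snippet is illustrative only, not code from this PR, and the class name is hypothetical:

```java
import org.apache.iceberg.Schema;
import org.apache.iceberg.types.Types;

public class ProjectionMismatchSketch {
  public static void main(String[] args) {
    // Table with the two columns the test cares about.
    Schema tableSchema =
        new Schema(
            Types.NestedField.required(1, "id", Types.LongType.get()),
            Types.NestedField.required(2, "dt", Types.DateType.get()));

    // The query projects only `id` ...
    Schema expected = tableSchema.select("id");
    // ... but the equality delete is keyed on `dt`.
    Schema deleteKey = tableSchema.select("dt");

    // The vectorized reader has to read `dt` alongside `id` to evaluate the
    // delete, then trim the batch back to the single projected column,
    // which is why the test asserts numCols() == 1.
    System.out.println(expected.columns().size()); // 1
    System.out.println(deleteKey.columns().size()); // 1
  }
}
```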
##########
spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/data/vectorized/ColumnarBatchReader.java:
##########
@@ -245,5 +269,20 @@ void applyEqDelete(ColumnarBatch columnarBatch) {
columnarBatch.setNumRows(currentRowId);
}
+
+  ColumnarBatch removeExtraColumns(
+      ColumnVector[] arrowColumnVectors, ColumnarBatch columnarBatch) {
+    int numOfExtraColumns = numOfExtraColumns(deletes);
+    if (numOfExtraColumns > 0) {
+      int newLength = arrowColumnVectors.length - numOfExtraColumns;
+      // In DeleteFilter.fileProjection, the columns for missingIds (the columns required
+      // for equality delete or ROW_POSITION) are appended to the end of the expectedSchema.
+      // Therefore, these extra columns can be removed from the end of arrowColumnVectors.
+      ColumnVector[] newColumns = Arrays.copyOf(arrowColumnVectors, newLength);
+      return new ColumnarBatch(newColumns, columnarBatch.numRows());
+    } else {
+      return columnarBatch;
+    }
+  }
Review Comment:
Good suggestion! Changed.
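
One aside on the trim step itself: because the delete-only columns are appended at the end of the file projection, dropping them reduces to a prefix copy of the vector array. A minimal standalone sketch of that idea, with plain strings standing in for ColumnVectors and a hypothetical class name, not code from this PR:

```java
import java.util.Arrays;

public class TrimTrailingColumnsSketch {
  public static void main(String[] args) {
    // `dt` was appended only so the equality delete could be evaluated.
    String[] vectors = {"id", "data", "dt"};
    int numOfExtraColumns = 1;

    // A prefix copy drops the trailing delete-only columns, mirroring
    // Arrays.copyOf(arrowColumnVectors, newLength) in removeExtraColumns.
    String[] trimmed = Arrays.copyOf(vectors, vectors.length - numOfExtraColumns);
    System.out.println(Arrays.toString(trimmed)); // [id, data]
  }
}
```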
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]