rdblue commented on code in PR #6622:
URL: https://github.com/apache/iceberg/pull/6622#discussion_r1103897385


##########
spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/SparkScanBuilder.java:
##########
@@ -158,6 +179,129 @@ public Filter[] pushedFilters() {
     return pushedFilters;
   }
 
+  @Override
+  public boolean pushAggregation(Aggregation aggregation) {
+    if (!canPushDownAggregation(aggregation)) {
+      return false;
+    }
+
+    AggregateEvaluator aggregateEvaluator;
+    try {
+      List<Expression> aggregates =
+          Arrays.stream(aggregation.aggregateExpressions())
+              .map(agg -> SparkAggregates.convert(agg))
+              .collect(Collectors.toList());
+      aggregateEvaluator = AggregateEvaluator.create(schema, aggregates);
+    } catch (UnsupportedOperationException | IllegalArgumentException e) {
+      LOG.info("Skipped aggregate pushdown: " + e);
+      return false;
+    }
+
+    if (!metricsModeSupportsAggregatePushDown(aggregateEvaluator.aggregates())) {
+      return false;
+    }
+
+    TableScanContext context = new TableScanContext().reportWith(((BaseTable) table).getReporter());
+    context.setColStats(true);
+    TableScan scan = new DataTableScan(table, table.schema(), context);
+    Snapshot snapshot = readSnapshot();
+    if (snapshot == null) {
+      LOG.info("Skipped aggregate pushdown: table snapshot is null");
+      return false;
+    }
+    scan = scan.useSnapshot(snapshot.snapshotId());
+    scan = configureSplitPlanning(scan);
+
+    try (CloseableIterable<FileScanTask> fileScanTasks = scan.planFiles()) {
+      List<FileScanTask> tasks = ImmutableList.copyOf(fileScanTasks);
+      for (FileScanTask task : tasks) {
+        if (!task.deletes().isEmpty()) {
+          LOG.info("Skipped aggregate pushdown: detected row level deletes.");
+          return false;
+        }
+        if (!aggregateEvaluator.update(task.file())) {
+          return false;
+        }
+      }
+    } catch (IOException e) {
+      LOG.info("Skipped aggregate pushdown: " + e);
+      return false;
+    }
+
+    pushedAggregateSchema =
+        SparkSchemaUtil.convert(new Schema(aggregateEvaluator.resultType().fields()));
+    this.pushedAggregateRows = new InternalRow[1];
+    StructLike structLike = aggregateEvaluator.result();
+    this.pushedAggregateRows[0] =
+        new StructInternalRow(aggregateEvaluator.resultType()).setStruct(structLike);
+    return true;
+  }
+
+  private boolean canPushDownAggregation(Aggregation aggregation) {
+    if (!(table instanceof BaseTable)) {
+      return false;
+    }
+
+    if (!readConf.aggregatePushDownEnabled()) {
+      return false;
+    }
+
+    // If the group by expression is the same as the partition, the statistics information can
+    // still be used to calculate min/max/count; aggregate push down for this case will be
+    // enabled in the next phase.
+    // TODO: enable aggregate push down for partition col group by expression
+    if (aggregation.groupByExpressions().length > 0) {
+      LOG.info("Skipped aggregate pushdown: group by aggregation push down is 
not supported.");

Review Comment:
   Nit: drop the ending punctuation in the log message.
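   For example, a minimal compilable sketch of the suggested style (illustrative only; `LogStyleExample` and `reason` are made-up names, not code from this PR), assuming the SLF4J logger this file already uses:

       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

       class LogStyleExample {
         private static final Logger LOG = LoggerFactory.getLogger(LogStyleExample.class);

         void logSkip(String reason) {
           // Parameterized message, no trailing period
           LOG.info("Skipped aggregate pushdown: {}", reason);
         }
       }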


