huaxingao commented on code in PR #6622:
URL: https://github.com/apache/iceberg/pull/6622#discussion_r1096627129


##########
spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/SparkScanBuilder.java:
##########
@@ -158,6 +182,141 @@ public Filter[] pushedFilters() {
     return pushedFilters;
   }
 
+  @Override
+  public boolean pushAggregation(Aggregation aggregation) {
+    if (!pushDownAggregate(aggregation)) {
+      return false;
+    }
+
+    AggregateEvaluator aggregateEvaluator;
+    try {
+      List<Expression> aggregates =
+          Arrays.stream(aggregation.aggregateExpressions())
+              .map(agg -> SparkAggregates.convert(agg))
+              .collect(Collectors.toList());
+      aggregateEvaluator = AggregateEvaluator.create(schema, aggregates);
+    } catch (Exception e) {
+      LOG.info("Can't push down aggregates: " + e.getMessage());
+      return false;
+    }
+
+    if (!metricsModeSupportsAggregatePushDown(aggregateEvaluator.aggregates())) {
+      LOG.info("The MetricsMode doesn't support aggregate push down.");
+      return false;
+    }
+
+    List<ManifestFile> manifests = getSnapshot().allManifests(table.io());
+
+    for (ManifestFile manifest : manifests) {
+      try (ManifestReader<DataFile> reader = ManifestFiles.read(manifest, table.io())) {
+        for (DataFile dataFile : reader) {
+          aggregateEvaluator.update(dataFile.copy());
+        }
+      } catch (IOException e) {
+        LOG.info("Can't push down aggregates: " + e.getMessage());
+        return false;
+      }
+    }
+
+    Object[] res = aggregateEvaluator.result();
+    applyDataTypeConversionIfNecessary(res);
+
+    List<Object> valuesInSparkInternalRow = java.util.Arrays.asList(res);
+    this.pushedAggregateRows = new InternalRow[1];
+    pushedAggregateRows[0] =
+        InternalRow.fromSeq(JavaConverters.asScalaBuffer(valuesInSparkInternalRow).toSeq());
+    pushedAggregateSchema =
+        SparkSchemaUtil.convert(new Schema(aggregateEvaluator.resultType().fields()));
+    return true;
+  }
+
+  private boolean pushDownAggregate(Aggregation aggregation) {
+    if (!(table instanceof BaseTable)) {
+      return false;
+    }
+
+    if (!readConf.aggregatePushDown()) {
+      return false;
+    }
+
+    Snapshot snapshot = getSnapshot();
+    if (snapshot == null) {
+      return false;
+    } else {
+      Map<String, String> map = snapshot.summary();
+      // if there are row-level deletes in the current snapshot, the statistics
+      // may have changed, so disable aggregate push down.
+      if (Integer.parseInt(map.getOrDefault("total-position-deletes", "0")) > 0
+          || Integer.parseInt(map.getOrDefault("total-equality-deletes", "0")) > 0) {
+        LOG.info("Cannot push down aggregates when row level deletes exist.");

Review Comment:
   I have changed the code to check for deletes in the tasks and abort the push down if deletes are present.
   
   I also agree it may be better to introduce another setting to return an approximate result when deletes are present. We can probably do this in a follow-up PR.



##########
spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/SparkScanBuilder.java:
##########
@@ -158,6 +182,141 @@ public Filter[] pushedFilters() {
     return pushedFilters;
   }
 
+  @Override
+  public boolean pushAggregation(Aggregation aggregation) {
+    if (!pushDownAggregate(aggregation)) {
+      return false;
+    }
+
+    AggregateEvaluator aggregateEvaluator;
+    try {
+      List<Expression> aggregates =
+          Arrays.stream(aggregation.aggregateExpressions())
+              .map(agg -> SparkAggregates.convert(agg))
+              .collect(Collectors.toList());
+      aggregateEvaluator = AggregateEvaluator.create(schema, aggregates);
+    } catch (Exception e) {
+      LOG.info("Can't push down aggregates: " + e.getMessage());
+      return false;
+    }
+
+    if (!metricsModeSupportsAggregatePushDown(aggregateEvaluator.aggregates())) {
+      LOG.info("The MetricsMode doesn't support aggregate push down.");
+      return false;
+    }
+
+    List<ManifestFile> manifests = getSnapshot().allManifests(table.io());
+
+    for (ManifestFile manifest : manifests) {
+      try (ManifestReader<DataFile> reader = ManifestFiles.read(manifest, table.io())) {
+        for (DataFile dataFile : reader) {
+          aggregateEvaluator.update(dataFile.copy());
+        }
+      } catch (IOException e) {
+        LOG.info("Can't push down aggregates: " + e.getMessage());
+        return false;
+      }
+    }
+
+    Object[] res = aggregateEvaluator.result();
+    applyDataTypeConversionIfNecessary(res);
+
+    List<Object> valuesInSparkInternalRow = java.util.Arrays.asList(res);
+    this.pushedAggregateRows = new InternalRow[1];
+    pushedAggregateRows[0] =
+        InternalRow.fromSeq(JavaConverters.asScalaBuffer(valuesInSparkInternalRow).toSeq());
+    pushedAggregateSchema =
+        SparkSchemaUtil.convert(new Schema(aggregateEvaluator.resultType().fields()));
+    return true;
+  }
+
+  private boolean pushDownAggregate(Aggregation aggregation) {
+    if (!(table instanceof BaseTable)) {
+      return false;
+    }
+
+    if (!readConf.aggregatePushDown()) {
+      return false;
+    }
+
+    Snapshot snapshot = getSnapshot();
+    if (snapshot == null) {
+      return false;
+    } else {
+      Map<String, String> map = snapshot.summary();
+      // if there are row-level deletes in the current snapshot, the statistics
+      // may have changed, so disable aggregate push down.
+      if (Integer.parseInt(map.getOrDefault("total-position-deletes", "0")) > 0
+          || Integer.parseInt(map.getOrDefault("total-equality-deletes", "0")) > 0) {
+        LOG.info("Cannot push down aggregates when row level deletes exist.");
+        return false;
+      }
+    }
+
+    // If the group by expression is the same as the partition, the statistics information
+    // can still be used to calculate min/max/count; aggregate push down for this case will be enabled in the next phase.
+    // TODO: enable aggregate push down for partition col group by expression
+    if (aggregation.groupByExpressions().length > 0) {
+      LOG.info("Group by aggregation push down is not supported yet.");

Review Comment:
   Fixed, thanks.
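   
   Regarding the TODO above (enabling push down when the group by matches the partition), a hypothetical sketch of that check, assuming identity partition columns and Spark `NamedReference` grouping expressions (`groupByMatchesPartition` is illustrative only and not part of this PR):
   
   ```java
   import java.util.HashSet;
   import java.util.Set;
   import org.apache.iceberg.PartitionField;
   import org.apache.iceberg.PartitionSpec;
   import org.apache.iceberg.Schema;
   import org.apache.spark.sql.connector.expressions.Expression;
   import org.apache.spark.sql.connector.expressions.NamedReference;
   import org.apache.spark.sql.connector.expressions.aggregate.Aggregation;
   
   class GroupByPartitionCheck {
     // Group by push down would only be considered when every grouping expression
     // is a plain column reference to an identity partition column, since the
     // partition-level metrics line up with the grouping keys only in that case.
     static boolean groupByMatchesPartition(Aggregation aggregation, PartitionSpec spec, Schema schema) {
       Set<String> identityPartitionColumns = new HashSet<>();
       for (PartitionField field : spec.fields()) {
         if (field.transform().isIdentity()) {
           identityPartitionColumns.add(schema.findColumnName(field.sourceId()));
         }
       }
   
       for (Expression expr : aggregation.groupByExpressions()) {
         if (!(expr instanceof NamedReference)) {
           return false; // only plain column references are comparable to partition columns
         }
         String column = String.join(".", ((NamedReference) expr).fieldNames());
         if (!identityPartitionColumns.contains(column)) {
           return false;
         }
       }
       return true;
     }
   }
   ```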



