huaxingao commented on code in PR #10659: URL: https://github.com/apache/iceberg/pull/10659#discussion_r1683657141
########## spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/source/SparkScan.java: ########## @@ -175,7 +184,37 @@ public Statistics estimateStatistics() { protected Statistics estimateStatistics(Snapshot snapshot) { // its a fresh table, no data if (snapshot == null) { - return new Stats(0L, 0L); + return new Stats(0L, 0L, Collections.emptyMap()); + } + + boolean cboEnabled = + Boolean.parseBoolean(spark.conf().get(SQLConf.CBO_ENABLED().key(), "false")); + Map<NamedReference, ColumnStatistics> colStatsMap = null; + if (readConf.enableColumnStats() && cboEnabled) { + colStatsMap = Maps.newHashMap(); + List<StatisticsFile> files = table.statisticsFiles(); + if (!files.isEmpty()) { + List<BlobMetadata> metadataList = (files.get(0)).blobMetadata(); + + for (BlobMetadata blobMetadata : metadataList) { + int id = blobMetadata.fields().get(0); + String colName = table.schema().findColumnName(id); + NamedReference ref = FieldReference.column(colName); + + long ndv = 0; + String ndvStr = blobMetadata.properties().get("ndv"); + if (ndvStr != null && !ndvStr.isEmpty()) { + ndv = Long.parseLong(blobMetadata.properties().get("ndv")); + } else { + LOG.debug("ndv is not set in BlobMetadata for column {}", colName); + } + + // TODO: Fill min, max and null from the manifest file + ColumnStatistics colStats = new SparkColumnStatistics(ndv, null, null, 0L, 0L, 0L, null); Review Comment: Checked Spark code, should be `None` if ndv is not available. I changed the code accordingly. 
########## spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/source/SparkScan.java: ########## @@ -175,7 +184,37 @@ public Statistics estimateStatistics() { protected Statistics estimateStatistics(Snapshot snapshot) { // its a fresh table, no data if (snapshot == null) { - return new Stats(0L, 0L); + return new Stats(0L, 0L, Collections.emptyMap()); + } + + boolean cboEnabled = + Boolean.parseBoolean(spark.conf().get(SQLConf.CBO_ENABLED().key(), "false")); + Map<NamedReference, ColumnStatistics> colStatsMap = null; + if (readConf.enableColumnStats() && cboEnabled) { + colStatsMap = Maps.newHashMap(); + List<StatisticsFile> files = table.statisticsFiles(); + if (!files.isEmpty()) { + List<BlobMetadata> metadataList = (files.get(0)).blobMetadata(); + + for (BlobMetadata blobMetadata : metadataList) { + int id = blobMetadata.fields().get(0); + String colName = table.schema().findColumnName(id); + NamedReference ref = FieldReference.column(colName); + + long ndv = 0; + String ndvStr = blobMetadata.properties().get("ndv"); Review Comment: Changed. Thanks ########## spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/TestSparkScan.java: ########## @@ -734,6 +795,19 @@ private Expression[] expressions(Expression... expressions) { return expressions; } + private void checkStatistics(SparkScan scan, long expectedRowCount, boolean expectedColumnStats) { Review Comment: Added the check. Thanks -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: issues-unsubscribe@iceberg.apache.org For queries about this service, please contact Infrastructure at: users@infra.apache.org --------------------------------------------------------------------- To unsubscribe, e-mail: issues-unsubscribe@iceberg.apache.org For additional commands, e-mail: issues-help@iceberg.apache.org