This is an automated email from the ASF dual-hosted git repository.

chrispeck pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/pinot.git


The following commit(s) were added to refs/heads/master by this push:
     new a5bc956fb0 [multistage] Handle Column Less Tuples in BlockSplitter (#16058)
a5bc956fb0 is described below

commit a5bc956fb08d2b9b9b91fb3861041a91d09ad0a4
Author: Ankit Sultana <ankitsult...@uber.com>
AuthorDate: Mon Jun 9 21:56:54 2025 -0500

    [multistage] Handle Column Less Tuples in BlockSplitter (#16058)
---
 .../main/java/org/apache/pinot/query/runtime/blocks/BlockSplitter.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pinot-query-runtime/src/main/java/org/apache/pinot/query/runtime/blocks/BlockSplitter.java b/pinot-query-runtime/src/main/java/org/apache/pinot/query/runtime/blocks/BlockSplitter.java
index 28b673be55..9a4195ef32 100644
--- a/pinot-query-runtime/src/main/java/org/apache/pinot/query/runtime/blocks/BlockSplitter.java
+++ b/pinot-query-runtime/src/main/java/org/apache/pinot/query/runtime/blocks/BlockSplitter.java
@@ -67,7 +67,7 @@ public interface BlockSplitter {
       // Use estimated row size, this estimate is not accurate and is used to estimate numRowsPerChunk only.
       DataSchema dataSchema = block.getDataSchema();
       assert dataSchema != null;
-      int estimatedRowSizeInBytes = dataSchema.getColumnNames().length * MEDIAN_COLUMN_SIZE_BYTES;
+      int estimatedRowSizeInBytes = Math.max(1, dataSchema.getColumnNames().length * MEDIAN_COLUMN_SIZE_BYTES);
       int numRowsPerChunk = maxBlockSize / estimatedRowSizeInBytes;
       Preconditions.checkState(numRowsPerChunk > 0, "row size too large for query engine to handle, abort!");
 


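Note on the change: for a column-less tuple, dataSchema.getColumnNames().length is 0, so the pre-patch estimate was 0 bytes and the division maxBlockSize / estimatedRowSizeInBytes threw an ArithmeticException before the Preconditions check could fire. Clamping the estimate to at least 1 byte keeps the division well-defined. Below is a minimal standalone sketch of just this arithmetic; the class name, method signature, and the MEDIAN_COLUMN_SIZE_BYTES value are hypothetical (the real constant in Pinot may differ).

public class BlockSplitterSketch {
  // Stands in for BlockSplitter's MEDIAN_COLUMN_SIZE_BYTES; value is assumed.
  private static final int MEDIAN_COLUMN_SIZE_BYTES = 8;

  static int numRowsPerChunk(int numColumns, int maxBlockSize) {
    // Before the patch: numColumns == 0 (a column-less tuple) made the
    // estimate 0, and the division below threw ArithmeticException.
    int estimatedRowSizeInBytes = Math.max(1, numColumns * MEDIAN_COLUMN_SIZE_BYTES);
    int numRowsPerChunk = maxBlockSize / estimatedRowSizeInBytes;
    if (numRowsPerChunk <= 0) {
      throw new IllegalStateException("row size too large for query engine to handle, abort!");
    }
    return numRowsPerChunk;
  }

  public static void main(String[] args) {
    // With the clamp, a zero-column schema degrades to a 1-byte row estimate
    // instead of dividing by zero.
    System.out.println(numRowsPerChunk(0, 4096));  // 4096
    System.out.println(numRowsPerChunk(4, 4096));  // 128
  }
}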
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@pinot.apache.org
For additional commands, e-mail: commits-h...@pinot.apache.org
