corleyma commented on code in PR #1427:
URL: https://github.com/apache/iceberg-python/pull/1427#discussion_r1884533676


##########
pyiceberg/table/__init__.py:
##########
@@ -1423,6 +1451,66 @@ def plan_files(self) -> Iterable[FileScanTask]:
             for data_entry in data_entries
         ]
 
+    def _target_split_size(self) -> int:
+        table_value = property_as_int(
+            self.table_metadata.properties, TableProperties.READ_SPLIT_SIZE, TableProperties.READ_SPLIT_SIZE_DEFAULT
+        )
+        return property_as_int(self.options, TableProperties.READ_SPLIT_SIZE, table_value)  # type: ignore
+
+    def _loop_back(self) -> int:
+        table_value = property_as_int(
+            self.table_metadata.properties, TableProperties.READ_SPLIT_LOOKBACK, TableProperties.READ_SPLIT_LOOKBACK_DEFAULT
+        )
+        return property_as_int(self.options, TableProperties.READ_SPLIT_LOOKBACK, table_value)  # type: ignore
+
+    def _split_open_file_cost(self) -> int:
+        table_value = property_as_int(
+            self.table_metadata.properties,
+            TableProperties.READ_SPLIT_OPEN_FILE_COST,
+            TableProperties.READ_SPLIT_OPEN_FILE_COST_DEFAULT,
+        )
+        return property_as_int(self.options, TableProperties.READ_SPLIT_OPEN_FILE_COST, table_value)  # type: ignore
+
+    def plan_task(self) -> Iterable[CombinedFileScanTask]:

Review Comment:
   I assume it's intentional that this isn't called yet from any of the methods that actually execute a scan (`to_arrow`, etc.)?
   
   If the plan is to call this in `to_arrow` eventually, it would be good to 
have some benchmarks with realistic latencies (e.g., against actual object 
storage).
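   Roughly what I have in mind for a benchmark, as a sketch only -- the catalog/table names are made up, and I'm assuming the split size can be overridden per scan via `options` using the usual `read.split.target-size` property, with the option taking precedence over the table property and the default:
   
   ```python
   import time

   from pyiceberg.catalog import load_catalog

   catalog = load_catalog("default")        # illustrative catalog name
   table = catalog.load_table("db.events")  # illustrative table backed by object storage

   # Sweep the target split size and time the planning step for each setting.
   for target_size in (64 << 20, 128 << 20, 256 << 20):
       scan = table.scan(options={"read.split.target-size": str(target_size)})
       start = time.monotonic()
       n_tasks = sum(1 for _ in scan.plan_task())
       print(f"{target_size=} {n_tasks=} planning_s={time.monotonic() - start:.2f}")
   ```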
   
   If there is no plan to call this directly in pyiceberg, I wonder who the intended consumers of this API would be? I would expect most query engines -- distributed or otherwise -- to have their own notions of how to optimize scan planning.
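   If the idea is that an external engine drives this, I'd picture something like the following (purely hypothetical -- `combined.tasks` is assumed to expose the grouped `FileScanTask`s, and `submit_to_worker` is a placeholder for whatever engine-specific dispatch would actually happen):
   
   ```python
   from pyiceberg.catalog import load_catalog

   catalog = load_catalog("default")        # illustrative names
   table = catalog.load_table("db.events")
   scan = table.scan(row_filter="event_ts >= '2024-01-01'")

   # Hand each combined split to a worker, which then reads only the files in its bundle.
   for combined in scan.plan_task():
       file_paths = [task.file.file_path for task in combined.tasks]
       submit_to_worker(file_paths)
   ```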



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

