This is an automated email from the ASF dual-hosted git repository.

ruifengz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 676cac092540 [SPARK-51915][PYTHON][CONNECT][TESTS] Enable SparkConnectDataFrameDebug in connect-only mode
676cac092540 is described below

commit 676cac092540755e04a5ba7eaaf1e5aea8138930
Author: Ruifeng Zheng <ruife...@apache.org>
AuthorDate: Fri Apr 25 18:07:03 2025 +0800

    [SPARK-51915][PYTHON][CONNECT][TESTS] Enable SparkConnectDataFrameDebug in connect-only mode
    
    ### What changes were proposed in this pull request?
    Enable SparkConnectDataFrameDebug in connect-only mode
    
    ### Why are the changes needed?
    to improve test coverage
    
    ### Does this PR introduce _any_ user-facing change?
    no
    
    ### How was this patch tested?
    CI
    
    ### Was this patch authored or co-authored using generative AI tooling?
    no
    
    Closes #50710 from zhengruifeng/connect-only-df-debug.
    
    Authored-by: Ruifeng Zheng <ruife...@apache.org>
    Signed-off-by: Ruifeng Zheng <ruife...@apache.org>
---
 python/pyspark/sql/tests/connect/test_df_debug.py | 20 ++++++++------------
 1 file changed, 8 insertions(+), 12 deletions(-)

diff --git a/python/pyspark/sql/tests/connect/test_df_debug.py b/python/pyspark/sql/tests/connect/test_df_debug.py
index 40b6a072e912..44ff85e2f9a9 100644
--- a/python/pyspark/sql/tests/connect/test_df_debug.py
+++ b/python/pyspark/sql/tests/connect/test_df_debug.py
@@ -17,17 +17,13 @@
 
 import unittest
 
-from pyspark.sql.tests.connect.test_connect_basic import SparkConnectSQLTestCase
-from pyspark.testing.connectutils import should_test_connect
+from pyspark.testing.connectutils import ReusedConnectTestCase
 from pyspark.testing.utils import have_graphviz, graphviz_requirement_message
 
-if should_test_connect:
-    from pyspark.sql.connect.dataframe import DataFrame
 
-
-class SparkConnectDataFrameDebug(SparkConnectSQLTestCase):
+class SparkConnectDataFrameDebug(ReusedConnectTestCase):
     def test_df_debug_basics(self):
-        df: DataFrame = self.connect.range(100).repartition(10).groupBy("id").count()
+        df = self.spark.range(100).repartition(10).groupBy("id").count()
         x = df.collect()  # noqa: F841
         ei = df.executionInfo
 
@@ -35,12 +31,12 @@ class SparkConnectDataFrameDebug(SparkConnectSQLTestCase):
         self.assertIn(root, graph, "The root must be rooted in the graph")
 
     def test_df_quey_execution_empty_before_execution(self):
-        df: DataFrame = self.connect.range(100).repartition(10).groupBy("id").count()
+        df = self.spark.range(100).repartition(10).groupBy("id").count()
         ei = df.executionInfo
         self.assertIsNone(ei, "The query execution must be None before the action is executed")
 
     def test_df_query_execution_with_writes(self):
-        df: DataFrame = self.connect.range(100).repartition(10).groupBy("id").count()
+        df = self.spark.range(100).repartition(10).groupBy("id").count()
         df.write.save("/tmp/test_df_query_execution_with_writes", format="json", mode="overwrite")
         ei = df.executionInfo
         self.assertIsNotNone(
@@ -48,18 +44,18 @@ class SparkConnectDataFrameDebug(SparkConnectSQLTestCase):
         )
 
     def test_query_execution_text_format(self):
-        df: DataFrame = self.connect.range(100).repartition(10).groupBy("id").count()
+        df = self.spark.range(100).repartition(10).groupBy("id").count()
         df.collect()
         self.assertIn("HashAggregate", df.executionInfo.metrics.toText())
 
         # Different execution mode.
-        df: DataFrame = self.connect.range(100).repartition(10).groupBy("id").count()
+        df = self.spark.range(100).repartition(10).groupBy("id").count()
         df.toPandas()
         self.assertIn("HashAggregate", df.executionInfo.metrics.toText())
 
     @unittest.skipIf(not have_graphviz, graphviz_requirement_message)
     def test_df_query_execution_metrics_to_dot(self):
-        df: DataFrame = self.connect.range(100).repartition(10).groupBy("id").count()
+        df = self.spark.range(100).repartition(10).groupBy("id").count()
         x = df.collect()  # noqa: F841
         ei = df.executionInfo
 


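Note on the change above: the suite now extends ReusedConnectTestCase from pyspark.testing.connectutils, which supplies a Spark Connect session as self.spark, instead of SparkConnectSQLTestCase with its self.connect attribute and the should_test_connect import guard, so the tests can run in connect-only mode. The following is a minimal, hypothetical sketch of the resulting test shape (class and method names are illustrative, and it assumes a working Spark Connect test environment), not the committed code itself:

    import unittest

    from pyspark.testing.connectutils import ReusedConnectTestCase


    class ConnectDebugExample(ReusedConnectTestCase):
        def test_execution_info_after_action(self):
            # self.spark is the Spark Connect session supplied by the base class.
            df = self.spark.range(100).repartition(10).groupBy("id").count()

            # Before any action runs, no execution information is attached.
            self.assertIsNone(df.executionInfo)

            # Running an action (collect) captures query execution metrics.
            df.collect()
            ei = df.executionInfo
            self.assertIsNotNone(ei)
            self.assertIn("HashAggregate", ei.metrics.toText())


    if __name__ == "__main__":
        unittest.main()

The same executionInfo property also backs the graphviz-based DOT rendering exercised by test_df_query_execution_metrics_to_dot in the diff, which is skipped when graphviz is not installed.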
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
