This is an automated email from the ASF dual-hosted git repository.

dongjoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new aeb2a13471e [SPARK-41875][CONNECT][PYTHON][TESTS][FOLLOWUP] Enable parity test `test_to`
aeb2a13471e is described below

commit aeb2a13471e99460e45dbf1a2cf7cc5cc9b74dbc
Author: Ruifeng Zheng <[email protected]>
AuthorDate: Fri Jan 27 12:18:41 2023 -0800

    [SPARK-41875][CONNECT][PYTHON][TESTS][FOLLOWUP] Enable parity test `test_to`
    
    ### What changes were proposed in this pull request?
    Enable parity test `test_to`
    
    ### Why are the changes needed?
    For test coverage.
    
    ### Does this PR introduce _any_ user-facing change?
    No, test-only.
    
    ### How was this patch tested?
    Enabled the test.
    
    Closes #39766 from zhengruifeng/connect_enable_41875.
    
    Authored-by: Ruifeng Zheng <[email protected]>
    Signed-off-by: Dongjoon Hyun <[email protected]>
---
 python/pyspark/sql/tests/connect/test_parity_dataframe.py |  5 -----
 python/pyspark/sql/tests/test_dataframe.py                | 14 +++++++++++---
 2 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/python/pyspark/sql/tests/connect/test_parity_dataframe.py b/python/pyspark/sql/tests/connect/test_parity_dataframe.py
index cebe501938f..db0d727d330 100644
--- a/python/pyspark/sql/tests/connect/test_parity_dataframe.py
+++ b/python/pyspark/sql/tests/connect/test_parity_dataframe.py
@@ -100,11 +100,6 @@ class DataFrameParityTests(DataFrameTestsMixin, ReusedConnectTestCase):
     def test_sample(self):
         super().test_sample()
 
-    # TODO(SPARK-41875): throw proper errors in Dataset.to()
-    @unittest.skip("Fails in Spark Connect, should enable.")
-    def test_to(self):
-        super().test_to()
-
     @unittest.skip("Spark Connect does not support RDD but the tests depend on 
them.")
     def test_toDF_with_schema_string(self):
         super().test_toDF_with_schema_string()
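For readers unfamiliar with the parity suite: the Connect tests reuse the shared DataFrame tests through mixin inheritance, so deleting the skip override above is all that is needed for `test_to` to run against Spark Connect. A minimal sketch of that pattern follows; the import of `ReusedConnectTestCase` from `pyspark.testing.connectutils` is an assumption based on how the parity tests are wired up, not part of this patch.

```python
# Sketch of the parity-test pattern used by test_parity_dataframe.py
# (assumed imports; not part of the patch above).
import unittest

from pyspark.sql.tests.test_dataframe import DataFrameTestsMixin
from pyspark.testing.connectutils import ReusedConnectTestCase


class DataFrameParityTests(DataFrameTestsMixin, ReusedConnectTestCase):
    # Every test defined on DataFrameTestsMixin, including test_to, runs
    # against a Spark Connect session unless it is overridden with
    # @unittest.skip, as test_to was before this commit.
    pass


if __name__ == "__main__":
    unittest.main()
```
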
diff --git a/python/pyspark/sql/tests/test_dataframe.py b/python/pyspark/sql/tests/test_dataframe.py
index 7ccb78856d2..845cf0f1fbe 100644
--- a/python/pyspark/sql/tests/test_dataframe.py
+++ b/python/pyspark/sql/tests/test_dataframe.py
@@ -43,7 +43,11 @@ from pyspark.sql.types import (
     FloatType,
     DayTimeIntervalType,
 )
-from pyspark.errors import AnalysisException, IllegalArgumentException
+from pyspark.errors import (
+    AnalysisException,
+    IllegalArgumentException,
+    SparkConnectAnalysisException,
+)
 from pyspark.testing.sqlutils import (
     ReusedSQLTestCase,
     SQLTestUtils,
@@ -1503,13 +1507,17 @@ class DataFrameTestsMixin:
         # incompatible field nullability
         schema4 = StructType([StructField("j", LongType(), False)])
         self.assertRaisesRegex(
-            AnalysisException, "NULLABLE_COLUMN_OR_FIELD", lambda: 
df.to(schema4)
+            (AnalysisException, SparkConnectAnalysisException),
+            "NULLABLE_COLUMN_OR_FIELD",
+            lambda: df.to(schema4).count(),
         )
 
         # field cannot upcast
         schema5 = StructType([StructField("i", LongType())])
         self.assertRaisesRegex(
-            AnalysisException, "INVALID_COLUMN_OR_FIELD_DATA_TYPE", lambda: 
df.to(schema5)
+            (AnalysisException, SparkConnectAnalysisException),
+            "INVALID_COLUMN_OR_FIELD_DATA_TYPE",
+            lambda: df.to(schema5).count(),
         )
 
 
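Why the shared test changed: the Spark Connect client builds plans lazily, so `df.to(schema)` alone does not surface the analysis error, and the error arrives as `SparkConnectAnalysisException` rather than the classic `AnalysisException`. The patched assertions therefore force execution with `.count()` and accept either exception class. Below is a self-contained sketch of the same assertion shape; the session setup, data, and schema are illustrative, not taken from the test.

```python
# Minimal sketch of the assertion pattern used in the patched test_to
# (illustrative session and data; only the assertion shape mirrors the patch).
import unittest

from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, LongType
from pyspark.errors import AnalysisException, SparkConnectAnalysisException


class ToSchemaErrorSketch(unittest.TestCase):
    def test_to_rejects_non_nullable_target(self):
        spark = SparkSession.builder.master("local[1]").getOrCreate()
        try:
            df = spark.createDataFrame([(1,)], "j long")  # "j" is nullable here
            target = StructType([StructField("j", LongType(), False)])  # non-nullable
            self.assertRaisesRegex(
                # Accept either exception class so the same test body passes
                # under classic PySpark and under Spark Connect.
                (AnalysisException, SparkConnectAnalysisException),
                "NULLABLE_COLUMN_OR_FIELD",
                # .count() forces execution; the Connect client is lazy, so the
                # analysis error only appears once the plan actually runs.
                lambda: df.to(target).count(),
            )
        finally:
            spark.stop()


if __name__ == "__main__":
    unittest.main()
```
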


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
