This is an automated email from the ASF dual-hosted git repository.

yangjie01 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 4cc41ea63f94 [SPARK-48943][TESTS] Upgrade `h2` to 2.3.230 and enhance the test coverage of behavior changes of `asin` and `acos` complying with Standard SQL
4cc41ea63f94 is described below

commit 4cc41ea63f943b61be8f771f5cd95cfd4ea15c2e
Author: Wei Guo <[email protected]>
AuthorDate: Tue Jul 23 10:21:32 2024 +0800

    [SPARK-48943][TESTS] Upgrade `h2` to 2.3.230 and enhance the test coverage of behavior changes of `asin` and `acos` complying with Standard SQL
    
    ### What changes were proposed in this pull request?
    
    This PR aims to upgrade `h2` from 2.2.220 to 2.3.230 and to enhance the test coverage of the behavior changes of `asin` and `acos`, which now comply with Standard SQL.
    
    The details of the behavior changes are as follows:
    After https://github.com/h2database/h2database/commit/186647d4a35d05681febf4f53502b306aa6d511a, the behavior of `asin` and `acos` in h2 complies with Standard SQL: an exception is thrown directly when the argument is invalid (< -1d || > 1d).
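
    A minimal sketch of the new h2 behavior over plain JDBC (illustrative only, not part of this patch; the in-memory URL and the literal argument are assumptions):

    ```scala
    import java.sql.DriverManager

    // Query an in-memory h2 (2.3.230) database directly.
    val conn = DriverManager.getConnection("jdbc:h2:mem:testdb")
    try {
      val stmt = conn.createStatement()
      // ASIN/ACOS with an out-of-range argument now raise
      // org.h2.jdbc.JdbcSQLDataException with an "Invalid value" message
      // instead of evaluating the function.
      stmt.executeQuery("SELECT ASIN(2.0)")
    } finally {
      conn.close()
    }
    ```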
    
    ### Why are the changes needed?
    
    2.3.230 is the latest version of `h2` and contains a lot of bug fixes and improvements.
    Full change notes:
    https://www.h2database.com/html/changelog.html
    
    ### Does this PR introduce _any_ user-facing change?
    
    No.
    
    ### How was this patch tested?
    
    Updated an existing test case and added a new test case.
    Passed GA and manually ran `JDBCV2Suite`.
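
    For reference, a typical way to run the suite locally with the standard Spark sbt wrapper (exact invocation assumed, not part of this patch):

    ```
    build/sbt "sql/testOnly *JDBCV2Suite"
    ```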
    
    ### Was this patch authored or co-authored using generative AI tooling?
    
    No.
    
    Closes #47414 from wayneguow/upgrade_h2.
    
    Authored-by: Wei Guo <[email protected]>
    Signed-off-by: yangjie01 <[email protected]>
---
 connect/server/pom.xml                             |  2 +-
 sql/core/pom.xml                                   |  2 +-
 .../org/apache/spark/sql/jdbc/JDBCV2Suite.scala    | 58 +++++++++++++++-------
 3 files changed, 42 insertions(+), 20 deletions(-)

diff --git a/connect/server/pom.xml b/connect/server/pom.xml
index 73a3310c8a38..ecbb22168aa1 100644
--- a/connect/server/pom.xml
+++ b/connect/server/pom.xml
@@ -254,7 +254,7 @@
     <dependency>
       <groupId>com.h2database</groupId>
       <artifactId>h2</artifactId>
-      <version>2.2.220</version>
+      <version>2.3.230</version>
       <scope>test</scope>
     </dependency>
     <dependency>
diff --git a/sql/core/pom.xml b/sql/core/pom.xml
index 59d798e6e62f..c891763eb4e1 100644
--- a/sql/core/pom.xml
+++ b/sql/core/pom.xml
@@ -166,7 +166,7 @@
     <dependency>
       <groupId>com.h2database</groupId>
       <artifactId>h2</artifactId>
-      <version>2.2.220</version>
+      <version>2.3.230</version>
       <scope>test</scope>
     </dependency>
     <dependency>
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCV2Suite.scala b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCV2Suite.scala
index e1a7971b283c..db06aac7f5e0 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCV2Suite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCV2Suite.scala
@@ -37,7 +37,7 @@ import org.apache.spark.sql.connector.expressions.Expression
 import org.apache.spark.sql.execution.FormattedMode
 import org.apache.spark.sql.execution.datasources.v2.{DataSourceV2ScanRelation, V1ScanWrapper}
 import org.apache.spark.sql.execution.datasources.v2.jdbc.JDBCTableCatalog
-import org.apache.spark.sql.functions.{abs, acos, asin, atan, atan2, avg, ceil, coalesce, cos, cosh, cot, count, count_distinct, degrees, exp, floor, lit, log => logarithm, log10, not, pow, radians, round, signum, sin, sinh, sqrt, sum, tan, tanh, udf, when}
+import org.apache.spark.sql.functions.{abs, acos, asin, avg, ceil, coalesce, count, count_distinct, degrees, exp, floor, lit, log => logarithm, log10, not, pow, radians, round, signum, sqrt, sum, udf, when}
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.test.SharedSparkSession
 import org.apache.spark.sql.types.{DataType, IntegerType, StringType}
@@ -1258,25 +1258,29 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
     checkAnswer(df15, Seq(Row(1, "cathy", 9000, 1200, false),
       Row(2, "alex", 12000, 1200, false), Row(6, "jen", 12000, 1200, true)))
 
-    val df16 = spark.table("h2.test.employee")
-      .filter(sin($"bonus") < -0.08)
-      .filter(sinh($"bonus") > 200)
-      .filter(cos($"bonus") > 0.9)
-      .filter(cosh($"bonus") > 200)
-      .filter(tan($"bonus") < -0.08)
-      .filter(tanh($"bonus") === 1)
-      .filter(cot($"bonus") < -11)
-      .filter(asin($"bonus") > 0.1)
-      .filter(acos($"bonus") > 1.4)
-      .filter(atan($"bonus") > 1.4)
-      .filter(atan2($"bonus", $"bonus") > 0.7)
+    val df16 = sql(
+      """
+        |SELECT * FROM h2.test.employee
+        |WHERE sin(bonus) < -0.08
+        |AND sinh(bonus) > 200
+        |AND cos(bonus) > 0.9
+        |AND cosh(bonus) > 200
+        |AND tan(bonus) < -0.08
+        |AND tanh(bonus) = 1
+        |AND cot(bonus) < -11
+        |AND asin(bonus / salary) > 0.13
+        |AND acos(bonus / salary) < 1.47
+        |AND atan(bonus) > 1.4
+        |AND atan2(bonus, bonus) > 0.7
+        |""".stripMargin)
     checkFiltersRemoved(df16)
     checkPushedInfo(df16, "PushedFilters: [" +
-      "BONUS IS NOT NULL, SIN(BONUS) < -0.08, SINH(BONUS) > 200.0, COS(BONUS) 
> 0.9, " +
-      "COSH(BONUS) > 200.0, TAN(BONUS) < -0.08, TANH(BONUS) = 1.0, COT(BONUS) 
< -11.0, " +
-      "ASIN(BONUS) > 0.1, ACOS(BONUS) > 1.4, ATAN(BONUS) > 1.4, (ATAN2(BONUS, 
BONUS)) > 0.7],")
-    checkAnswer(df16, Seq(Row(1, "cathy", 9000, 1200, false),
-      Row(2, "alex", 12000, 1200, false), Row(6, "jen", 12000, 1200, true)))
+      "BONUS IS NOT NULL, SALARY IS NOT NULL, SIN(BONUS) < -0.08, SINH(BONUS) 
> 200.0, " +
+      "COS(BONUS) > 0.9, COSH(BONUS) > 200.0, TAN(BONUS) < -0.08, TANH(BONUS) 
= 1.0, " +
+      "COT(BONUS) < -11.0, ASIN(BONUS / CAST(SALARY AS double)) > 0.13, " +
+      "ACOS(BONUS / CAST(SALARY AS double)) < 1.47, " +
+      "ATAN(BONUS) > 1.4, (ATAN2(BONUS, BONUS)) > 0.7],")
+    checkAnswer(df16, Seq(Row(1, "cathy", 9000, 1200, false)))
 
     // H2 does not support log2, asinh, acosh, atanh, cbrt
     val df17 = sql(
@@ -1294,6 +1298,24 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
     checkAnswer(df17, Seq(Row(6, "jen", 12000, 1200, true)))
   }
 
+  test("SPARK-48943: arguments for asin and acos are invalid (< -1 || > 1) in 
H2") {
+    val df1 = spark.table("h2.test.employee").filter(acos($"bonus") > 1.4)
+    val e1 = intercept[SparkException] {
+      checkAnswer(df1, Seq(Row(1, "cathy", 9000, 1200, false)))
+    }
+    assert(e1.getCause.getClass === classOf[org.h2.jdbc.JdbcSQLDataException])
+    assert(e1.getCause.getMessage.contains("Invalid value")
+      && e1.getCause.getMessage.contains("ACOS"))
+
+    val df2 = spark.table("h2.test.employee").filter(asin($"bonus") > 0.1)
+    val e2 = intercept[SparkException] {
+      checkAnswer(df2, Seq(Row(1, "cathy", 9000, 1200, false)))
+    }
+    assert(e2.getCause.getClass === classOf[org.h2.jdbc.JdbcSQLDataException])
+    assert(e2.getCause.getMessage.contains("Invalid value")
+      && e2.getCause.getMessage.contains("ASIN"))
+  }
+
   test("SPARK-38432: escape the single quote, _ and % for DS V2 pushdown") {
    val df1 = spark.table("h2.test.address").filter($"email".startsWith("abc_"))
     checkFiltersRemoved(df1)


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
