This is an automated email from the ASF dual-hosted git repository.
ruifengz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
new d69b9c4e0301 [SPARK-54079][GEO][SQL][FOLLOWUP] Fix catalyst
expressions ST package and imports
d69b9c4e0301 is described below
commit d69b9c4e03013348f1c7f838b2b477b936fb9921
Author: Uros Bojanic <[email protected]>
AuthorDate: Mon Nov 3 10:16:20 2025 +0800
[SPARK-54079][GEO][SQL][FOLLOWUP] Fix catalyst expressions ST package and
imports
### What changes were proposed in this pull request?
Following up on the original PR
(https://github.com/apache/spark/pull/52784), this PR fixes the package
declaration and imports of the catalyst ST expressions.
### Why are the changes needed?
The ST expressions should live in their own `expressions.st` package and
be referenced through proper imports.
### Does this PR introduce _any_ user-facing change?
No.
### How was this patch tested?
Existing tests suffice.
### Was this patch authored or co-authored using generative AI tooling?
No.
Closes #52807 from uros-db/geo-st-expressions-package.
Authored-by: Uros Bojanic <[email protected]>
Signed-off-by: Ruifeng Zheng <[email protected]>
---
.../org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala | 1 +
.../apache/spark/sql/catalyst/expressions/st/stExpressions.scala | 3 ++-
.../src/test/resources/sql-functions/sql-expression-schema.md | 8 ++++----
.../src/test/scala/org/apache/spark/sql/STExpressionsSuite.scala | 1 +
4 files changed, 8 insertions(+), 5 deletions(-)
diff --git
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala
index bb98e5fa02ed..3098832774d1 100644
---
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala
+++
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala
@@ -30,6 +30,7 @@ import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.FunctionIdentifier
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
+import org.apache.spark.sql.catalyst.expressions.st._
import org.apache.spark.sql.catalyst.expressions.variant._
import org.apache.spark.sql.catalyst.expressions.xml._
import org.apache.spark.sql.catalyst.plans.logical.{FunctionBuilderBase,
Generate, LogicalPlan, OneRowRelation, Range}
diff --git
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/st/stExpressions.scala
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/st/stExpressions.scala
index a08f88b67952..135a7f7d61f2 100755
---
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/st/stExpressions.scala
+++
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/st/stExpressions.scala
@@ -15,8 +15,9 @@
* limitations under the License.
*/
-package org.apache.spark.sql.catalyst.expressions
+package org.apache.spark.sql.catalyst.expressions.st
+import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.objects._
import org.apache.spark.sql.catalyst.trees._
import org.apache.spark.sql.catalyst.util.{Geography, Geometry, STUtils}
diff --git a/sql/core/src/test/resources/sql-functions/sql-expression-schema.md
b/sql/core/src/test/resources/sql-functions/sql-expression-schema.md
index 29d194f5715e..d9e4a960a2d7 100644
--- a/sql/core/src/test/resources/sql-functions/sql-expression-schema.md
+++ b/sql/core/src/test/resources/sql-functions/sql-expression-schema.md
@@ -287,10 +287,6 @@
| org.apache.spark.sql.catalyst.expressions.Rint | rint | SELECT rint(12.3456)
| struct<rint(12.3456):double> |
| org.apache.spark.sql.catalyst.expressions.Round | round | SELECT round(2.5,
0) | struct<round(2.5, 0):decimal(2,0)> |
| org.apache.spark.sql.catalyst.expressions.RowNumber | row_number | SELECT a,
b, row_number() OVER (PARTITION BY a ORDER BY b) FROM VALUES ('A1', 2), ('A1',
1), ('A2', 3), ('A1', 1) tab(a, b) | struct<a:string,b:int,row_number() OVER
(PARTITION BY a ORDER BY b ASC NULLS FIRST ROWS BETWEEN UNBOUNDED PRECEDING AND
CURRENT ROW):int> |
-| org.apache.spark.sql.catalyst.expressions.ST_AsBinary | st_asbinary | SELECT
hex(st_asbinary(st_geogfromwkb(X'0101000000000000000000F03F0000000000000040')))
|
struct<hex(st_asbinary(st_geogfromwkb(X'0101000000000000000000F03F0000000000000040'))):string>
|
-| org.apache.spark.sql.catalyst.expressions.ST_GeogFromWKB | st_geogfromwkb |
SELECT
hex(st_asbinary(st_geogfromwkb(X'0101000000000000000000F03F0000000000000040')))
|
struct<hex(st_asbinary(st_geogfromwkb(X'0101000000000000000000F03F0000000000000040'))):string>
|
-| org.apache.spark.sql.catalyst.expressions.ST_GeomFromWKB | st_geomfromwkb |
SELECT
hex(st_asbinary(st_geomfromwkb(X'0101000000000000000000F03F0000000000000040')))
|
struct<hex(st_asbinary(st_geomfromwkb(X'0101000000000000000000F03F0000000000000040'))):string>
|
-| org.apache.spark.sql.catalyst.expressions.ST_Srid | st_srid | SELECT
st_srid(st_geogfromwkb(X'0101000000000000000000F03F0000000000000040')) |
struct<st_srid(st_geogfromwkb(X'0101000000000000000000F03F0000000000000040')):int>
|
| org.apache.spark.sql.catalyst.expressions.SchemaOfCsv | schema_of_csv |
SELECT schema_of_csv('1,abc') | struct<schema_of_csv(1,abc):string> |
| org.apache.spark.sql.catalyst.expressions.SchemaOfJson | schema_of_json |
SELECT schema_of_json('[{"col":0}]') |
struct<schema_of_json([{"col":0}]):string> |
| org.apache.spark.sql.catalyst.expressions.SchemaOfXml | schema_of_xml |
SELECT schema_of_xml('<p><a>1</a></p>') |
struct<schema_of_xml(<p><a>1</a></p>):string> |
@@ -481,6 +477,10 @@
| org.apache.spark.sql.catalyst.expressions.aggregate.VariancePop | var_pop |
SELECT var_pop(col) FROM VALUES (1), (2), (3) AS tab(col) |
struct<var_pop(col):double> |
| org.apache.spark.sql.catalyst.expressions.aggregate.VarianceSamp | var_samp
| SELECT var_samp(col) FROM VALUES (1), (2), (3) AS tab(col) |
struct<var_samp(col):double> |
| org.apache.spark.sql.catalyst.expressions.aggregate.VarianceSamp | variance
| SELECT variance(col) FROM VALUES (1), (2), (3) AS tab(col) |
struct<variance(col):double> |
+| org.apache.spark.sql.catalyst.expressions.st.ST_AsBinary | st_asbinary |
SELECT
hex(st_asbinary(st_geogfromwkb(X'0101000000000000000000F03F0000000000000040')))
|
struct<hex(st_asbinary(st_geogfromwkb(X'0101000000000000000000F03F0000000000000040'))):string>
|
+| org.apache.spark.sql.catalyst.expressions.st.ST_GeogFromWKB | st_geogfromwkb
| SELECT
hex(st_asbinary(st_geogfromwkb(X'0101000000000000000000F03F0000000000000040')))
|
struct<hex(st_asbinary(st_geogfromwkb(X'0101000000000000000000F03F0000000000000040'))):string>
|
+| org.apache.spark.sql.catalyst.expressions.st.ST_GeomFromWKB | st_geomfromwkb
| SELECT
hex(st_asbinary(st_geomfromwkb(X'0101000000000000000000F03F0000000000000040')))
|
struct<hex(st_asbinary(st_geomfromwkb(X'0101000000000000000000F03F0000000000000040'))):string>
|
+| org.apache.spark.sql.catalyst.expressions.st.ST_Srid | st_srid | SELECT
st_srid(st_geogfromwkb(X'0101000000000000000000F03F0000000000000040')) |
struct<st_srid(st_geogfromwkb(X'0101000000000000000000F03F0000000000000040')):int>
|
| org.apache.spark.sql.catalyst.expressions.variant.IsVariantNull |
is_variant_null | SELECT is_variant_null(parse_json('null')) |
struct<is_variant_null(parse_json(null)):boolean> |
| org.apache.spark.sql.catalyst.expressions.variant.ParseJsonExpressionBuilder
| parse_json | SELECT parse_json('{"a":1,"b":0.8}') |
struct<parse_json({"a":1,"b":0.8}):variant> |
| org.apache.spark.sql.catalyst.expressions.variant.SchemaOfVariant |
schema_of_variant | SELECT schema_of_variant(parse_json('null')) |
struct<schema_of_variant(parse_json(null)):string> |
diff --git
a/sql/core/src/test/scala/org/apache/spark/sql/STExpressionsSuite.scala
b/sql/core/src/test/scala/org/apache/spark/sql/STExpressionsSuite.scala
index 71353e1bc29b..1b73617d050e 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/STExpressionsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/STExpressionsSuite.scala
@@ -18,6 +18,7 @@
package org.apache.spark.sql
import org.apache.spark.sql.catalyst.expressions._
+import org.apache.spark.sql.catalyst.expressions.st._
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types._
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]