This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch branch-4.1
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-4.1 by this push:
     new 3eed713e51a9 [SPARK-54801][SQL] Mark a few new 4.1 configs as internal
3eed713e51a9 is described below

commit 3eed713e51a9d485c9bc5cffb82f4f2ee58e92b4
Author: Wenchen Fan <[email protected]>
AuthorDate: Tue Dec 23 09:21:03 2025 +0900

    [SPARK-54801][SQL] Mark a few new 4.1 configs as internal
    
    ### What changes were proposed in this pull request?
    
    We don't expect users to set these configs, as they touch internal details.
    For example, a recursive CTE can use the SQL syntax `MAX RECURSION LEVEL` and
    `LIMIT` to specify the level/row limits, so users shouldn't need to set the
    corresponding configs.
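    
    As a minimal sketch of the per-query alternative (hedged: the exact placement
    of the `MAX RECURSION LEVEL` clause is an assumption based on the description
    above, and `spark` is assumed to be an existing `SparkSession`):
    
    ```scala
    // Per-query limits via SQL syntax, instead of setting the internal configs
    // spark.sql.cteRecursionLevelLimit / spark.sql.cteRecursionRowLimit.
    // NOTE: placing MAX RECURSION LEVEL after the CTE column list is an assumption.
    spark.sql("""
      WITH RECURSIVE t(n) MAX RECURSION LEVEL 50 AS (
        SELECT 1
        UNION ALL
        SELECT n + 1 FROM t WHERE n < 50
      )
      SELECT * FROM t LIMIT 1000
    """).show()
    ```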
    
    ### Why are the changes needed?
    
    Avoid exposing internal configs.
    
    ### Does this PR introduce _any_ user-facing change?
    
    No, it's effectively a documentation-only change.
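    
    Marking a config as internal only hides it from user-facing listings. A
    minimal way to observe this (a hedged sketch, assuming a `SparkSession`
    named `spark`; `SET -v` lists only non-internal SQL configs):
    
    ```scala
    // After this change, the configs touched in this PR should no longer
    // appear in the output of SET -v, which excludes internal SQL configs.
    val listed = spark.sql("SET -v").collect().map(_.getString(0)).toSet
    assert(!listed.contains("spark.sql.cteRecursionLevelLimit"))
    assert(!listed.contains("spark.sql.timeType.enabled"))
    ```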
    
    ### How was this patch tested?
    
    N/A
    
    ### Was this patch authored or co-authored using generative AI tooling?
    
    Cursor 2.2.20
    
    Closes #53563 from cloud-fan/internal_conf.
    
    Authored-by: Wenchen Fan <[email protected]>
    Signed-off-by: Hyukjin Kwon <[email protected]>
    (cherry picked from commit c9cb5a240144b1ef6ce272850834250d14eb9fa3)
    Signed-off-by: Hyukjin Kwon <[email protected]>
---
 .../src/main/scala/org/apache/spark/sql/internal/SQLConf.scala | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
index 994291d3db5a..100149a39211 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
@@ -575,6 +575,7 @@ object SQLConf {
 
   val GEOSPATIAL_ENABLED =
     buildConf("spark.sql.geospatial.enabled")
+      .internal()
       .doc("When true, enables geospatial types (GEOGRAPHY/GEOMETRY) and ST 
functions.")
       .version("4.1.0")
       .booleanConf
@@ -934,6 +935,7 @@ object SQLConf {
 
   val ADAPTIVE_EXECUTION_ENABLED_IN_STATELESS_STREAMING =
     buildConf("spark.sql.adaptive.streaming.stateless.enabled")
+      .internal()
       .doc("When true, enable adaptive query execution for stateless streaming 
query. To " +
         "enable this config, `spark.sql.adaptive.enabled` needs to be also 
enabled.")
       .version("4.1.0")
@@ -1221,6 +1223,7 @@ object SQLConf {
 
   val MAP_ZIP_WITH_USES_JAVA_COLLECTIONS =
     buildConf("spark.sql.mapZipWithUsesJavaCollections")
+      .internal()
       .doc("When true, the `map_zip_with` function uses Java collections 
instead of Scala " +
         "collections. This is useful for avoiding NaN equality issues.")
       .version("4.1.0")
@@ -1564,6 +1567,7 @@ object SQLConf {
 
   val PARQUET_VECTORIZED_READER_NULL_TYPE_ENABLED =
     buildConf("spark.sql.parquet.enableNullTypeVectorizedReader")
+      .internal()
       .doc("Enables vectorized Parquet reader support for NullType columns.")
       .version("4.1.0")
       .booleanConf
@@ -1597,6 +1601,7 @@ object SQLConf {
 
   val PARQUET_ANNOTATE_VARIANT_LOGICAL_TYPE =
     buildConf("spark.sql.parquet.variant.annotateLogicalType.enabled")
+      .internal()
       .doc("When enabled, Spark annotates the variant groups written to 
Parquet as the parquet " +
         "variant logical type.")
       .version("4.1.0")
@@ -5280,6 +5285,7 @@ object SQLConf {
     .createWithDefault(LegacyBehaviorPolicy.CORRECTED)
 
   val CTE_RECURSION_LEVEL_LIMIT = buildConf("spark.sql.cteRecursionLevelLimit")
+    .internal()
     .doc("Maximum level of recursion that is allowed while executing a 
recursive CTE definition." +
       "If a query does not get exhausted before reaching this limit it fails. 
Use -1 for " +
       "unlimited.")
@@ -5288,6 +5294,7 @@ object SQLConf {
     .createWithDefault(100)
 
   val CTE_RECURSION_ROW_LIMIT = buildConf("spark.sql.cteRecursionRowLimit")
+    .internal()
     .doc("Maximum number of rows that can be returned when executing a 
recursive CTE definition." +
       "If a query does not get exhausted before reaching this limit it fails. 
Use -1 for " +
       "unlimited.")
@@ -5297,6 +5304,7 @@ object SQLConf {
 
   val CTE_RECURSION_ANCHOR_ROWS_LIMIT_TO_CONVERT_TO_LOCAL_RELATION =
     buildConf("spark.sql.cteRecursionAnchorRowsLimitToConvertToLocalRelation")
+      .internal()
       .doc("Maximum number of rows that the anchor in a recursive CTE can 
return for it to be" +
         "converted to a localRelation.")
       .version("4.1.0")
@@ -5458,6 +5466,7 @@ object SQLConf {
       .createWithDefault(false)
 
   val PYTHON_FILTER_PUSHDOWN_ENABLED = buildConf("spark.sql.python.filterPushdown.enabled")
+    .internal()
     .doc("When true, enable filter pushdown to Python datasource, at the cost 
of running " +
       "Python worker one additional time during planning.")
     .version("4.1.0")
@@ -6631,6 +6640,7 @@ object SQLConf {
 
   val TIME_TYPE_ENABLED =
     buildConf("spark.sql.timeType.enabled")
+      .internal()
       .doc("When true, the TIME data type is supported.")
       .version("4.1.0")
       .booleanConf


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
