This is an automated email from the ASF dual-hosted git repository.

dongjoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 031af163646e [SPARK-54149][SQL][SS][CONNECT] Enable tail-recursion wherever possible
031af163646e is described below

commit 031af163646e2a8ba43aba51fa66315fd11f6f76
Author: yangjie01 <[email protected]>
AuthorDate: Tue Nov 4 10:00:34 2025 -0800

    [SPARK-54149][SQL][SS][CONNECT] Enable tail-recursion wherever possible
    
    ### What changes were proposed in this pull request?
    This PR adds `scala.annotation.tailrec` annotations flagged by IDE
    inspection (IntelliJ); these are new cases introduced after Spark 4.1.0.
    
    ### Why are the changes needed?
    To improve performance: the Scala compiler compiles `@tailrec`-annotated
    methods into loops instead of recursive calls, avoiding stack growth and
    potential `StackOverflowError`s, and it rejects the annotation at compile
    time if the recursion is not actually in tail position.
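    
    For context, a minimal sketch of what the annotation buys (a hypothetical
    `length` helper, not code from this patch): the recursive call is the last
    action of the method, so `@tailrec` is accepted and the method runs in
    constant stack space.
    
    ```scala
    import scala.annotation.tailrec
    
    object TailRecSketch {
      // Hypothetical example: counts list elements with an accumulator.
      // The recursive call is in tail position, so the compiler accepts
      // @tailrec and emits a loop instead of a chain of stack frames.
      @tailrec
      def length(xs: List[Int], acc: Int = 0): Int = xs match {
        case Nil => acc
        case _ :: tail => length(tail, acc + 1)
      }
    
      def main(args: Array[String]): Unit = {
        // Runs without StackOverflowError even for a very deep list.
        println(length(List.fill(1000000)(1))) // prints 1000000
      }
    }
    ```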
    
    ### Does this PR introduce _any_ user-facing change?
    No
    
    ### How was this patch tested?
    Pass GitHub Actions
    
    ### Was this patch authored or co-authored using generative AI tooling?
    No
    
    Closes #52847 from LuciferYang/SPARK-54149.
    
    Authored-by: yangjie01 <[email protected]>
    Signed-off-by: Dongjoon Hyun <[email protected]>
---
 .../src/test/scala/org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala     | 1 +
 .../org/apache/spark/sql/catalyst/encoders/ExpressionEncoder.scala   | 1 +
 .../apache/spark/sql/connect/common/LiteralValueProtoConverter.scala | 1 +
 .../execution/datasources/parquet/InferVariantShreddingSchema.scala  | 1 +
 .../spark/sql/execution/streaming/runtime/WatermarkPropagator.scala  | 1 +
 .../org/apache/spark/sql/scripting/SqlScriptingExecutionNode.scala   | 5 ++++-
 6 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala b/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala
index fb18dca97994..df5dfdf7deaf 100644
--- a/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala
+++ b/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala
@@ -181,6 +181,7 @@ private[v2] trait V2JDBCTest
   def multiplePartitionAdditionalCheck(
       df: DataFrame,
       partitioningInfo: Option[PartitioningInfo]): Unit = {
+    @scala.annotation.tailrec
     def getJDBCRDD(rdd: RDD[_]): Option[JDBCRDD] = {
       rdd match {
         case jdbcRdd: JDBCRDD => Some(jdbcRdd)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoder.scala
index 084be5a35045..43ba01fa7980 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoder.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoder.scala
@@ -216,6 +216,7 @@ case class ExpressionEncoder[T](
     StructField(s.name, s.dataType, s.nullable)
   })
 
+  @scala.annotation.tailrec
   private def transformerOfOption(enc: AgnosticEncoder[_]): Boolean =
     enc match {
       case t: TransformingEncoder[_, _] => transformerOfOption(t.transformed)
diff --git a/sql/connect/common/src/main/scala/org/apache/spark/sql/connect/common/LiteralValueProtoConverter.scala b/sql/connect/common/src/main/scala/org/apache/spark/sql/connect/common/LiteralValueProtoConverter.scala
index b5a648d6111a..63c43f956d78 100644
--- a/sql/connect/common/src/main/scala/org/apache/spark/sql/connect/common/LiteralValueProtoConverter.scala
+++ b/sql/connect/common/src/main/scala/org/apache/spark/sql/connect/common/LiteralValueProtoConverter.scala
@@ -285,6 +285,7 @@ object LiteralValueProtoConverter {
       case None =>
         val builder = toLiteralProtoBuilderInternal(literal, options)
         if (!options.useDeprecatedDataTypeFields) {
+          @scala.annotation.tailrec
           def unwrapArraySeq(value: Any): Any = value match {
             case arraySeq: mutable.ArraySeq[_] => unwrapArraySeq(arraySeq.array)
             case arraySeq: immutable.ArraySeq[_] => unwrapArraySeq(arraySeq.unsafeArray)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/InferVariantShreddingSchema.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/InferVariantShreddingSchema.scala
index 80ba3ca841bd..1ebb61968150 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/InferVariantShreddingSchema.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/InferVariantShreddingSchema.scala
@@ -64,6 +64,7 @@ class InferVariantShreddingSchema(val schema: StructType) {
   * Return the VariantVal at the given path in the schema, or None if the Variant value or any of
   * its containing structs is null.
    */
+  @scala.annotation.tailrec
   private def getValueAtPath(schema: StructType, row: InternalRow, path: Seq[Int]):
       Option[VariantVal] = {
     if (row.isNullAt(path.head)) {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/runtime/WatermarkPropagator.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/runtime/WatermarkPropagator.scala
index 7c78069d0858..e573ea0e65ef 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/runtime/WatermarkPropagator.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/runtime/WatermarkPropagator.scala
@@ -175,6 +175,7 @@ class PropagateWatermarkSimulator extends WatermarkPropagator with Logging {
   private def getInputWatermarks(
       node: SparkPlan,
       nodeToOutputWatermark: mutable.Map[Int, Option[Long]]): Seq[Long] = {
+    @scala.annotation.tailrec
     def watermarkForChild(child: SparkPlan): Option[Long] = {
       child match {
         case s: InMemoryTableScanExec => watermarkForChild(s.relation.cachedPlan)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/scripting/SqlScriptingExecutionNode.scala b/sql/core/src/main/scala/org/apache/spark/sql/scripting/SqlScriptingExecutionNode.scala
index aa2c2f405021..0cc7431b623c 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/scripting/SqlScriptingExecutionNode.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/scripting/SqlScriptingExecutionNode.scala
@@ -20,6 +20,8 @@ package org.apache.spark.sql.scripting
 import java.util
 import java.util.{Locale, UUID}
 
+import scala.annotation.tailrec
+
 import org.apache.spark.SparkException
 import org.apache.spark.internal.Logging
 import org.apache.spark.sql.Row
@@ -309,7 +311,7 @@ class CompoundBodyExec(
         !stopIteration && (localIterator.hasNext || childHasNext)
       }
 
-      @scala.annotation.tailrec
+      @tailrec
       override def next(): CompoundStatementExec = {
         curr match {
           case None => throw SparkException.internalError(
@@ -1056,6 +1058,7 @@ class ForStatementExec(
         case ForState.Body => bodyWithVariables.exists(_.getTreeIterator.hasNext)
       })
 
+      @tailrec
       override def next(): CompoundStatementExec = state match {
 
         case ForState.VariableAssignment =>


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
