This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 14cc174a4e4c [SPARK-54802][SQL][DOCS] Fix typos in OrderedFilters.scala
14cc174a4e4c is described below

commit 14cc174a4e4cf3fef598587749ee95e400be62ab
Author: Chris Boumalhab <[email protected]>
AuthorDate: Tue Dec 23 10:38:17 2025 +0900

    [SPARK-54802][SQL][DOCS] Fix typos in OrderedFilters.scala
    
    ### What changes were proposed in this pull request?
    
    Found 3 typos in OrderedFilters.scala
    
    ### Why are the changes needed?
    
    Fix grammar
    
    ### Does this PR introduce _any_ user-facing change?
    
    No
    
    ### How was this patch tested?
    
    N/A
    
    ### Was this patch authored or co-authored using generative AI tooling?
    
    No
    
    Closes #53566 from cboumalh/cboumalh-typos-fix-1.
    
    Authored-by: Chris Boumalhab <[email protected]>
    Signed-off-by: Hyukjin Kwon <[email protected]>
---
 .../main/scala/org/apache/spark/sql/catalyst/OrderedFilters.scala   | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/OrderedFilters.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/OrderedFilters.scala
index b7c8a0140ea6..81bf24beba53 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/OrderedFilters.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/OrderedFilters.scala
@@ -33,7 +33,7 @@ class OrderedFilters(filters: Seq[sources.Filter], requiredSchema: StructType)
   extends StructFilters(filters, requiredSchema) {
   /**
    * Converted filters to predicates and grouped by maximum field index
-   * in the read schema. For example, if an filter refers to 2 attributes
+   * in the read schema. For example, if a filter refers to 2 attributes
    * attrA with field index 5 and attrB with field index 10 in the read schema:
    *   0 === $"attrA" or $"attrB" < 100
    * the filter is compiled to a predicate, and placed to the `predicates`
@@ -64,7 +64,7 @@ class OrderedFilters(filters: Seq[sources.Filter], requiredSchema: StructType)
     }
     if (len > 0 && groupedFilters(0).nonEmpty) {
       // We assume that filters w/o refs like `AlwaysTrue` and `AlwaysFalse`
-      // can be evaluated faster that others. We put them in front of others.
+      // can be evaluated faster than others. We put them in front of others.
      val (literals, others) = groupedFilters(0).partition(_.references.isEmpty)
       groupedFilters(0) = literals ++ others
     }
@@ -81,7 +81,7 @@ class OrderedFilters(filters: Seq[sources.Filter], requiredSchema: StructType)
    * @param row The internal row to check.
    * @param index Maximum field index. The function assumes that all fields
    *              from 0 to `index` position are set.
-   * @return false` iff row fields at the position from 0 to `index` pass filters
+   * @return `false` iff row fields at the position from 0 to `index` pass filters
    *         or there are no applicable filters
    *         otherwise `false` if at least one of the filters returns `false`.
    */
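
Side note for readers of this diff: the comment corrected in the first hunk describes how OrderedFilters groups compiled predicates by the maximum field index they reference in the read schema. Below is a minimal, standalone Scala sketch of that grouping idea; it is not the Spark implementation, and the `Pred` type, `group` function, and `numFields` parameter are invented purely for illustration.

// Illustrative sketch only (not from the Spark sources): group predicates by
// the maximum field index they reference, so each one can be evaluated as
// soon as the fields it needs have been set in the row.
object GroupByMaxFieldIndexSketch {
  // Stand-in for a compiled predicate; `refs` holds the positions of the
  // read-schema fields the predicate looks at.
  final case class Pred(name: String, refs: Set[Int])

  def group(preds: Seq[Pred], numFields: Int): Array[Seq[Pred]] = {
    val slots = Array.fill(numFields)(Seq.empty[Pred])
    preds.foreach { p =>
      // A predicate with no references (an AlwaysTrue/AlwaysFalse analogue)
      // goes to slot 0; otherwise it lands at its largest referenced index.
      val slot = if (p.refs.isEmpty) 0 else p.refs.max
      slots(slot) = slots(slot) :+ p
    }
    if (numFields > 0) {
      // Reference-free predicates are moved to the front of slot 0,
      // mirroring the comment fixed in the second hunk above.
      val (literals, others) = slots(0).partition(_.refs.isEmpty)
      slots(0) = literals ++ others
    }
    slots
  }

  def main(args: Array[String]): Unit = {
    // attrA has field index 5 and attrB has field index 10, so the combined
    // filter ends up in slot 10, as in the example from the fixed comment.
    val preds = Seq(
      Pred("0 === attrA or attrB < 100", Set(5, 10)),
      Pred("AlwaysTrue", Set.empty))
    val grouped = group(preds, numFields = 11)
    println(grouped(10).map(_.name)) // the attrA/attrB filter
    println(grouped(0).map(_.name))  // the reference-free predicate
  }
}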


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
