This is an automated email from the ASF dual-hosted git repository.

dongjoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new bc4493349936 [SPARK-53202][SQL][TESTS] Use `SparkFileUtils.touch` instead of `Files.touch`
bc4493349936 is described below

commit bc4493349936e85fd91bc1feb41771b4b741d177
Author: Dongjoon Hyun <dongj...@apache.org>
AuthorDate: Fri Aug 8 09:17:37 2025 -0700

    [SPARK-53202][SQL][TESTS] Use `SparkFileUtils.touch` instead of `Files.touch`
    
    ### What changes were proposed in this pull request?
    
    This PR aims to use `SparkFileUtils.touch` instead of `com.google.common.io.Files.touch`.
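
    For illustration, a minimal sketch of the substitution (the `dir` value below is a hypothetical placeholder, not from this patch; per the new scalastyle message, `Utils` exposes the same `touch` helper as `SparkFileUtils`):

    ```scala
    import java.io.File

    import org.apache.spark.util.Utils

    // Hypothetical placeholder directory, for illustration only.
    val dir = new File("/tmp/spark-test")

    // Before: Guava helper, now rejected by the new scalastyle rule.
    // com.google.common.io.Files.touch(new File(dir, "_SUCCESS"))

    // After: Spark's own helper creates the empty marker file.
    Utils.touch(new File(dir, "_SUCCESS"))
    ```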
    
    ### Why are the changes needed?
    
    To simplify the Spark code path.
    
    ### Does this PR introduce _any_ user-facing change?
    
    No behavior change.
    
    ### How was this patch tested?
    
    Pass the CIs.
    
    ### Was this patch authored or co-authored using generative AI tooling?
    
    No.
    
    Closes #51931 from dongjoon-hyun/SPARK-53202.
    
    Authored-by: Dongjoon Hyun <dongj...@apache.org>
    Signed-off-by: Dongjoon Hyun <dongj...@apache.org>
---
 scalastyle-config.xml                                     |  5 +++++
 .../parquet/ParquetPartitionDiscoverySuite.scala          | 15 ++++++++-------
 .../org/apache/spark/sql/hive/orc/HiveOrcQuerySuite.scala |  4 ++--
 .../spark/sql/sources/ParquetHadoopFsRelationSuite.scala  |  3 ++-
 4 files changed, 17 insertions(+), 10 deletions(-)

diff --git a/scalastyle-config.xml b/scalastyle-config.xml
index 4d72222ee3f8..a75f622c201c 100644
--- a/scalastyle-config.xml
+++ b/scalastyle-config.xml
@@ -418,6 +418,11 @@ This file is divided into 3 sections:
     <customMessage>Use touch of SparkFileUtil or Utils instead.</customMessage>
   </check>
 
+  <check customId="filestouch" level="error" 
class="org.scalastyle.file.RegexChecker" enabled="true">
+    <parameters><parameter 
name="regex">\bFiles\.touch\b</parameter></parameters>
+    <customMessage>Use touch of SparkFileUtil or Utils instead.</customMessage>
+  </check>
+
   <check customId="writeStringToFile" level="error" 
class="org.scalastyle.file.RegexChecker" enabled="true">
     <parameters><parameter 
name="regex">\bFileUtils\.writeStringToFile\b</parameter></parameters>
     <customMessage>Use java.nio.file.Files.writeString instead.</customMessage>
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetPartitionDiscoverySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetPartitionDiscoverySuite.scala
index 77e5c3b6fbfb..48b41fafad3c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetPartitionDiscoverySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetPartitionDiscoverySuite.scala
@@ -42,6 +42,7 @@ import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.test.SharedSparkSession
 import org.apache.spark.sql.types._
 import org.apache.spark.unsafe.types.UTF8String
+import org.apache.spark.util.Utils
 
 // The data where the partitioning key exists only in the directory structure.
 case class ParquetData(intField: Int, stringField: String)
@@ -821,7 +822,7 @@ abstract class ParquetPartitionDiscoverySuite
         .partitionBy("b", "c", "d")
         .save(dir.getCanonicalPath)
 
-      Files.touch(new File(s"${dir.getCanonicalPath}/b=1", ".DS_Store"))
+      Utils.touch(new File(s"${dir.getCanonicalPath}/b=1", ".DS_Store"))
       Files.createParentDirs(new File(s"${dir.getCanonicalPath}/b=1/c=1/.foo/bar"))
 
       checkAnswer(spark.read.format("parquet").load(dir.getCanonicalPath), df)
@@ -838,7 +839,7 @@ abstract class ParquetPartitionDiscoverySuite
         .partitionBy("b", "c", "d")
         .save(tablePath.getCanonicalPath)
 
-      Files.touch(new File(s"${tablePath.getCanonicalPath}/", "_SUCCESS"))
+      Utils.touch(new File(s"${tablePath.getCanonicalPath}/", "_SUCCESS"))
       Files.createParentDirs(new File(s"${dir.getCanonicalPath}/b=1/c=1/.foo/bar"))
 
       checkAnswer(spark.read.format("parquet").load(tablePath.getCanonicalPath), df)
@@ -855,7 +856,7 @@ abstract class ParquetPartitionDiscoverySuite
         .partitionBy("b", "c", "d")
         .save(tablePath.getCanonicalPath)
 
-      Files.touch(new File(s"${tablePath.getCanonicalPath}/", "_SUCCESS"))
+      Utils.touch(new File(s"${tablePath.getCanonicalPath}/", "_SUCCESS"))
       Files.createParentDirs(new File(s"${dir.getCanonicalPath}/b=1/c=1/.foo/bar"))
 
       checkAnswer(spark.read.format("parquet").load(tablePath.getCanonicalPath), df)
@@ -953,9 +954,9 @@ abstract class ParquetPartitionDiscoverySuite
             .partitionBy("b", "c", "d")
             .save(tablePath.getCanonicalPath)
 
-          Files.touch(new File(s"${tablePath.getCanonicalPath}/b=1", "_SUCCESS"))
-          Files.touch(new File(s"${tablePath.getCanonicalPath}/b=1/c=1", "_SUCCESS"))
-          Files.touch(new File(s"${tablePath.getCanonicalPath}/b=1/c=1/d=1", "_SUCCESS"))
+          Utils.touch(new File(s"${tablePath.getCanonicalPath}/b=1", "_SUCCESS"))
+          Utils.touch(new File(s"${tablePath.getCanonicalPath}/b=1/c=1", "_SUCCESS"))
+          Utils.touch(new File(s"${tablePath.getCanonicalPath}/b=1/c=1/d=1", "_SUCCESS"))
           checkAnswer(spark.read.format("parquet").load(tablePath.getCanonicalPath), df)
         }
       }
@@ -1063,7 +1064,7 @@ abstract class ParquetPartitionDiscoverySuite
 
       Files.copy(new File(p1, "_metadata"), new File(p0, "_metadata"))
       Files.copy(new File(p1, "_common_metadata"), new File(p0, "_common_metadata"))
-      Files.touch(new File(p0, ".dummy"))
+      Utils.touch(new File(p0, ".dummy"))
 
       checkAnswer(spark.read.parquet(s"$path"), Seq(
         Row(0, 0, 0),
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/HiveOrcQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/HiveOrcQuerySuite.scala
index 15f8666322e3..45f5d2f273e3 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/HiveOrcQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/HiveOrcQuerySuite.scala
@@ -19,7 +19,6 @@ package org.apache.spark.sql.hive.orc
 
 import java.io.File
 
-import com.google.common.io.Files
 import org.apache.hadoop.fs.Path
 import org.apache.orc.OrcConf
 
@@ -32,6 +31,7 @@ import org.apache.spark.sql.hive.{HiveSessionCatalog, HiveUtils}
 import org.apache.spark.sql.hive.test.TestHiveSingleton
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.types.TimeType
+import org.apache.spark.util.Utils
 
 class HiveOrcQuerySuite extends OrcQueryTest with TestHiveSingleton {
   import testImplicits._
@@ -195,7 +195,7 @@ class HiveOrcQuerySuite extends OrcQueryTest with TestHiveSingleton {
         withTempPath { dir =>
           withTable("spark_19809") {
             sql(s"CREATE TABLE spark_19809(a int) STORED AS ORC LOCATION 
'$dir'")
-            Files.touch(new File(s"${dir.getCanonicalPath}", "zero.orc"))
+            Utils.touch(new File(s"${dir.getCanonicalPath}", "zero.orc"))
 
             Seq(true, false).foreach { convertMetastoreOrc =>
               withSQLConf(HiveUtils.CONVERT_METASTORE_ORC.key -> convertMetastoreOrc.toString) {
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/sources/ParquetHadoopFsRelationSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/sources/ParquetHadoopFsRelationSuite.scala
index 84ee19e62bca..e76b2f06f5c0 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/sources/ParquetHadoopFsRelationSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/sources/ParquetHadoopFsRelationSuite.scala
@@ -29,6 +29,7 @@ import org.apache.spark.sql.execution.datasources.SQLHadoopMapReduceCommitProtoc
 import org.apache.spark.sql.execution.datasources.parquet.ParquetCompressionCodec
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.types._
+import org.apache.spark.util.Utils
 
 
 class ParquetHadoopFsRelationSuite extends HadoopFsRelationTest {
@@ -89,7 +90,7 @@ class ParquetHadoopFsRelationSuite extends HadoopFsRelationTest {
       // since it's not a valid Parquet file.
       val emptyFile = new File(path, "empty")
       Files.createParentDirs(emptyFile)
-      Files.touch(emptyFile)
+      Utils.touch(emptyFile)
 
       // This shouldn't throw anything.
       df.write.format("parquet").mode(SaveMode.Ignore).save(path)
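
For context, a minimal sketch (not part of the patch) of what the new `filestouch` scalastyle rule matches; the pattern below mirrors the `\bFiles\.touch\b` regex added to scalastyle-config.xml:

```scala
// Mirrors the regex in the new scalastyle check; illustrative only.
object FilesTouchRegexDemo extends App {
  val pattern = "\\bFiles\\.touch\\b".r

  // A Guava-style call site matches, so scalastyle would flag it.
  assert(pattern.findFirstIn("""Files.touch(new File(dir, "x"))""").isDefined)

  // The Spark replacement does not match, so it passes the check.
  assert(pattern.findFirstIn("""Utils.touch(new File(dir, "x"))""").isEmpty)
}
```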


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
