This is an automated email from the ASF dual-hosted git repository.

dongjoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 0cd2f9019a09 [SPARK-53035][TESTS][FOLLOWUP] Use `String.repeat` in `tests` too
0cd2f9019a09 is described below

commit 0cd2f9019a09eac561266c0f6c08b4fa550d54b2
Author: Dongjoon Hyun <dongj...@apache.org>
AuthorDate: Fri Aug 1 12:46:29 2025 -0700

    [SPARK-53035][TESTS][FOLLOWUP] Use `String.repeat` in `tests` too
    
    ### What changes were proposed in this pull request?
    
    This PR is a follow-up of the following PR; it uses `String.repeat` in `tests` code too, in order to save Apache Spark and downstream testing resources.
    
    - https://github.com/apache/spark/pull/51740
    
    For the following existing test examples, we can see that Java's `String.repeat` is faster than `Scala 2.13` string multiplication, as shown in the benchmark below.
    
    
    https://github.com/apache/spark/blob/7c96b3e36e3dafad5a025a4f53a3a72e9c089ea3/sql/core/src/test/scala/org/apache/spark/sql/CsvFunctionsSuite.scala#L828
    
    
    https://github.com/apache/spark/blob/7c96b3e36e3dafad5a025a4f53a3a72e9c089ea3/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/variant/VariantExpressionEvalUtilsSuite.scala#L136
    
    
    https://github.com/apache/spark/blob/7c96b3e36e3dafad5a025a4f53a3a72e9c089ea3/sql/core/src/test/scala/org/apache/spark/sql/VariantEndToEndSuite.scala#L158
    
    ```scala
    scala> spark.time("a" * (16 * 1024 * 1024))
    Time taken: 47 ms
    
    scala> spark.time("a".repeat(16 * 1024 * 1024))
    Time taken: 1 ms
    ```
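    
    As a hedged illustration (not part of this patch), the rewrite applied throughout the diff is mechanically `"x" * n` -> `"x".repeat(n)`. The two forms produce the same string for the non-negative counts used in these tests, with one edge case worth keeping in mind:
    
    ```scala
    // Illustration of the rewrite pattern; not code from this patch.
    val n = 5
    assert(("a" * n) == "a".repeat(n))
    
    // Caveat (an observation about the two APIs, not something these tests rely on):
    // Scala's StringOps.* returns "" for n <= 0, while String.repeat throws
    // IllegalArgumentException for a negative count.
    assert(("a" * -1) == "")
    // "a".repeat(-1)  // would throw java.lang.IllegalArgumentException
    ```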
    
    Note that this PR doesn't aim to enforce anything on new code. The focus is on improving the existing codebase; new code can still use `Scala` string multiplication.
    
    ### Why are the changes needed?
    
    `String.repeat` is **significantly faster** than the Scala implementation.
    
    ```scala
    scala> spark.time((" " * 500_000_000).length)
    Time taken: 1408 ms
    val res0: Int = 500000000
    
    scala> spark.time((" ".repeat(500_000_000)).length)
    Time taken: 22 ms
    val res1: Int = 500000000
    ```
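    
    The gap is not Spark-specific. A minimal standalone sketch (assuming a Scala 2.13 REPL on Java 11+; timings vary by machine) shows the same effect with plain `System.nanoTime`:
    
    ```scala
    // Standalone timing sketch; the numbers above came from spark.time, this is only illustrative.
    def time[T](label: String)(body: => T): T = {
      val start = System.nanoTime()
      val result = body
      println(s"$label: ${(System.nanoTime() - start) / 1000000} ms")
      result
    }
    
    // StringOps.* appends the string n times via a StringBuilder (Scala 2.13),
    // while String.repeat copies the underlying bytes in bulk (Java 11+).
    time("Scala *")       { (" " * 500_000_000).length }
    time("String.repeat") { " ".repeat(500_000_000).length }
    ```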
    
    ### Does this PR introduce _any_ user-facing change?
    
    No, because this only changes test code.
    
    ### How was this patch tested?
    
    Pass the CIs.
    
    ### Was this patch authored or co-authored using generative AI tooling?
    
    No.
    
    Closes #51779 from dongjoon-hyun/SPARK-53035-2.
    
    Authored-by: Dongjoon Hyun <dongj...@apache.org>
    Signed-off-by: Dongjoon Hyun <dongj...@apache.org>
---
 .../scala/org/apache/spark/util/IvyTestUtils.scala |  6 +--
 .../test/scala/org/apache/spark/FileSuite.scala    | 18 ++++-----
 .../org/apache/spark/benchmark/BenchmarkBase.scala |  2 +-
 .../deploy/history/EventLogFileReadersSuite.scala  |  4 +-
 .../deploy/history/EventLogFileWritersSuite.scala  |  2 +-
 .../scala/org/apache/spark/util/UtilsSuite.scala   |  4 +-
 .../spark/util/collection/SizeTrackerSuite.scala   |  6 +--
 .../apache/spark/ml/feature/Word2VecSuite.scala    |  8 ++--
 .../apache/spark/mllib/feature/Word2VecSuite.scala |  2 +-
 .../spark/deploy/k8s/KubernetesConfSuite.scala     | 10 ++---
 .../features/BasicExecutorFeatureStepSuite.scala   |  2 +-
 .../features/DriverServiceFeatureStepSuite.scala   |  2 +-
 .../k8s/integrationtest/BasicTestsSuite.scala      |  2 +-
 .../catalyst/encoders/ExpressionEncoderSuite.scala | 45 +++++++++++-----------
 .../sql/catalyst/encoders/RowEncoderSuite.scala    |  4 +-
 .../expressions/ArithmeticExpressionSuite.scala    |  6 +--
 .../expressions/DateExpressionsSuite.scala         |  2 +-
 .../expressions/JsonExpressionsSuite.scala         |  2 +-
 .../expressions/StringExpressionsSuite.scala       |  4 +-
 .../variant/VariantExpressionEvalUtilsSuite.scala  | 14 +++----
 .../variant/VariantExpressionSuite.scala           |  4 +-
 .../spark/sql/connect/ClientE2ETestSuite.scala     |  4 +-
 .../sql/connect/messages/AbbreviateSuite.scala     | 15 +++++---
 .../org/apache/spark/sql/CachedTableSuite.scala    |  2 +-
 .../apache/spark/sql/CharVarcharTestSuite.scala    | 27 ++++++-------
 .../org/apache/spark/sql/CsvFunctionsSuite.scala   |  2 +-
 .../apache/spark/sql/DataFrameAggregateSuite.scala |  2 +-
 .../org/apache/spark/sql/MathFunctionsSuite.scala  |  3 +-
 .../org/apache/spark/sql/PlanStabilitySuite.scala  |  2 +-
 .../apache/spark/sql/VariantEndToEndSuite.scala    | 20 +++++-----
 .../scala/org/apache/spark/sql/VariantSuite.scala  |  6 +--
 .../org/apache/spark/sql/execution/SortSuite.scala |  3 +-
 .../sql/execution/benchmark/Base64Benchmark.scala  |  4 +-
 .../execution/benchmark/CharVarcharBenchmark.scala |  2 +-
 .../benchmark/DataSourceReadBenchmark.scala        |  8 ++--
 .../execution/benchmark/LargeRowBenchmark.scala    |  2 +-
 .../benchmark/NestedSchemaPruningBenchmark.scala   |  4 +-
 .../datasources/FileSourceStrategySuite.scala      |  2 +-
 .../sql/execution/datasources/csv/CSVSuite.scala   |  4 +-
 .../execution/datasources/json/JsonBenchmark.scala |  2 +-
 .../sql/execution/datasources/json/JsonSuite.scala |  2 +-
 .../execution/datasources/json/TestJsonData.scala  |  4 +-
 .../execution/datasources/orc/OrcQuerySuite.scala  | 20 +++++-----
 .../parquet/ParquetColumnIndexSuite.scala          |  6 +--
 .../parquet/ParquetPartitionDiscoverySuite.scala   | 10 ++---
 .../sql/execution/datasources/xml/XmlSuite.scala   |  9 ++---
 .../sql/execution/joins/HashedRelationSuite.scala  |  6 +--
 .../execution/streaming/state/RocksDBSuite.scala   |  2 +-
 .../StateSchemaCompatibilityCheckerSuite.scala     | 16 ++++----
 .../spark/sql/sources/FilteredScanSuite.scala      |  6 +--
 .../thriftserver/SparkMetadataOperationSuite.scala |  2 +-
 .../apache/spark/sql/hive/ErrorPositionSuite.scala |  2 +-
 .../hive/execution/HiveSerDeReadWriteSuite.scala   |  5 ++-
 .../spark/sql/hive/execution/SQLQuerySuite.scala   |  2 +-
 .../util/RateLimitedOutputStreamSuite.scala        |  2 +-
 55 files changed, 182 insertions(+), 175 deletions(-)

diff --git 
a/common/utils/src/test/scala/org/apache/spark/util/IvyTestUtils.scala 
b/common/utils/src/test/scala/org/apache/spark/util/IvyTestUtils.scala
index ea46b1b835e6..e240603ee252 100644
--- a/common/utils/src/test/scala/org/apache/spark/util/IvyTestUtils.scala
+++ b/common/utils/src/test/scala/org/apache/spark/util/IvyTestUtils.scala
@@ -161,9 +161,9 @@ private[spark] object IvyTestUtils {
 
   /** Helper method to write artifact information in the pom. */
   private def pomArtifactWriter(artifact: MavenCoordinate, tabCount: Int = 1): 
String = {
-    var result = "\n" + "  " * tabCount + 
s"<groupId>${artifact.groupId}</groupId>"
-    result += "\n" + "  " * tabCount + 
s"<artifactId>${artifact.artifactId}</artifactId>"
-    result += "\n" + "  " * tabCount + 
s"<version>${artifact.version}</version>"
+    var result = "\n" + "  ".repeat(tabCount) + 
s"<groupId>${artifact.groupId}</groupId>"
+    result += "\n" + "  ".repeat(tabCount) + 
s"<artifactId>${artifact.artifactId}</artifactId>"
+    result += "\n" + "  ".repeat(tabCount) + 
s"<version>${artifact.version}</version>"
     result
   }
 
diff --git a/core/src/test/scala/org/apache/spark/FileSuite.scala 
b/core/src/test/scala/org/apache/spark/FileSuite.scala
index 5f9912cbd021..b255436710b6 100644
--- a/core/src/test/scala/org/apache/spark/FileSuite.scala
+++ b/core/src/test/scala/org/apache/spark/FileSuite.scala
@@ -81,7 +81,7 @@ class FileSuite extends SparkFunSuite with LocalSparkContext {
     val compressedOutputDir = new File(tempDir, 
"output_compressed").getAbsolutePath
     val codec = new DefaultCodec()
 
-    val data = sc.parallelize("a" * 10000, 1)
+    val data = sc.parallelize("a".repeat(10000), 1)
     data.saveAsTextFile(normalDir)
     data.saveAsTextFile(compressedOutputDir, classOf[DefaultCodec])
 
@@ -107,7 +107,7 @@ class FileSuite extends SparkFunSuite with 
LocalSparkContext {
   test("SequenceFiles") {
     sc = new SparkContext("local", "test")
     val outputDir = new File(tempDir, "output").getAbsolutePath
-    val nums = sc.makeRDD(1 to 3).map(x => (x, "a" * x)) // (1,a), (2,aa), 
(3,aaa)
+    val nums = sc.makeRDD(1 to 3).map(x => (x, "a".repeat(x))) // (1,a), 
(2,aa), (3,aaa)
     nums.saveAsSequenceFile(outputDir)
     // Try reading the output back as a SequenceFile
     val output = sc.sequenceFile[IntWritable, Text](outputDir)
@@ -157,7 +157,7 @@ class FileSuite extends SparkFunSuite with 
LocalSparkContext {
   test("SequenceFile with writable key") {
     sc = new SparkContext("local", "test")
     val outputDir = new File(tempDir, "output").getAbsolutePath
-    val nums = sc.makeRDD(1 to 3).map(x => (new IntWritable(x), "a" * x))
+    val nums = sc.makeRDD(1 to 3).map(x => (new IntWritable(x), "a".repeat(x)))
     nums.saveAsSequenceFile(outputDir)
     // Try reading the output back as a SequenceFile
     val output = sc.sequenceFile[IntWritable, Text](outputDir)
@@ -167,7 +167,7 @@ class FileSuite extends SparkFunSuite with 
LocalSparkContext {
   test("SequenceFile with writable value") {
     sc = new SparkContext("local", "test")
     val outputDir = new File(tempDir, "output").getAbsolutePath
-    val nums = sc.makeRDD(1 to 3).map(x => (x, new Text("a" * x)))
+    val nums = sc.makeRDD(1 to 3).map(x => (x, new Text("a".repeat(x))))
     nums.saveAsSequenceFile(outputDir)
     // Try reading the output back as a SequenceFile
     val output = sc.sequenceFile[IntWritable, Text](outputDir)
@@ -177,7 +177,7 @@ class FileSuite extends SparkFunSuite with 
LocalSparkContext {
   test("SequenceFile with writable key and value") {
     sc = new SparkContext("local", "test")
     val outputDir = new File(tempDir, "output").getAbsolutePath
-    val nums = sc.makeRDD(1 to 3).map(x => (new IntWritable(x), new Text("a" * 
x)))
+    val nums = sc.makeRDD(1 to 3).map(x => (new IntWritable(x), new 
Text("a".repeat(x))))
     nums.saveAsSequenceFile(outputDir)
     // Try reading the output back as a SequenceFile
     val output = sc.sequenceFile[IntWritable, Text](outputDir)
@@ -187,7 +187,7 @@ class FileSuite extends SparkFunSuite with 
LocalSparkContext {
   test("implicit conversions in reading SequenceFiles") {
     sc = new SparkContext("local", "test")
     val outputDir = new File(tempDir, "output").getAbsolutePath
-    val nums = sc.makeRDD(1 to 3).map(x => (x, "a" * x)) // (1,a), (2,aa), 
(3,aaa)
+    val nums = sc.makeRDD(1 to 3).map(x => (x, "a".repeat(x))) // (1,a), 
(2,aa), (3,aaa)
     nums.saveAsSequenceFile(outputDir)
     // Similar to the tests above, we read a SequenceFile, but this time we 
pass type params
     // that are convertible to Writable instead of calling 
sequenceFile[IntWritable, Text]
@@ -213,7 +213,7 @@ class FileSuite extends SparkFunSuite with 
LocalSparkContext {
   test("object files of complex types") {
     sc = new SparkContext("local", "test")
     val outputDir = new File(tempDir, "output").getAbsolutePath
-    val nums = sc.makeRDD(1 to 3).map(x => (x, "a" * x))
+    val nums = sc.makeRDD(1 to 3).map(x => (x, "a".repeat(x)))
     nums.saveAsObjectFile(outputDir)
     // Try reading the output back as an object file
     val output = sc.objectFile[(Int, String)](outputDir)
@@ -245,7 +245,7 @@ class FileSuite extends SparkFunSuite with 
LocalSparkContext {
     import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat
     sc = new SparkContext("local", "test")
     val outputDir = new File(tempDir, "output").getAbsolutePath
-    val nums = sc.makeRDD(1 to 3).map(x => (new IntWritable(x), new Text("a" * 
x)))
+    val nums = sc.makeRDD(1 to 3).map(x => (new IntWritable(x), new 
Text("a".repeat(x))))
     nums.saveAsNewAPIHadoopFile[SequenceFileOutputFormat[IntWritable, Text]](
         outputDir)
     val output = sc.sequenceFile[IntWritable, Text](outputDir)
@@ -256,7 +256,7 @@ class FileSuite extends SparkFunSuite with 
LocalSparkContext {
     import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat
     sc = new SparkContext("local", "test")
     val outputDir = new File(tempDir, "output").getAbsolutePath
-    val nums = sc.makeRDD(1 to 3).map(x => (new IntWritable(x), new Text("a" * 
x)))
+    val nums = sc.makeRDD(1 to 3).map(x => (new IntWritable(x), new 
Text("a".repeat(x))))
     nums.saveAsSequenceFile(outputDir)
     val output =
       sc.newAPIHadoopFile[IntWritable, Text, 
SequenceFileInputFormat[IntWritable, Text]](outputDir)
diff --git a/core/src/test/scala/org/apache/spark/benchmark/BenchmarkBase.scala 
b/core/src/test/scala/org/apache/spark/benchmark/BenchmarkBase.scala
index ebb8609e8c13..5432c5121b94 100644
--- a/core/src/test/scala/org/apache/spark/benchmark/BenchmarkBase.scala
+++ b/core/src/test/scala/org/apache/spark/benchmark/BenchmarkBase.scala
@@ -37,7 +37,7 @@ abstract class BenchmarkBase {
   def runBenchmarkSuite(mainArgs: Array[String]): Unit
 
   final def runBenchmark(benchmarkName: String)(func: => Any): Unit = {
-    val separator = "=" * 96
+    val separator = "=".repeat(96)
     val testHeader = (separator + '\n' + benchmarkName + '\n' + separator + 
'\n' + '\n').getBytes
     output.foreach(_.write(testHeader))
     func
diff --git 
a/core/src/test/scala/org/apache/spark/deploy/history/EventLogFileReadersSuite.scala
 
b/core/src/test/scala/org/apache/spark/deploy/history/EventLogFileReadersSuite.scala
index 7501a98a1a57..bc0e2bbee9c3 100644
--- 
a/core/src/test/scala/org/apache/spark/deploy/history/EventLogFileReadersSuite.scala
+++ 
b/core/src/test/scala/org/apache/spark/deploy/history/EventLogFileReadersSuite.scala
@@ -242,7 +242,7 @@ class RollingEventLogFilesReaderSuite extends 
EventLogFileReadersSuite {
         SparkHadoopUtil.get.newConfiguration(conf))
 
       writer.start()
-      val dummyStr = "dummy" * 1024
+      val dummyStr = "dummy".repeat(1024)
       writeTestEvents(writer, dummyStr, 1024 * 1024 * 20)
       writer.stop()
 
@@ -275,7 +275,7 @@ class RollingEventLogFilesReaderSuite extends 
EventLogFileReadersSuite {
       writer.start()
 
       // write log more than 20m (intended to roll over to 3 files)
-      val dummyStr = "dummy" * 1024
+      val dummyStr = "dummy".repeat(1024)
       writeTestEvents(writer, dummyStr, 1024 * 1024 * 20)
 
       val logPathIncompleted = getCurrentLogPath(writer.logPath, isCompleted = 
false)
diff --git 
a/core/src/test/scala/org/apache/spark/deploy/history/EventLogFileWritersSuite.scala
 
b/core/src/test/scala/org/apache/spark/deploy/history/EventLogFileWritersSuite.scala
index a2718b973b15..d9d6a4f8d35d 100644
--- 
a/core/src/test/scala/org/apache/spark/deploy/history/EventLogFileWritersSuite.scala
+++ 
b/core/src/test/scala/org/apache/spark/deploy/history/EventLogFileWritersSuite.scala
@@ -322,7 +322,7 @@ class RollingEventLogFilesWriterSuite extends 
EventLogFileWritersSuite {
       writer.start()
 
       // write log more than 20m (intended to roll over to 3 files)
-      val dummyStr = "dummy" * 1024
+      val dummyStr = "dummy".repeat(1024)
       val expectedLines = writeTestEvents(writer, dummyStr, 1024 * 1024 * 21)
 
       val logDirPath = getAppEventLogDirPath(testDirPath.toUri, appId, 
attemptId)
diff --git a/core/src/test/scala/org/apache/spark/util/UtilsSuite.scala 
b/core/src/test/scala/org/apache/spark/util/UtilsSuite.scala
index e1bc750d61b0..33739e8129e5 100644
--- a/core/src/test/scala/org/apache/spark/util/UtilsSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/UtilsSuite.scala
@@ -492,10 +492,10 @@ class UtilsSuite extends SparkFunSuite with 
ResetSystemProperties {
     assert(Utils.createDirectory(testDirPath, "scenario1").exists())
 
     // 2. Illegal file path
-    val scenario2 = new File(testDir, "scenario2" * 256)
+    val scenario2 = new File(testDir, "scenario2".repeat(256))
     assert(!Utils.createDirectory(scenario2))
     assert(!scenario2.exists())
-    assertThrows[IOException](Utils.createDirectory(testDirPath, "scenario2" * 
256))
+    assertThrows[IOException](Utils.createDirectory(testDirPath, 
"scenario2".repeat(256)))
 
     // 3. The parent directory cannot read
     val scenario3 = new File(testDir, "scenario3")
diff --git 
a/core/src/test/scala/org/apache/spark/util/collection/SizeTrackerSuite.scala 
b/core/src/test/scala/org/apache/spark/util/collection/SizeTrackerSuite.scala
index 82a4c85b02fa..db69f2b78106 100644
--- 
a/core/src/test/scala/org/apache/spark/util/collection/SizeTrackerSuite.scala
+++ 
b/core/src/test/scala/org/apache/spark/util/collection/SizeTrackerSuite.scala
@@ -40,7 +40,7 @@ class SizeTrackerSuite extends SparkFunSuite {
   test("vector variable size insertions") {
     val rand = new Random(123456789)
     def randString(minLen: Int, maxLen: Int): String = {
-      "a" * (rand.nextInt(maxLen - minLen) + minLen)
+      "a".repeat(rand.nextInt(maxLen - minLen) + minLen)
     }
     testVector[String](10000, i => randString(0, 10))
     testVector[String](10000, i => randString(0, 100))
@@ -56,7 +56,7 @@ class SizeTrackerSuite extends SparkFunSuite {
   test("map variable size insertions") {
     val rand = new Random(123456789)
     def randString(minLen: Int, maxLen: Int): String = {
-      "a" * (rand.nextInt(maxLen - minLen) + minLen)
+      "a".repeat(rand.nextInt(maxLen - minLen) + minLen)
     }
     testMap[Int, String](10000, i => (i, randString(0, 10)))
     testMap[Int, String](10000, i => (i, randString(0, 100)))
@@ -66,7 +66,7 @@ class SizeTrackerSuite extends SparkFunSuite {
   test("map updates") {
     val rand = new Random(123456789)
     def randString(minLen: Int, maxLen: Int): String = {
-      "a" * (rand.nextInt(maxLen - minLen) + minLen)
+      "a".repeat(rand.nextInt(maxLen - minLen) + minLen)
     }
     testMap[String, Int](10000, i => (randString(0, 10000), i))
   }
diff --git 
a/mllib/src/test/scala/org/apache/spark/ml/feature/Word2VecSuite.scala 
b/mllib/src/test/scala/org/apache/spark/ml/feature/Word2VecSuite.scala
index d15462f6f403..0e38b782f507 100644
--- a/mllib/src/test/scala/org/apache/spark/ml/feature/Word2VecSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/ml/feature/Word2VecSuite.scala
@@ -36,7 +36,7 @@ class Word2VecSuite extends MLTest with DefaultReadWriteTest {
   }
 
   test("Word2Vec") {
-    val sentence = "a b " * 100 + "a c " * 10
+    val sentence = "a b ".repeat(100) + "a c ".repeat(10)
     val numOfWords = sentence.split(" ").length
     val doc = sc.parallelize(Seq(sentence, sentence)).map(line => line.split(" 
"))
 
@@ -76,7 +76,7 @@ class Word2VecSuite extends MLTest with DefaultReadWriteTest {
   }
 
   test("getVectors") {
-    val sentence = "a b " * 100 + "a c " * 10
+    val sentence = "a b ".repeat(100) + "a c ".repeat(10)
     val doc = sc.parallelize(Seq(sentence, sentence)).map(line => line.split(" 
"))
     val docDF = doc.zip(doc).toDF("text", "alsotext")
 
@@ -106,7 +106,7 @@ class Word2VecSuite extends MLTest with 
DefaultReadWriteTest {
 
   test("findSynonyms") {
 
-    val sentence = "a b " * 100 + "a c " * 10
+    val sentence = "a b ".repeat(100) + "a c ".repeat(10)
     val doc = sc.parallelize(Seq(sentence, sentence)).map(line => line.split(" 
"))
     val docDF = doc.zip(doc).toDF("text", "alsotext")
 
@@ -138,7 +138,7 @@ class Word2VecSuite extends MLTest with 
DefaultReadWriteTest {
 
   test("window size") {
 
-    val sentence = "a q s t q s t b b b s t m s t m q " * 100 + "a c " * 10
+    val sentence = "a q s t q s t b b b s t m s t m q ".repeat(100) + "a c 
".repeat(10)
     val doc = sc.parallelize(Seq(sentence, sentence)).map(line => line.split(" 
"))
     val docDF = doc.zip(doc).toDF("text", "alsotext")
 
diff --git 
a/mllib/src/test/scala/org/apache/spark/mllib/feature/Word2VecSuite.scala 
b/mllib/src/test/scala/org/apache/spark/mllib/feature/Word2VecSuite.scala
index 0b81a7c8aaf3..5e305262d6f4 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/feature/Word2VecSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/feature/Word2VecSuite.scala
@@ -29,7 +29,7 @@ class Word2VecSuite extends SparkFunSuite with 
MLlibTestSparkContext {
   // TODO: add more tests
 
   test("Word2Vec") {
-    val sentence = "a b " * 100 + "a c " * 10
+    val sentence = "a b ".repeat(100) + "a c ".repeat(10)
     val localDoc = Seq(sentence, sentence)
     val doc = sc.parallelize(localDoc)
       .map(line => line.split(" ").toSeq)
diff --git 
a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/KubernetesConfSuite.scala
 
b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/KubernetesConfSuite.scala
index 61469f8acb7a..33cfd3c7588f 100644
--- 
a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/KubernetesConfSuite.scala
+++ 
b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/KubernetesConfSuite.scala
@@ -254,15 +254,15 @@ class KubernetesConfSuite extends SparkFunSuite {
 
   test("SPARK-36566: get app name label") {
     assert(KubernetesConf.getAppNameLabel(" Job+Spark-Pi 2021") === 
"job-spark-pi-2021")
-    assert(KubernetesConf.getAppNameLabel("a" * 63) === "a" * 63)
-    assert(KubernetesConf.getAppNameLabel("a" * 64) === "a" * 63)
-    assert(KubernetesConf.getAppNameLabel("a" * 253) === "a" * 63)
+    assert(KubernetesConf.getAppNameLabel("a".repeat(63)) === "a".repeat(63))
+    assert(KubernetesConf.getAppNameLabel("a".repeat(64)) === "a".repeat(63))
+    assert(KubernetesConf.getAppNameLabel("a".repeat(253)) === "a".repeat(63))
   }
 
   test("SPARK-38630: K8s label value should start and end with alphanumeric") {
     assert(KubernetesConf.getAppNameLabel("-hello-") === "hello")
-    assert(KubernetesConf.getAppNameLabel("a" * 62 + "-aaa") === "a" * 62)
-    assert(KubernetesConf.getAppNameLabel("-" + "a" * 63) === "a" * 62)
+    assert(KubernetesConf.getAppNameLabel("a".repeat(62) + "-aaa") === 
"a".repeat(62))
+    assert(KubernetesConf.getAppNameLabel("-" + "a".repeat(63)) === 
"a".repeat(62))
   }
 
   test("SPARK-40869: Resource name prefix should not start with a hyphen") {
diff --git 
a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/features/BasicExecutorFeatureStepSuite.scala
 
b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/features/BasicExecutorFeatureStepSuite.scala
index 906727e49276..068bce0b2e0e 100644
--- 
a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/features/BasicExecutorFeatureStepSuite.scala
+++ 
b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/features/BasicExecutorFeatureStepSuite.scala
@@ -213,7 +213,7 @@ class BasicExecutorFeatureStepSuite extends SparkFunSuite 
with BeforeAndAfter {
 
   test("SPARK-35460: invalid PodNamePrefixes") {
     withPodNamePrefix {
-      Seq("_123", "spark_exec", "spark@", "a" * 238).foreach { invalid =>
+      Seq("_123", "spark_exec", "spark@", "a".repeat(238)).foreach { invalid =>
         baseConf.set(KUBERNETES_EXECUTOR_POD_NAME_PREFIX, invalid)
         checkError(
           exception = 
intercept[SparkIllegalArgumentException](newExecutorConf()),
diff --git 
a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/features/DriverServiceFeatureStepSuite.scala
 
b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/features/DriverServiceFeatureStepSuite.scala
index 8d4670393f9d..3b8738a31bce 100644
--- 
a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/features/DriverServiceFeatureStepSuite.scala
+++ 
b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/features/DriverServiceFeatureStepSuite.scala
@@ -32,7 +32,7 @@ import org.apache.spark.util.ManualClock
 class DriverServiceFeatureStepSuite extends SparkFunSuite {
 
   private val LONG_RESOURCE_NAME_PREFIX =
-    "a" * (DriverServiceFeatureStep.MAX_SERVICE_NAME_LENGTH -
+    "a".repeat(DriverServiceFeatureStep.MAX_SERVICE_NAME_LENGTH -
       DriverServiceFeatureStep.DRIVER_SVC_POSTFIX.length + 1)
   private val DRIVER_LABELS = Map(
     "label1key" -> "label1value",
diff --git 
a/resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/BasicTestsSuite.scala
 
b/resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/BasicTestsSuite.scala
index 0dafe30c364a..d710add45eb9 100644
--- 
a/resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/BasicTestsSuite.scala
+++ 
b/resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/BasicTestsSuite.scala
@@ -70,7 +70,7 @@ private[spark] trait BasicTestsSuite { k8sSuite: 
KubernetesSuite =>
   }
 
   test("Run SparkPi with a very long application name.", k8sTestTag) {
-    sparkAppConf.set("spark.app.name", "long" * 40)
+    sparkAppConf.set("spark.app.name", "long".repeat(40))
     runSparkPiAndVerifyCompletion()
   }
 
diff --git 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoderSuite.scala
 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoderSuite.scala
index 616c6d65636d..85e8053a877a 100644
--- 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoderSuite.scala
+++ 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoderSuite.scala
@@ -796,50 +796,51 @@ class ExpressionEncoderSuite extends 
CodegenInterpretedPlanTest with AnalysisTes
   }
   // Scala / Java big decimals 
----------------------------------------------------------
 
-  encodeDecodeTest(BigDecimal(("9" * 20) + "." + "9" * 18),
+  encodeDecodeTest(BigDecimal("9".repeat(20) + "." + "9".repeat(18)),
     "scala decimal within precision/scale limit")
-  encodeDecodeTest(new java.math.BigDecimal(("9" * 20) + "." + "9" * 18),
+  encodeDecodeTest(new java.math.BigDecimal("9".repeat(20) + "." + 
"9".repeat(18)),
     "java decimal within precision/scale limit")
 
-  encodeDecodeTest(-BigDecimal(("9" * 20) + "." + "9" * 18),
+  encodeDecodeTest(-BigDecimal("9".repeat(20) + "." + "9".repeat(18)),
     "negative scala decimal within precision/scale limit")
-  encodeDecodeTest(new java.math.BigDecimal(("9" * 20) + "." + "9" * 
18).negate,
+  encodeDecodeTest(new java.math.BigDecimal("9".repeat(20) + "." + 
"9".repeat(18)).negate,
     "negative java decimal within precision/scale limit")
 
-  testOverflowingBigNumeric(BigDecimal("1" * 21), "scala big decimal")
-  testOverflowingBigNumeric(new java.math.BigDecimal("1" * 21), "java big 
decimal")
+  testOverflowingBigNumeric(BigDecimal("1".repeat(21)), "scala big decimal")
+  testOverflowingBigNumeric(new java.math.BigDecimal("1".repeat(21)), "java 
big decimal")
 
-  testOverflowingBigNumeric(-BigDecimal("1" * 21), "negative scala big 
decimal")
-  testOverflowingBigNumeric(new java.math.BigDecimal("1" * 21).negate, 
"negative java big decimal")
+  testOverflowingBigNumeric(-BigDecimal("1".repeat(21)), "negative scala big 
decimal")
+  testOverflowingBigNumeric(new java.math.BigDecimal("1".repeat(21)).negate,
+    "negative java big decimal")
 
-  testOverflowingBigNumeric(BigDecimal(("1" * 21) + ".123"),
+  testOverflowingBigNumeric(BigDecimal("1".repeat(21) + ".123"),
     "scala big decimal with fractional part")
-  testOverflowingBigNumeric(new java.math.BigDecimal(("1" * 21) + ".123"),
+  testOverflowingBigNumeric(new java.math.BigDecimal("1".repeat(21) + ".123"),
     "java big decimal with fractional part")
 
-  testOverflowingBigNumeric(BigDecimal(("1" * 21)  + "." + "9999" * 100),
+  testOverflowingBigNumeric(BigDecimal("1".repeat(21)  + "." + 
"9999".repeat(100)),
     "scala big decimal with long fractional part")
-  testOverflowingBigNumeric(new java.math.BigDecimal(("1" * 21)  + "." + 
"9999" * 100),
+  testOverflowingBigNumeric(new java.math.BigDecimal("1".repeat(21)  + "." + 
"9999".repeat(100)),
     "java big decimal with long fractional part")
 
   // Scala / Java big integers 
----------------------------------------------------------
 
-  encodeDecodeTest(BigInt("9" * 38), "scala big integer within precision 
limit")
-  encodeDecodeTest(new BigInteger("9" * 38), "java big integer within 
precision limit")
+  encodeDecodeTest(BigInt("9".repeat(38)), "scala big integer within precision 
limit")
+  encodeDecodeTest(new BigInteger("9".repeat(38)), "java big integer within 
precision limit")
 
-  encodeDecodeTest(-BigInt("9" * 38),
+  encodeDecodeTest(-BigInt("9".repeat(38)),
     "negative scala big integer within precision limit")
-  encodeDecodeTest(new BigInteger("9" * 38).negate(),
+  encodeDecodeTest(new BigInteger("9".repeat(38)).negate(),
     "negative java big integer within precision limit")
 
-  testOverflowingBigNumeric(BigInt("1" * 39), "scala big int")
-  testOverflowingBigNumeric(new BigInteger("1" * 39), "java big integer")
+  testOverflowingBigNumeric(BigInt("1".repeat(39)), "scala big int")
+  testOverflowingBigNumeric(new BigInteger("1".repeat(39)), "java big integer")
 
-  testOverflowingBigNumeric(-BigInt("1" * 39), "negative scala big int")
-  testOverflowingBigNumeric(new BigInteger("1" * 39).negate, "negative java 
big integer")
+  testOverflowingBigNumeric(-BigInt("1".repeat(39)), "negative scala big int")
+  testOverflowingBigNumeric(new BigInteger("1".repeat(39)).negate, "negative 
java big integer")
 
-  testOverflowingBigNumeric(BigInt("9" * 100), "scala very large big int")
-  testOverflowingBigNumeric(new BigInteger("9" * 100), "java very big int")
+  testOverflowingBigNumeric(BigInt("9".repeat(100)), "scala very large big 
int")
+  testOverflowingBigNumeric(new BigInteger("9".repeat(100)), "java very big 
int")
 
   private def testOverflowingBigNumeric[T: TypeTag](bigNumeric: T, testName: 
String): Unit = {
     Seq(true, false).foreach { ansiEnabled =>
diff --git 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/RowEncoderSuite.scala
 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/RowEncoderSuite.scala
index 008aa976d605..09247a459b9c 100644
--- 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/RowEncoderSuite.scala
+++ 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/RowEncoderSuite.scala
@@ -199,8 +199,8 @@ class RowEncoderSuite extends CodegenInterpretedPlanTest {
 
   test("SPARK-23179: RowEncoder should respect nullOnOverflow for decimals") {
     val schema = new StructType().add("decimal", DecimalType.SYSTEM_DEFAULT)
-    testDecimalOverflow(schema, Row(BigDecimal("9" * 100)))
-    testDecimalOverflow(schema, Row(new java.math.BigDecimal("9" * 100)))
+    testDecimalOverflow(schema, Row(BigDecimal("9".repeat(100))))
+    testDecimalOverflow(schema, Row(new java.math.BigDecimal("9".repeat(100))))
   }
 
   private def testDecimalOverflow(schema: StructType, row: Row): Unit = {
diff --git 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ArithmeticExpressionSuite.scala
 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ArithmeticExpressionSuite.scala
index 89f0b95f5c18..68cf654208cc 100644
--- 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ArithmeticExpressionSuite.scala
+++ 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ArithmeticExpressionSuite.scala
@@ -710,11 +710,11 @@ class ArithmeticExpressionSuite extends SparkFunSuite 
with ExpressionEvalHelper
 
   test("SPARK-22499: Least and greatest should not generate codes beyond 
64KB") {
     val N = 2000
-    val strings = (1 to N).map(x => "s" * x)
+    val strings = (1 to N).map(x => "s".repeat(x))
     val inputsExpr = strings.map(Literal.create(_, StringType))
 
-    checkEvaluation(Least(inputsExpr), "s" * 1, EmptyRow)
-    checkEvaluation(Greatest(inputsExpr), "s" * N, EmptyRow)
+    checkEvaluation(Least(inputsExpr), "s".repeat(1), EmptyRow)
+    checkEvaluation(Greatest(inputsExpr), "s".repeat(N), EmptyRow)
   }
 
   test("SPARK-22704: Least and greatest use less global variables") {
diff --git 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/DateExpressionsSuite.scala
 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/DateExpressionsSuite.scala
index a52aed1bc9aa..f8257e8baf2c 100644
--- 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/DateExpressionsSuite.scala
+++ 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/DateExpressionsSuite.scala
@@ -1638,7 +1638,7 @@ class DateExpressionsSuite extends SparkFunSuite with 
ExpressionEvalHelper {
 
     // test overflow for decimal input
     checkExceptionInExpression[ArithmeticException](
-      SecondsToTimestamp(Literal(Decimal("9" * 38))), "Overflow"
+      SecondsToTimestamp(Literal(Decimal("9".repeat(38)))), "Overflow"
     )
     // test truncation error for decimal input
     checkExceptionInExpression[ArithmeticException](
diff --git 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/JsonExpressionsSuite.scala
 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/JsonExpressionsSuite.scala
index 467d0fc36032..2a6cd3e3f0c2 100644
--- 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/JsonExpressionsSuite.scala
+++ 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/JsonExpressionsSuite.scala
@@ -254,7 +254,7 @@ class JsonExpressionsSuite extends SparkFunSuite with 
ExpressionEvalHelper {
   }
 
   test("some big value") {
-    val value = "x" * 3000
+    val value = "x".repeat(3000)
     checkEvaluation(
       GetJsonObject(NonFoldableLiteral((s"""{"big": "$value"}""")),
       NonFoldableLiteral("$.big")), value)
diff --git 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/StringExpressionsSuite.scala
 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/StringExpressionsSuite.scala
index 39b11c1e1717..bca4984cfac9 100644
--- 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/StringExpressionsSuite.scala
+++ 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/StringExpressionsSuite.scala
@@ -522,7 +522,7 @@ class StringExpressionsSuite extends SparkFunSuite with 
ExpressionEvalHelper {
   }
 
   test("SPARK-47307: base64 encoding without chunking") {
-    val longString = "a" * 58
+    val longString = "a".repeat(58)
     val encoded = 
"YWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYQ=="
     withSQLConf(SQLConf.CHUNK_BASE64_STRING_ENABLED.key -> "false") {
       checkEvaluation(Base64(Literal(longString.getBytes)), encoded)
@@ -954,7 +954,7 @@ class StringExpressionsSuite extends SparkFunSuite with 
ExpressionEvalHelper {
   test("SPARK-22603: FormatString should not generate codes beyond 64KB") {
     val N = 4500
     val args = (1 to N).map(i => Literal.create(i.toString, StringType))
-    val format = "%s" * N
+    val format = "%s".repeat(N)
     val expected = (1 to N).map(i => i.toString).mkString
     checkEvaluation(FormatString(Literal(format) +: args: _*), expected)
   }
diff --git 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/variant/VariantExpressionEvalUtilsSuite.scala
 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/variant/VariantExpressionEvalUtilsSuite.scala
index f599fead4501..2aef7c455e64 100644
--- 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/variant/VariantExpressionEvalUtilsSuite.scala
+++ 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/variant/VariantExpressionEvalUtilsSuite.scala
@@ -65,18 +65,18 @@ class VariantExpressionEvalUtilsSuite extends SparkFunSuite 
{
     check("0.000000001", Array(primitiveHeader(DECIMAL4), 9, 1, 0, 0, 0), 
emptyMetadata)
     check("0.0000000001",
       Array(primitiveHeader(DECIMAL8), 10, 1, 0, 0, 0, 0, 0, 0, 0), 
emptyMetadata)
-    check("9" * 38,
-      Array[Byte](primitiveHeader(DECIMAL16), 0) ++ BigInt("9" * 
38).toByteArray.reverse,
+    check("9".repeat(38),
+      Array[Byte](primitiveHeader(DECIMAL16), 0) ++ 
BigInt("9".repeat(38)).toByteArray.reverse,
       emptyMetadata)
-    check("1" + "0" * 38,
+    check("1" + "0".repeat(38),
       Array(primitiveHeader(DOUBLE)) ++
         BigInt(java.lang.Double.doubleToLongBits(1E38)).toByteArray.reverse,
       emptyMetadata)
     check("\"\"", Array(shortStrHeader(0)), emptyMetadata)
     check("\"abcd\"", Array(shortStrHeader(4), 'a', 'b', 'c', 'd'), 
emptyMetadata)
-    check("\"" + ("x" * 63) + "\"",
+    check("\"" + "x".repeat(63) + "\"",
       Array(shortStrHeader(63)) ++ Array.fill(63)('x'.toByte), emptyMetadata)
-    check("\"" + ("y" * 64) + "\"",
+    check("\"" + "y".repeat(64) + "\"",
       Array[Byte](primitiveHeader(LONG_STR), 64, 0, 0, 0) ++ 
Array.fill(64)('y'.toByte),
       emptyMetadata)
     check("{}", Array(objectHeader(false, 1, 1),
@@ -133,7 +133,7 @@ class VariantExpressionEvalUtilsSuite extends SparkFunSuite 
{
       checkException(json, "MALFORMED_RECORD_IN_PARSING.WITHOUT_SUGGESTION",
         Map("badRecord" -> json, "failFastMode" -> "FAILFAST"))
     }
-    for (json <- Seq("\"" + "a" * (16 * 1024 * 1024) + "\"",
+    for (json <- Seq("\"" + "a".repeat(16 * 1024 * 1024) + "\"",
       (0 to 4 * 1024 * 1024).mkString("[", ",", "]"))) {
       checkException(json, "VARIANT_SIZE_LIMIT",
         Map("sizeLimit" -> "16.0 MiB", "functionName" -> "`parse_json`"))
@@ -166,7 +166,7 @@ class VariantExpressionEvalUtilsSuite extends SparkFunSuite 
{
     check("false", expected = false)
     check("false", expected = false)
     check("65.43", expected = false)
-    check("\"" + "spark" * 100 + "\"", expected = false)
+    check("\"" + "spark".repeat(100) + "\"", expected = false)
     // Short String
     check("\"\"", expected = false)
     check("\"null\"", expected = false)
diff --git 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/variant/VariantExpressionSuite.scala
 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/variant/VariantExpressionSuite.scala
index d5a2e99bbed7..baf588d8472c 100644
--- 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/variant/VariantExpressionSuite.scala
+++ 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/variant/VariantExpressionSuite.scala
@@ -900,11 +900,11 @@ class VariantExpressionSuite extends SparkFunSuite with 
ExpressionEvalHelper {
       check(input, input.toString)
     }
     for (precision <- Seq(9, 18, 38)) {
-      val input = BigDecimal("9" * precision)
+      val input = BigDecimal("9".repeat(precision))
       check(Literal.create(input, DecimalType(precision, 0)), input.toString)
     }
     check("", "\"\"")
-    check("x" * 128, "\"" + ("x" * 128) + "\"")
+    check("x".repeat(128), "\"" + "x".repeat(128) + "\"")
     check(Array[Byte](1, 2, 3), "\"AQID\"")
     check(Literal(0, DateType), "\"1970-01-01\"")
 
diff --git 
a/sql/connect/client/jvm/src/test/scala/org/apache/spark/sql/connect/ClientE2ETestSuite.scala
 
b/sql/connect/client/jvm/src/test/scala/org/apache/spark/sql/connect/ClientE2ETestSuite.scala
index 641141bda849..cc297f4c4987 100644
--- 
a/sql/connect/client/jvm/src/test/scala/org/apache/spark/sql/connect/ClientE2ETestSuite.scala
+++ 
b/sql/connect/client/jvm/src/test/scala/org/apache/spark/sql/connect/ClientE2ETestSuite.scala
@@ -120,7 +120,7 @@ class ClientE2ETestSuite
       import session.implicits._
 
       val throwException =
-        udf((_: String) => throw new SparkException("test" * 10000))
+        udf((_: String) => throw new SparkException("test".repeat(10000)))
 
       val ex = intercept[SparkException] {
         Seq("1").toDS().withColumn("udf_val", 
throwException($"value")).collect()
@@ -133,7 +133,7 @@ class ClientE2ETestSuite
       val cause = ex.getCause.asInstanceOf[SparkException]
       assert(cause.getCondition == null)
       assert(cause.getMessageParameters.isEmpty)
-      assert(cause.getMessage.contains("test" * 10000))
+      assert(cause.getMessage.contains("test".repeat(10000)))
     }
   }
 
diff --git 
a/sql/connect/server/src/test/scala/org/apache/spark/sql/connect/messages/AbbreviateSuite.scala
 
b/sql/connect/server/src/test/scala/org/apache/spark/sql/connect/messages/AbbreviateSuite.scala
index 94d92a264d20..41271a874cc8 100644
--- 
a/sql/connect/server/src/test/scala/org/apache/spark/sql/connect/messages/AbbreviateSuite.scala
+++ 
b/sql/connect/server/src/test/scala/org/apache/spark/sql/connect/messages/AbbreviateSuite.scala
@@ -27,7 +27,7 @@ import org.apache.spark.sql.connect.common.{ProtoDataTypes, 
ProtoUtils}
 class AbbreviateSuite extends SparkFunSuite {
 
   test("truncate string: simple SQL text") {
-    val message = proto.SQL.newBuilder().setQuery("x" * 1024).build()
+    val message = proto.SQL.newBuilder().setQuery("x".repeat(1024)).build()
 
     Seq(1, 16, 256, 512, 1024, 2048).foreach { threshold =>
       val truncated = ProtoUtils.abbreviate(message, threshold)
@@ -47,7 +47,7 @@ class AbbreviateSuite extends SparkFunSuite {
       .setSql(
         proto.SQL
           .newBuilder()
-          .setQuery("x" * 1024)
+          .setQuery("x".repeat(1024))
           .build())
       .build()
     val drop = proto.Relation
@@ -214,7 +214,10 @@ class AbbreviateSuite extends SparkFunSuite {
   test("truncate map<string, string>") {
     val read = proto.Read.NamedTable
       .newBuilder()
-      .putAllOptions(Map("k1" * 4096 -> "v1" * 4096, "k2" * 4096 -> "v2" * 
4096).asJava)
+      .putAllOptions(
+        Map(
+          "k1".repeat(4096) -> "v1".repeat(4096),
+          "k2".repeat(4096) -> "v2".repeat(4096)).asJava)
       .build()
 
     val threshold = 1024
@@ -237,14 +240,14 @@ class AbbreviateSuite extends SparkFunSuite {
             .newBuilder()
             .setUnresolvedAttribute(proto.Expression.UnresolvedAttribute
               .newBuilder()
-              .setUnparsedIdentifier("v1" * 4096)
+              .setUnparsedIdentifier("v1".repeat(4096))
               .build())
             .build(),
           "k2" -> proto.Expression
             .newBuilder()
             .setUnresolvedAttribute(proto.Expression.UnresolvedAttribute
               .newBuilder()
-              .setUnparsedIdentifier("v2" * 4096)
+              .setUnparsedIdentifier("v2".repeat(4096))
               .build())
             .build()).asJava)
       .build()
@@ -272,7 +275,7 @@ class AbbreviateSuite extends SparkFunSuite {
           .newBuilder()
           .setQuery(
             // Level 5.
-            "x" * (threshold + 32))
+            "x".repeat(threshold + 32))
           .build())
       .build()
 
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala 
b/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
index 484025c8935c..48632ed8208f 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
@@ -182,7 +182,7 @@ class CachedTableSuite extends QueryTest with SQLTestUtils
 
   test("too big for memory") {
     withTempView("bigData") {
-      val data = "*" * 1000
+      val data = "*".repeat(1000)
       sparkContext.parallelize(1 to 200000, 1).map(_ => BigData(data)).toDF()
         .createOrReplaceTempView("bigData")
       spark.table("bigData").persist(StorageLevel.MEMORY_AND_DISK)
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/CharVarcharTestSuite.scala 
b/sql/core/src/test/scala/org/apache/spark/sql/CharVarcharTestSuite.scala
index 6139d0e98767..02abb701b5df 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/CharVarcharTestSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/CharVarcharTestSuite.scala
@@ -77,7 +77,7 @@ trait CharVarcharTestSuite extends QueryTest with 
SQLTestUtils {
     Seq("CHAR(5)", "VARCHAR(5)").foreach { typ =>
       withTable("t") {
         sql(s"CREATE TABLE t(i STRING, c $typ) USING $format")
-        (0 to 5).map(n => "a" + " " * n).foreach { v =>
+        (0 to 5).map(n => "a" + " ".repeat(n)).foreach { v =>
           sql(s"INSERT OVERWRITE t VALUES ('1', '$v')")
           checkPlainResult(spark.table("t"), typ, v)
         }
@@ -112,7 +112,7 @@ trait CharVarcharTestSuite extends QueryTest with 
SQLTestUtils {
     // via dynamic partitioned columns
     withTable("t") {
       sql(s"CREATE TABLE t(i STRING, c CHAR(5)) USING $format PARTITIONED BY 
(c)")
-      (0 to 5).map(n => "a" + " " * n).foreach { v =>
+      (0 to 5).map(n => "a" + " ".repeat(n)).foreach { v =>
         sql(s"INSERT OVERWRITE t VALUES ('1', '$v')")
         checkPlainResult(spark.table("t"), "CHAR(5)", v)
       }
@@ -120,7 +120,7 @@ trait CharVarcharTestSuite extends QueryTest with 
SQLTestUtils {
 
     withTable("t") {
       sql(s"CREATE TABLE t(i STRING, c CHAR(5)) USING $format PARTITIONED BY 
(c)")
-      (0 to 5).map(n => "a" + " " * n).foreach { v =>
+      (0 to 5).map(n => "a" + " ".repeat(n)).foreach { v =>
         // via dynamic partitioned columns with drop partition command
         sql(s"INSERT INTO t VALUES ('1', '$v')")
         checkPlainResult(spark.table("t"), "CHAR(5)", v)
@@ -160,7 +160,7 @@ trait CharVarcharTestSuite extends QueryTest with 
SQLTestUtils {
       // https://issues.apache.org/jira/browse/SPARK-34192
       withTable("t") {
         sql(s"CREATE TABLE t(i STRING, c VARCHAR(5)) USING $format PARTITIONED 
BY (c)")
-        val v = "a" + " " * n
+        val v = "a" + " ".repeat(n)
         // via dynamic partitioned columns
         sql(s"INSERT INTO t VALUES ('1', '$v')")
         checkPlainResult(spark.table("t"), "VARCHAR(5)", v)
@@ -211,7 +211,7 @@ trait CharVarcharTestSuite extends QueryTest with 
SQLTestUtils {
     withTable("t") {
       sql(s"CREATE TABLE t(i STRING, c STRUCT<c: CHAR(5)>) USING $format")
       sql("INSERT INTO t VALUES ('1', struct('a'))")
-      checkAnswer(spark.table("t"), Row("1", Row("a" + " " * 4)))
+      checkAnswer(spark.table("t"), Row("1", Row("a" + " ".repeat(4))))
       checkColType(spark.table("t").schema(1), new StructType().add("c", 
CharType(5)))
 
       sql("INSERT OVERWRITE t VALUES ('1', null)")
@@ -225,7 +225,7 @@ trait CharVarcharTestSuite extends QueryTest with 
SQLTestUtils {
     withTable("t") {
       sql(s"CREATE TABLE t(i STRING, c ARRAY<CHAR(5)>) USING $format")
       sql("INSERT INTO t VALUES ('1', array('a', 'ab'))")
-      checkAnswer(spark.table("t"), Row("1", Seq("a" + " " * 4, "ab" + " " * 
3)))
+      checkAnswer(spark.table("t"), Row("1", Seq("a" + " ".repeat(4), "ab" + " 
".repeat(3))))
       checkColType(spark.table("t").schema(1), ArrayType(CharType(5)))
 
       sql("INSERT OVERWRITE t VALUES ('1', null)")
@@ -239,7 +239,7 @@ trait CharVarcharTestSuite extends QueryTest with 
SQLTestUtils {
     withTable("t") {
       sql(s"CREATE TABLE t(i STRING, c MAP<CHAR(5), STRING>) USING $format")
       sql("INSERT INTO t VALUES ('1', map('a', 'ab'))")
-      checkAnswer(spark.table("t"), Row("1", Map(("a" + " " * 4, "ab"))))
+      checkAnswer(spark.table("t"), Row("1", Map(("a" + " ".repeat(4), "ab"))))
       checkColType(spark.table("t").schema(1), MapType(CharType(5), 
StringType))
 
       sql("INSERT OVERWRITE t VALUES ('1', null)")
@@ -251,7 +251,7 @@ trait CharVarcharTestSuite extends QueryTest with 
SQLTestUtils {
     withTable("t") {
       sql(s"CREATE TABLE t(i STRING, c MAP<STRING, CHAR(5)>) USING $format")
       sql("INSERT INTO t VALUES ('1', map('a', 'ab'))")
-      checkAnswer(spark.table("t"), Row("1", Map(("a", "ab" + " " * 3))))
+      checkAnswer(spark.table("t"), Row("1", Map(("a", "ab" + " ".repeat(3)))))
       checkColType(spark.table("t").schema(1), MapType(StringType, 
CharType(5)))
 
       sql("INSERT OVERWRITE t VALUES ('1', null)")
@@ -265,7 +265,7 @@ trait CharVarcharTestSuite extends QueryTest with 
SQLTestUtils {
     withTable("t") {
       sql(s"CREATE TABLE t(i STRING, c MAP<CHAR(5), CHAR(10)>) USING $format")
       sql("INSERT INTO t VALUES ('1', map('a', 'ab'))")
-      checkAnswer(spark.table("t"), Row("1", Map(("a" + " " * 4, "ab" + " " * 
8))))
+      checkAnswer(spark.table("t"), Row("1", Map(("a" + " ".repeat(4), "ab" + 
" ".repeat(8)))))
       checkColType(spark.table("t").schema(1), MapType(CharType(5), 
CharType(10)))
 
       sql("INSERT OVERWRITE t VALUES ('1', null)")
@@ -277,7 +277,7 @@ trait CharVarcharTestSuite extends QueryTest with 
SQLTestUtils {
     withTable("t") {
       sql(s"CREATE TABLE t(i STRING, c STRUCT<c: ARRAY<CHAR(5)>>) USING 
$format")
       sql("INSERT INTO t VALUES ('1', struct(array('a', 'ab')))")
-      checkAnswer(spark.table("t"), Row("1", Row(Seq("a" + " " * 4, "ab" + " " 
* 3))))
+      checkAnswer(spark.table("t"), Row("1", Row(Seq("a" + " ".repeat(4), "ab" 
+ " ".repeat(3)))))
       checkColType(spark.table("t").schema(1),
         new StructType().add("c", ArrayType(CharType(5))))
 
@@ -294,7 +294,8 @@ trait CharVarcharTestSuite extends QueryTest with 
SQLTestUtils {
     withTable("t") {
       sql(s"CREATE TABLE t(i STRING, c ARRAY<STRUCT<c: CHAR(5)>>) USING 
$format")
       sql("INSERT INTO t VALUES ('1', array(struct('a'), struct('ab')))")
-      checkAnswer(spark.table("t"), Row("1", Seq(Row("a" + " " * 4), Row("ab" 
+ " " * 3))))
+      checkAnswer(spark.table("t"),
+        Row("1", Seq(Row("a" + " ".repeat(4)), Row("ab" + " ".repeat(3)))))
       checkColType(spark.table("t").schema(1),
         ArrayType(new StructType().add("c", CharType(5))))
 
@@ -311,7 +312,7 @@ trait CharVarcharTestSuite extends QueryTest with 
SQLTestUtils {
     withTable("t") {
       sql(s"CREATE TABLE t(i STRING, c ARRAY<ARRAY<CHAR(5)>>) USING $format")
       sql("INSERT INTO t VALUES ('1', array(array('a', 'ab')))")
-      checkAnswer(spark.table("t"), Row("1", Seq(Seq("a" + " " * 4, "ab" + " " 
* 3))))
+      checkAnswer(spark.table("t"), Row("1", Seq(Seq("a" + " ".repeat(4), "ab" 
+ " ".repeat(3)))))
       checkColType(spark.table("t").schema(1), 
ArrayType(ArrayType(CharType(5))))
 
       sql("INSERT OVERWRITE t VALUES ('1', null)")
@@ -425,7 +426,7 @@ trait CharVarcharTestSuite extends QueryTest with 
SQLTestUtils {
       sql("INSERT INTO t VALUES ('12 ', '12 ')")
       sql("INSERT INTO t VALUES ('1234  ', '1234  ')")
       checkAnswer(spark.table("t"), Seq(
-        Row("12" + " " * 3, "12 "),
+        Row("12" + " ".repeat(3), "12 "),
         Row("1234 ", "1234 ")))
     }
   }
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/CsvFunctionsSuite.scala 
b/sql/core/src/test/scala/org/apache/spark/sql/CsvFunctionsSuite.scala
index f5cca28dc01d..212e6ff64b29 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/CsvFunctionsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/CsvFunctionsSuite.scala
@@ -825,7 +825,7 @@ class CsvFunctionsSuite extends QueryTest with 
SharedSparkSession {
 
     // In singleVariantColumn mode, from_csv normally treats all inputs as 
valid. The only exception
     // case is the input exceeds the variant size limit (16MiB).
-    val largeInput = "a" * (16 * 1024 * 1024)
+    val largeInput = "a".repeat(16 * 1024 * 1024)
     checkAnswer(
       Seq(largeInput).toDF("value").select(
         from_csv(
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala 
b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala
index 7210749c0016..656c739af246 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala
@@ -2364,7 +2364,7 @@ class DataFrameAggregateSuite extends QueryTest
             val expectedAnswer = Row(null)
             assertDecimalSumOverflow(df2, ansiEnabled, fnName, expectedAnswer)
 
-            val decStr = "1" + "0" * 19
+            val decStr = "1" + "0".repeat(19)
             val d1 = spark.range(0, 12, 1, 1)
             val d2 = d1.select(expr(s"cast('$decStr' as decimal (38, 18)) as 
d")).agg(aggFn($"d"))
             assertDecimalSumOverflow(d2, ansiEnabled, fnName, expectedAnswer)
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/MathFunctionsSuite.scala 
b/sql/core/src/test/scala/org/apache/spark/sql/MathFunctionsSuite.scala
index f1d0815c181b..23d3fc3d2fe5 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/MathFunctionsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/MathFunctionsSuite.scala
@@ -246,7 +246,8 @@ class MathFunctionsSuite extends QueryTest with 
SharedSparkSession {
 
   test("SPARK-36229 inconsistently behaviour where returned value is above the 
64 char threshold") {
     withSQLConf(SQLConf.ANSI_ENABLED.key -> false.toString) {
-      val df = Seq(("?" * 64), ("?" * 65), ("a" * 4 + "?" * 60), ("a" * 4 + 
"?" * 61)).toDF("num")
+      val df = Seq(("?".repeat(64)), ("?".repeat(65)), ("a".repeat(4) + 
"?".repeat(60)),
+          ("a".repeat(4) + "?".repeat(61))).toDF("num")
       val expectedResult = Seq(Row("0"), Row("0"), Row("43690"), Row("43690"))
       checkAnswer(df.select(conv($"num", 16, 10)), expectedResult)
       checkAnswer(df.select(conv($"num", 16, -10)), expectedResult)
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/PlanStabilitySuite.scala 
b/sql/core/src/test/scala/org/apache/spark/sql/PlanStabilitySuite.scala
index d78328935982..62a1c5211b77 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/PlanStabilitySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/PlanStabilitySuite.scala
@@ -212,7 +212,7 @@ trait PlanStabilitySuite extends 
DisableAdaptiveExecutionSuite {
      *     Project [c_customer_id]
      */
     def simplifyNode(node: SparkPlan, depth: Int): String = {
-      val padding = "  " * depth
+      val padding = "  ".repeat(depth)
       var thisNode = node.nodeName
       if (node.references.nonEmpty) {
         thisNode += s" [${cleanUpReferences(node.references)}]"
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/VariantEndToEndSuite.scala 
b/sql/core/src/test/scala/org/apache/spark/sql/VariantEndToEndSuite.scala
index a40e34d94d08..8a0e2c296531 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/VariantEndToEndSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/VariantEndToEndSuite.scala
@@ -51,10 +51,10 @@ class VariantEndToEndSuite extends QueryTest with 
SharedSparkSession {
     check("-1")
     check("1.0E10")
     check("\"\"")
-    check("\"" + ("a" * 63) + "\"")
-    check("\"" + ("b" * 64) + "\"")
+    check("\"" + "a".repeat(63) + "\"")
+    check("\"" + "b".repeat(64) + "\"")
     // scalastyle:off nonascii
-    check("\"" + ("你好,世界" * 20) + "\"")
+    check("\"" + "你好,世界".repeat(20) + "\"")
     // scalastyle:on nonascii
     check("[]")
     check("{}")
@@ -87,10 +87,10 @@ class VariantEndToEndSuite extends QueryTest with 
SharedSparkSession {
     check("-1")
     check("1.0E10")
     check("\"\"")
-    check("\"" + ("a" * 63) + "\"")
-    check("\"" + ("b" * 64) + "\"")
+    check("\"" + "a".repeat(63) + "\"")
+    check("\"" + "b".repeat(64) + "\"")
     // scalastyle:off nonascii
-    check("\"" + ("你好,世界" * 20) + "\"")
+    check("\"" + "你好,世界".repeat(20) + "\"")
     // scalastyle:on nonascii
     check("[]")
     check("{}")
@@ -137,10 +137,10 @@ class VariantEndToEndSuite extends QueryTest with 
SharedSparkSession {
     check("-1")
     check("1.0E10")
     check("\"\"")
-    check("\"" + ("a" * 63) + "\"")
-    check("\"" + ("b" * 64) + "\"")
+    check("\"" + "a".repeat(63) + "\"")
+    check("\"" + "b".repeat(64) + "\"")
     // scalastyle:off nonascii
-    check("\"" + ("你好,世界" * 20) + "\"")
+    check("\"" + "你好,世界".repeat(20) + "\"")
     // scalastyle:on nonascii
     check("[]")
     check("{}")
@@ -155,7 +155,7 @@ class VariantEndToEndSuite extends QueryTest with 
SharedSparkSession {
     check("{1:2}", null)
     check("{\"a\":1", null)
     check("{\"a\":[a,b,c]}", null)
-    check("\"" + "a" * (16 * 1024 * 1024) + "\"", null)
+    check("\"" + "a".repeat(16 * 1024 * 1024) + "\"", null)
   }
 
   test("to_json with nested variant") {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/VariantSuite.scala 
b/sql/core/src/test/scala/org/apache/spark/sql/VariantSuite.scala
index 95cee3b99599..ac6a4e435709 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/VariantSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/VariantSuite.scala
@@ -48,7 +48,7 @@ class VariantSuite extends QueryTest with SharedSparkSession 
with ExpressionEval
         .map(_.get(0).asInstanceOf[VariantVal].toString)
         .sorted
         .toSeq
-      val expected = (1 until 10).map(id => "1" * id)
+      val expected = (1 until 10).map(id => "1".repeat(id))
       assert(result == expected)
     }
 
@@ -290,7 +290,7 @@ class VariantSuite extends QueryTest with SharedSparkSession with ExpressionEval
         .map(_.get(0).asInstanceOf[VariantVal].toString)
         .sorted
         .toSeq
-      val expected = (1 until 10).map(id => "1" * id)
+      val expected = (1 until 10).map(id => "1".repeat(id))
       assert(result == expected)
     }
 
@@ -831,7 +831,7 @@ class VariantSuite extends QueryTest with SharedSparkSession with ExpressionEval
   }
 
   test("variant_get size") {
-    val largeKey = "x" * 1000
+    val largeKey = "x".repeat(1000)
     val df = Seq(s"""{ "$largeKey": {"a" : 1 },
                        "b" : 2,
                        "c": [1,2,3,{"$largeKey": 4}] }""").toDF("json")
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/SortSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/SortSuite.scala
index 4cb9ae7cbc17..ea7d9386083e 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/SortSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/SortSuite.scala
@@ -82,8 +82,9 @@ class SortSuite extends SparkPlanTest with SharedSparkSession {
   test("sorting does not crash for large inputs") {
     val sortOrder = $"a".asc :: Nil
     val stringLength = 1024 * 1024 * 2
+    val df = Seq(Tuple1("a".repeat(stringLength)), Tuple1("b".repeat(stringLength))).toDF("a")
     checkThatPlansAgree(
-      Seq(Tuple1("a" * stringLength), Tuple1("b" * stringLength)).toDF("a").repartition(1),
+      df.repartition(1),
       SortExec(sortOrder, global = true, _: SparkPlan, testSpillFrequency = 1),
       ReferenceSort(sortOrder, global = true, _: SparkPlan),
       sortAnswers = false
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/Base64Benchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/Base64Benchmark.scala
index 3ad6baea84f2..bbc0ff968541 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/Base64Benchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/Base64Benchmark.scala
@@ -36,14 +36,14 @@ object Base64Benchmark extends SqlBasedBenchmark {
   private val N = 20L * 1000 * 1000
 
   private def doEncode(len: Int, f: Array[Byte] => Array[Byte]): Unit = {
-    spark.range(N).map(_ => "Spark" * len).foreach { s =>
+    spark.range(N).map(_ => "Spark".repeat(len)).foreach { s =>
       f(s.getBytes)
       ()
     }
   }
 
   private def doDecode(len: Int, f: Array[Byte] => Array[Byte]): Unit = {
-    spark.range(N).map(_ => "Spark" * len).map { s =>
+    spark.range(N).map(_ => "Spark".repeat(len)).map { s =>
       // using the same encode func
       java.util.Base64.getMimeEncoder.encode(s.getBytes)
     }.foreach { s =>
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/CharVarcharBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/CharVarcharBenchmark.scala
index 05148f5494e9..02529bb11ab7 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/CharVarcharBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/CharVarcharBenchmark.scala
@@ -62,7 +62,7 @@ object CharVarcharBenchmark extends SqlBasedBenchmark {
             createTable(tblName, colType, path)
             spark.range(card).map { _ =>
               if (hasSpaces) {
-                "st" + " " * length
+                "st" + " ".repeat(length)
               } else {
                 "st"
               }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DataSourceReadBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DataSourceReadBenchmark.scala
index ff57a447d3b7..84c5d1919f8f 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DataSourceReadBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DataSourceReadBenchmark.scala
@@ -362,11 +362,11 @@ object DataSourceReadBenchmark extends SqlBasedBenchmark {
       withTempTable("t1", "parquetV1Table", "parquetV2Table", "orcTable") {
         import spark.implicits._
         spark.range(values).map(_ => Random.nextLong()).map { x =>
-          val arrayOfStructColumn = (0 until 5).map(i => (x + i, s"$x" * 5))
+          val arrayOfStructColumn = (0 until 5).map(i => (x + i, s"$x".repeat(5)))
           val mapOfStructColumn = Map(
-            s"$x" -> (x * 0.1, (x, s"$x" * 100)),
-            (s"$x" * 2) -> (x * 0.2, (x, s"$x" * 200)),
-            (s"$x" * 3) -> (x * 0.3, (x, s"$x" * 300)))
+            s"$x" -> (x * 0.1, (x, s"$x".repeat(100))),
+            (s"$x".repeat(2)) -> (x * 0.2, (x, s"$x".repeat(200))),
+            (s"$x".repeat(3)) -> (x * 0.3, (x, s"$x".repeat(300))))
           (arrayOfStructColumn, mapOfStructColumn)
         }.toDF("col1", "col2").createOrReplaceTempView("t1")
 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/LargeRowBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/LargeRowBenchmark.scala
index 8b4f78e79913..7b37f637468b 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/LargeRowBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/LargeRowBenchmark.scala
@@ -40,7 +40,7 @@ object LargeRowBenchmark extends SqlBasedBenchmark {
   private def writeLargeRow(path: String, rowsNum: Int, numCols: Int, cellSizeMb: Double): Unit = {
     val stringLength = (cellSizeMb * 1024 * 1024).toInt
     spark.range(rowsNum)
-      .select(Seq.tabulate(numCols)(i => lit("a" * stringLength).as(s"col$i")): _*)
+      .select(Seq.tabulate(numCols)(i => lit("a".repeat(stringLength)).as(s"col$i")): _*)
       .write.parquet(path)
   }
 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/NestedSchemaPruningBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/NestedSchemaPruningBenchmark.scala
index 90fad7f36b86..f7569df8237b 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/NestedSchemaPruningBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/NestedSchemaPruningBenchmark.scala
@@ -41,8 +41,8 @@ abstract class NestedSchemaPruningBenchmark extends SqlBasedBenchmark {
     .range(N * 10)
     .sample(false, 0.1)
     .map { x =>
-      val col3 = (0 until 5).map(i => (x + i, s"$x" * 5))
-      (x, (x, s"$x" * 100), col3)
+      val col3 = (0 until 5).map(i => (x + i, s"$x".repeat(5)))
+      (x, (x, s"$x".repeat(100)), col3)
     }.toDF("col1", "col2", "col3")
 
   private def addCase(benchmark: Benchmark, name: String, sql: String): Unit = {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala
index 4faeae51ca58..afeca756208e 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala
@@ -683,7 +683,7 @@ class FileSourceStrategySuite extends QueryTest with SharedSparkSession {
       case (name, size) =>
         val file = new File(tempDir, name)
        assert(file.getParentFile.exists() || Utils.createDirectory(file.getParentFile))
-        util.stringToFile(file, "*" * size)
+        util.stringToFile(file, "*".repeat(size))
     }
 
     val df = spark.read
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala
index 3ff7da44e222..29d28552c320 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala
@@ -2596,7 +2596,7 @@ abstract class CSVSuite
   test("SPARK-28431: prevent CSV datasource throw TextParsingException with 
large size message") {
     withTempPath { path =>
       val maxCharsPerCol = 10000
-      val str = "a" * (maxCharsPerCol + 1)
+      val str = "a".repeat(maxCharsPerCol + 1)
 
       Files.write(
         path.toPath,
@@ -2906,7 +2906,7 @@ abstract class CSVSuite
 
   test("SPARK-34768: counting a long record with ignoreTrailingWhiteSpace set 
to true") {
     val bufSize = 128
-    val line = "X" * (bufSize - 1) + "| |"
+    val line = "X".repeat(bufSize - 1) + "| |"
     withTempPath { path =>
       Seq(line).toDF().write.text(path.getAbsolutePath)
       assert(spark.read.format("csv")
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonBenchmark.scala
index 02ed2a16d113..94a2ccc41d30 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonBenchmark.scala
@@ -109,7 +109,7 @@ object JsonBenchmark extends SqlBasedBenchmark {
   def writeWideColumn(path: String, rowsNum: Int): StructType = {
     spark.sparkContext.range(0, rowsNum, 1)
       .map { i =>
-        val s = "abcdef0123456789ABCDEF" * 20
+        val s = "abcdef0123456789ABCDEF".repeat(20)
         s"""{"a":"$s","b": 
$i,"c":"$s","d":$i,"e":"$s","f":$i,"x":"$s","y":$i,"z":"$s"}"""
       }
       .toDF().write.text(path)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala
index 0bc0dfeff05c..3de2d9cce1b7 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala
@@ -796,7 +796,7 @@ abstract class JsonSuite
   test("Find compatible types even if inferred DecimalType is not capable of 
other IntegralType") {
     val mixedIntegerAndDoubleRecords = Seq(
       """{"a": 3, "b": 1.1}""",
-      s"""{"a": 3.1, "b": 0.${"0" * 38}1}""").toDS()
+      s"""{"a": 3.1, "b": 0.${"0".repeat(38)}1}""").toDS()
     val jsonDF = spark.read
       .option("prefersDecimal", "true")
       .json(mixedIntegerAndDoubleRecords)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/TestJsonData.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/TestJsonData.scala
index 6fa2bdfbfe75..6d5a4d9c0ad7 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/TestJsonData.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/TestJsonData.scala
@@ -217,11 +217,11 @@ private[json] trait TestJsonData {
 
   def floatingValueRecords: Dataset[String] =
     spark.createDataset(spark.sparkContext.parallelize(
-      s"""{"a": 0.${"0" * 38}1, "b": 0.01}""" :: Nil))(Encoders.STRING)
+      s"""{"a": 0.${"0".repeat(38)}1, "b": 0.01}""" :: Nil))(Encoders.STRING)
 
   def bigIntegerRecords: Dataset[String] =
     spark.createDataset(spark.sparkContext.parallelize(
-      s"""{"a": 1${"0" * 38}, "b": 92233720368547758070}""" :: 
Nil))(Encoders.STRING)
+      s"""{"a": 1${"0".repeat(38)}, "b": 92233720368547758070}""" :: 
Nil))(Encoders.STRING)
 
   def datesRecords: Dataset[String] =
     spark.createDataset(spark.sparkContext.parallelize(
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcQuerySuite.scala
index ab0d4d9bc53b..a52336524194 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcQuerySuite.scala
@@ -737,13 +737,13 @@ abstract class OrcQuerySuite extends OrcQueryTest with SharedSparkSession {
     withTempPath { dir =>
       val path = dir.getCanonicalPath
       val df = spark.range(10).map { x =>
-        val stringColumn = s"$x" * 10
-        val structColumn = (x, s"$x" * 100)
-        val arrayColumn = (0 until 5).map(i => (x + i, s"$x" * 5))
+        val stringColumn = s"$x".repeat(10)
+        val structColumn = (x, s"$x".repeat(100))
+        val arrayColumn = (0 until 5).map(i => (x + i, s"$x".repeat(5)))
         val mapColumn = Map(
-          s"$x" -> (x * 0.1, (x, s"$x" * 100)),
-          (s"$x" * 2) -> (x * 0.2, (x, s"$x" * 200)),
-          (s"$x" * 3) -> (x * 0.3, (x, s"$x" * 300)))
+          s"$x" -> (x * 0.1, (x, s"$x".repeat(100))),
+          (s"$x".repeat(2)) -> (x * 0.2, (x, s"$x".repeat(200))),
+          (s"$x".repeat(3)) -> (x * 0.3, (x, s"$x".repeat(300))))
         (x, stringColumn, structColumn, arrayColumn, mapColumn)
       }.toDF("int_col", "string_col", "struct_col", "array_col", "map_col")
       df.write.format("orc").save(path)
@@ -786,10 +786,10 @@ abstract class OrcQuerySuite extends OrcQueryTest with SharedSparkSession {
     withTempPath { dir =>
       val path = dir.getCanonicalPath
       val df = spark.range(10).map { x =>
-        val stringColumn = s"$x" * 10
-        val structColumn = (x, s"$x" * 100)
-        val arrayColumn = (0 until 5).map(i => (x + i, s"$x" * 5))
-        val mapColumn = Map(s"$x" -> (x * 0.1, (x, s"$x" * 100)))
+        val stringColumn = s"$x".repeat(10)
+        val structColumn = (x, s"$x".repeat(100))
+        val arrayColumn = (0 until 5).map(i => (x + i, s"$x".repeat(5)))
+        val mapColumn = Map(s"$x" -> (x * 0.1, (x, s"$x".repeat(100))))
         (x, stringColumn, structColumn, arrayColumn, mapColumn)
       }.toDF("int_col", "string_col", "struct_col", "array_col", "map_col")
       df.write.format("orc").save(path)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetColumnIndexSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetColumnIndexSuite.scala
index e1e44697be7f..4e103e771d16 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetColumnIndexSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetColumnIndexSuite.scala
@@ -59,7 +59,7 @@ class ParquetColumnIndexSuite extends QueryTest with ParquetTest with SharedSpar
   }
 
   test("reading from unaligned pages - test filters") {
-    val df = spark.range(0, 2000).map(i => (i, s"$i:${"o" * (i / 100).toInt}")).toDF()
+    val df = spark.range(0, 2000).map(i => (i, s"$i:${"o".repeat((i / 100).toInt)}")).toDF()
     checkUnalignedPages(df)(actions: _*)
   }
 
@@ -97,14 +97,14 @@ class ParquetColumnIndexSuite extends QueryTest with ParquetTest with SharedSpar
     // insert 50 null values in [400, 450) to verify that they are skipped during processing row
     // range [500, 1000) against the second page of col_2 [400, 800)
     val df = spark.range(0, 2000).map { i =>
-      val strVal = if (i >= 400 && i < 450) null else s"$i:${"o" * (i / 100).toInt}"
+      val strVal = if (i >= 400 && i < 450) null else s"$i:${"o".repeat((i / 100).toInt)}"
       (i, strVal)
     }.toDF()
     checkUnalignedPages(df)(actions: _*)
   }
 
   test("reading unaligned pages - struct type") {
-    val df = (0 until 2000).map(i => Tuple1((i.toLong, s"$i:${"o" * (i / 100)}"))).toDF("s")
+    val df = (0 until 2000).map(i => Tuple1((i.toLong, s"$i:${"o".repeat(i / 100)}"))).toDF("s")
     checkUnalignedPages(df)(
       df => df.filter("s._1 = 500"),
       df => df.filter("s._1 = 500 or s._1 = 1500"),
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetPartitionDiscoverySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetPartitionDiscoverySuite.scala
index 6f5855461fcc..77e5c3b6fbfb 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetPartitionDiscoverySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetPartitionDiscoverySuite.scala
@@ -79,8 +79,8 @@ abstract class ParquetPartitionDiscoverySuite
 
     check("10", IntegerType)
     check("1000000000000000", LongType)
-    val decimal = Decimal("1" * 20)
-    check("1" * 20, DecimalType(decimal.precision, decimal.scale))
+    val decimal = Decimal("1".repeat(20))
+    check("1".repeat(20), DecimalType(decimal.precision, decimal.scale))
     check("1.5", DoubleType)
     check("hello", StringType)
     check("1990-02-24", DateType)
@@ -770,7 +770,7 @@ abstract class ParquetPartitionDiscoverySuite
           Row(
             Long.MaxValue,
             4.5,
-            new java.math.BigDecimal(new BigInteger("1" * 20)),
+            new java.math.BigDecimal(new BigInteger("1".repeat(20))),
             java.sql.Date.valueOf("2015-05-23"),
             ts,
             "This is a string, /[]?=:",
@@ -1097,13 +1097,13 @@ abstract class ParquetPartitionDiscoverySuite
     }
 
     withTempPath { path =>
-      val df = Seq((1, "1"), (2, "3"), (3, "2" * 30)).toDF("i", "decimal")
+      val df = Seq((1, "1"), (2, "3"), (3, "2".repeat(30))).toDF("i", "decimal")
       df.write.format("parquet").partitionBy("decimal").save(path.getAbsolutePath)
       checkAnswer(
         spark.read.load(path.getAbsolutePath),
         Row(1, BigDecimal("1")) ::
           Row(2, BigDecimal("3")) ::
-          Row(3, BigDecimal("2" * 30)) :: Nil)
+          Row(3, BigDecimal("2".repeat(30))) :: Nil)
     }
   }
 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/xml/XmlSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/xml/XmlSuite.scala
index 9ab1b2c157e1..fdc8819658bb 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/xml/XmlSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/xml/XmlSuite.scala
@@ -2832,7 +2832,7 @@ class XmlSuite
   test("Find compatible types even if inferred DecimalType is not capable of 
other IntegralType") {
     val mixedIntegerAndDoubleRecords = Seq(
       """<ROW><a>3</a><b>1.1</b></ROW>""",
-      s"""<ROW><a>3.1</a><b>0.${"0" * 38}1</b></ROW>""").toDS()
+      s"""<ROW><a>3.1</a><b>0.${"0".repeat(38)}1</b></ROW>""").toDS()
     val xmlDF = spark.read
       .option("prefersDecimal", "true")
       .option("rowTag", "ROW")
@@ -2852,9 +2852,8 @@ class XmlSuite
     )
   }
 
-  def bigIntegerRecords: Dataset[String] =
-    spark.createDataset(spark.sparkContext.parallelize(
-      s"""<ROW><a>1${"0" * 38}</a><b>92233720368547758070</b></ROW>""" :: 
Nil))(Encoders.STRING)
+  def bigIntegerRecords: Dataset[String] = 
spark.createDataset(spark.sparkContext.parallelize(
+    s"""<ROW><a>1${"0".repeat(38)}</a><b>92233720368547758070</b></ROW>""" :: 
Nil))(Encoders.STRING)
 
   test("Infer big integers correctly even when it does not fit in decimal") {
     val df = spark.read
@@ -2874,7 +2873,7 @@ class XmlSuite
 
   def floatingValueRecords: Dataset[String] =
     spark.createDataset(spark.sparkContext.parallelize(
-      s"""<ROW><a>0.${"0" * 38}1</a><b>.01</b></ROW>""" :: 
Nil))(Encoders.STRING)
+      s"""<ROW><a>0.${"0".repeat(38)}1</a><b>.01</b></ROW>""" :: 
Nil))(Encoders.STRING)
 
   test("Infer floating-point values correctly even when it does not fit in 
decimal") {
     val df = spark.read
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/HashedRelationSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/HashedRelationSuite.scala
index 6590deaa47e0..6da5e0b1a123 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/HashedRelationSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/HashedRelationSuite.scala
@@ -293,7 +293,7 @@ class HashedRelationSuite extends SharedSparkSession {
     val key = 0L
     // the page array is initialized with length 1 << 17 (1M bytes),
     // so here we need a value larger than 1 << 18 (2M bytes), to trigger the bug
-    val bigStr = UTF8String.fromString("x" * (1 << 19))
+    val bigStr = UTF8String.fromString("x".repeat(1 << 19))
 
     map.append(key, unsafeProj(InternalRow(bigStr)))
     map.optimize()
@@ -410,7 +410,7 @@ class HashedRelationSuite extends SharedSparkSession {
     val unsafeProj = UnsafeProjection.create(
       Seq(BoundReference(0, IntegerType, false),
         BoundReference(1, StringType, true)))
-    val unsafeRow = unsafeProj(InternalRow(0, UTF8String.fromString(" " * 100)))
+    val unsafeRow = unsafeProj(InternalRow(0, UTF8String.fromString(" ".repeat(100))))
     val key = Seq(BoundReference(0, IntegerType, false))
     val rows = (0 until (1 << 24)).iterator.map { i =>
       unsafeRow.setInt(0, i % 1000000)
@@ -437,7 +437,7 @@ class HashedRelationSuite extends SharedSparkSession {
     val unsafeProj = UnsafeProjection.create(
       Seq(BoundReference(0, IntegerType, false),
         BoundReference(1, StringType, true)))
-    val unsafeRow = unsafeProj(InternalRow(0, UTF8String.fromString(" " * 100)))
+    val unsafeRow = unsafeProj(InternalRow(0, UTF8String.fromString(" ".repeat(100))))
     val key = Seq(BoundReference(0, IntegerType, false))
     val rows = (0 until (1 << 10)).iterator.map { i =>
       unsafeRow.setInt(0, i % 1000000)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/RocksDBSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/RocksDBSuite.scala
index 3fe281c0eab5..ed32b4f33ee6 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/RocksDBSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/RocksDBSuite.scala
@@ -3611,7 +3611,7 @@ class RocksDBSuite extends AlsoTestWithRocksDBFeatures with SharedSparkSession
   def generateFiles(dir: String, fileToLengths: Seq[(String, Int)]): Unit = {
     fileToLengths.foreach { case (fileName, length) =>
       val file = new File(dir, fileName)
-      FileUtils.write(file, "a" * length, StandardCharsets.UTF_8)
+      FileUtils.write(file, "a".repeat(length), StandardCharsets.UTF_8)
     }
   }
 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateSchemaCompatibilityCheckerSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateSchemaCompatibilityCheckerSuite.scala
index 000c1b7a225e..065124a78455 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateSchemaCompatibilityCheckerSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateSchemaCompatibilityCheckerSuite.scala
@@ -52,20 +52,20 @@ class StateSchemaCompatibilityCheckerSuite extends SharedSparkSession {
     .add(StructField("value3", structSchema, nullable = true))
 
   private val longKeySchema = new StructType()
-    .add(StructField("key" + "1" * 64 * 1024, IntegerType, nullable = true))
-    .add(StructField("key" + "2" * 64 * 1024, StringType, nullable = true))
-    .add(StructField("key" + "3" * 64 * 1024, structSchema, nullable = true))
+    .add(StructField("key" + "1".repeat(64 * 1024), IntegerType, nullable = 
true))
+    .add(StructField("key" + "2".repeat(64 * 1024), StringType, nullable = 
true))
+    .add(StructField("key" + "3".repeat( 64 * 1024), structSchema, nullable = 
true))
 
   private val longValueSchema = new StructType()
-    .add(StructField("value" + "1" * 64 * 1024, IntegerType, nullable = true))
-    .add(StructField("value" + "2" * 64 * 1024, StringType, nullable = true))
-    .add(StructField("value" + "3" * 64 * 1024, structSchema, nullable = true))
+    .add(StructField("value" + "1".repeat(64 * 1024), IntegerType, nullable = 
true))
+    .add(StructField("value" + "2".repeat(64 * 1024), StringType, nullable = 
true))
+    .add(StructField("value" + "3".repeat(64 * 1024), structSchema, nullable = 
true))
 
   private val keySchema65535Bytes = new StructType()
-    .add(StructField("k" * (65535 - 87), IntegerType, nullable = true))
+    .add(StructField("k".repeat(65535 - 87), IntegerType, nullable = true))
 
   private val valueSchema65535Bytes = new StructType()
-    .add(StructField("v" * (65535 - 87), IntegerType, nullable = true))
+    .add(StructField("v".repeat(65535 - 87), IntegerType, nullable = true))
 
   private val keySchemaWithCollation = new StructType()
     .add(StructField("key1", IntegerType, nullable = true))
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/FilteredScanSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/FilteredScanSuite.scala
index 94a3b5ed5788..786e50eea2e7 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/FilteredScanSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/FilteredScanSuite.scala
@@ -224,15 +224,15 @@ class FilteredScanSuite extends DataSourceTest with SharedSparkSession {
 
   sqlTest(
     "SELECT a, b, c FROM oneToTenFiltered WHERE c like 'c%'",
-    Seq(Row(3, 3 * 2, "c" * 5 + "C" * 5)))
+    Seq(Row(3, 3 * 2, "c".repeat(5) + "C".repeat(5))))
 
   sqlTest(
     "SELECT a, b, c FROM oneToTenFiltered WHERE c like '%D'",
-    Seq(Row(4, 4 * 2, "d" * 5 + "D" * 5)))
+    Seq(Row(4, 4 * 2, "d".repeat(5) + "D".repeat(5))))
 
   sqlTest(
     "SELECT a, b, c FROM oneToTenFiltered WHERE c like '%eE%'",
-    Seq(Row(5, 5 * 2, "e" * 5 + "E" * 5)))
+    Seq(Row(5, 5 * 2, "e".repeat(5) + "E".repeat(5))))
 
   testPushDown("SELECT * FROM oneToTenFiltered WHERE A = 1", 1, Set("a", "b", 
"c"))
   testPushDown("SELECT a FROM oneToTenFiltered WHERE A = 1", 1, Set("a"))
diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkMetadataOperationSuite.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkMetadataOperationSuite.scala
index 969b1da6cd4d..abd2b1983b34 100644
--- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkMetadataOperationSuite.scala
+++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkMetadataOperationSuite.scala
@@ -675,7 +675,7 @@ class SparkMetadataOperationSuite extends HiveThriftServer2TestBase {
       while (rowSet.next()) {
         assert(rowSet.getString("COLUMN_NAME") === "c" + idx)
         assert(rowSet.getInt("DATA_TYPE") === java.sql.Types.TIMESTAMP)
-        assert(rowSet.getString("TYPE_NAME") === "TIMESTAMP" + ("_NTZ" * idx))
+        assert(rowSet.getString("TYPE_NAME") === "TIMESTAMP" + "_NTZ".repeat(idx))
         assert(rowSet.getInt("COLUMN_SIZE") === 8)
         assert(rowSet.getInt("DECIMAL_DIGITS") === 6)
         assert(rowSet.getInt("NUM_PREC_RADIX") === 0)
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/ErrorPositionSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/ErrorPositionSuite.scala
index aa1973de7f67..a4caf78bed05 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/ErrorPositionSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/ErrorPositionSuite.scala
@@ -165,7 +165,7 @@ class ErrorPositionSuite extends QueryTest with TestHiveSingleton with BeforeAnd
           |
           |Actual: $actualStart, Expected: $expectedStart
           |$line
-          |${" " * actualStart}^
+          |${" ".repeat(actualStart)}^
           |0123456789 123456789 1234567890
           |          2         3
         """.stripMargin)
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSerDeReadWriteSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSerDeReadWriteSuite.scala
index 1922144a92ef..a5b4f3115ca3 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSerDeReadWriteSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSerDeReadWriteSuite.scala
@@ -107,9 +107,10 @@ class HiveSerDeReadWriteSuite extends QueryTest with SQLTestUtils with TestHiveS
     withTable("hive_serde") {
       hiveClient.runSqlHive(s"CREATE TABLE hive_serde (c1 CHAR(10)) STORED AS $fileFormat")
       hiveClient.runSqlHive("INSERT INTO TABLE hive_serde values('s')")
-      checkAnswer(spark.table("hive_serde"), Row("s" + " " * 9))
+      checkAnswer(spark.table("hive_serde"), Row("s" + " ".repeat(9)))
       spark.sql(s"INSERT INTO TABLE hive_serde values('s3')")
-      checkAnswer(spark.table("hive_serde"), Seq(Row("s" + " " * 9), Row("s3" + " " * 8)))
+      checkAnswer(spark.table("hive_serde"),
+        Seq(Row("s" + " ".repeat(9)), Row("s3" + " ".repeat(8))))
     }
   }
 
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
index 78d30866b4f2..5a918b54c964 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
@@ -2543,7 +2543,7 @@ abstract class SQLQuerySuiteBase extends QueryTest with SQLTestUtils with TestHi
     withTempDir { dir =>
       withSQLConf(HiveUtils.CONVERT_METASTORE_PARQUET.key -> "false") {
         withTable("test_precision") {
-          val df = sql(s"SELECT 'dummy' AS name, ${"1" * 20}.${"2" * 18} AS 
value")
+          val df = sql(s"SELECT 'dummy' AS name, 
${"1".repeat(20)}.${"2".repeat(18)} AS value")
           df.write.mode("Overwrite").parquet(dir.getAbsolutePath)
           sql(
             s"""
diff --git a/streaming/src/test/scala/org/apache/spark/streaming/util/RateLimitedOutputStreamSuite.scala b/streaming/src/test/scala/org/apache/spark/streaming/util/RateLimitedOutputStreamSuite.scala
index 895da1a11e54..ee1d1bc3dee7 100644
--- a/streaming/src/test/scala/org/apache/spark/streaming/util/RateLimitedOutputStreamSuite.scala
+++ b/streaming/src/test/scala/org/apache/spark/streaming/util/RateLimitedOutputStreamSuite.scala
@@ -33,7 +33,7 @@ class RateLimitedOutputStreamSuite extends SparkFunSuite {
 
   test("write") {
     val underlying = new ByteArrayOutputStream
-    val data = "X" * 41000
+    val data = "X".repeat(41000)
     val stream = new RateLimitedOutputStream(underlying, desiredBytesPerSec = 10000)
     val elapsedNs = benchmark { stream.write(data.getBytes(StandardCharsets.UTF_8)) }
 


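As a quick, standalone illustration of the pattern applied throughout the hunks above (every `"x" * n` on a `-` line becomes `"x".repeat(n)` on the matching `+` line), here is a minimal sketch that checks the two forms produce identical strings; the object name and assertions are ad hoc and not part of the commit:

```scala
// Ad-hoc sketch (not from the commit): Scala's string multiplication, which the
// diff removes, and java.lang.String#repeat, which replaces it, build the same
// value, so the rewrite is behavior-preserving.
object RepeatEquivalenceSketch {
  def main(args: Array[String]): Unit = {
    val n = 1024
    val viaScalaOps = "a" * n        // scala.collection.StringOps#*
    val viaJdk = "a".repeat(n)       // java.lang.String#repeat (JDK 11+)
    assert(viaScalaOps == viaJdk)    // identical contents
    assert(viaJdk.length == n)       // expected length
    println(s"both forms produced ${viaJdk.length} characters")
  }
}
```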
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
