This is an automated email from the ASF dual-hosted git repository.
gurwls223 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
new 20bb7c0b8584 Revert "[SPARK-46530][PYTHON][SQL][FOLLOW-UP] Uses path separator instead of file separator to correctly check PySpark library existence"
20bb7c0b8584 is described below
commit 20bb7c0b858422c05e6c52ff51ee7d953bf614ca
Author: Hyukjin Kwon <[email protected]>
AuthorDate: Wed Jan 24 11:25:01 2024 +0900
Revert "[SPARK-46530][PYTHON][SQL][FOLLOW-UP] Uses path separator instead
of file separator to correctly check PySpark library existence"
This reverts commit b303eced7f8639887278db34e0080ffa0c19bd0c.
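The distinction at the heart of the reverted follow-up: File.separator joins components within a single path ("/" on Unix), while File.pathSeparator joins entries of a PYTHONPATH-style list (":" on Unix). A minimal sketch of the two, assuming a Unix-like JVM and a hypothetical SPARK_HOME of /opt/spark:

    import java.io.File

    // Components of one path are joined with File.separator.
    val zip = Seq("/opt/spark", "python", "lib", "pyspark.zip").mkString(File.separator)
    // zip == "/opt/spark/python/lib/pyspark.zip"

    // Entries of a path list are joined with File.pathSeparator.
    val pythonPath = Seq(zip, "/opt/spark/python/lib/py4j-0.10.9.7-src.zip")
      .mkString(File.pathSeparator)
    // pythonPath == ".../pyspark.zip:.../py4j-0.10.9.7-src.zip" on Unix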
---
core/src/main/scala/org/apache/spark/api/python/PythonUtils.scala | 6 ++----
.../apache/spark/sql/execution/datasources/DataSourceManager.scala | 4 +++-
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/api/python/PythonUtils.scala b/core/src/main/scala/org/apache/spark/api/python/PythonUtils.scala
index 929058fb7185..26c790a12447 100644
--- a/core/src/main/scala/org/apache/spark/api/python/PythonUtils.scala
+++ b/core/src/main/scala/org/apache/spark/api/python/PythonUtils.scala
@@ -36,7 +36,7 @@ private[spark] object PythonUtils extends Logging {
val PY4J_ZIP_NAME = "py4j-0.10.9.7-src.zip"
/** Get the PYTHONPATH for PySpark, either from SPARK_HOME, if it is set, or from our JAR */
- def sparkPythonPaths: Seq[String] = {
+ def sparkPythonPath: String = {
val pythonPath = new ArrayBuffer[String]
for (sparkHome <- sys.env.get("SPARK_HOME")) {
pythonPath += Seq(sparkHome, "python", "lib", "pyspark.zip").mkString(File.separator)
@@ -44,11 +44,9 @@ private[spark] object PythonUtils extends Logging {
Seq(sparkHome, "python", "lib", PY4J_ZIP_NAME).mkString(File.separator)
}
pythonPath ++= SparkContext.jarOfObject(this)
- pythonPath.toSeq
+ pythonPath.mkString(File.pathSeparator)
}
- def sparkPythonPath: String = sparkPythonPaths.mkString(File.pathSeparator)
-
/** Merge PYTHONPATHS with the appropriate separator. Ignores blank strings. */
def mergePythonPaths(paths: String*): String = {
paths.filter(_ != "").mkString(File.pathSeparator)
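A hedged sketch of the mergePythonPaths behavior kept above (the method itself is private[spark], so this restates its body with illustrative paths):

    import java.io.File

    // Blank entries are dropped; the rest are joined with File.pathSeparator.
    val merged = Seq("/opt/spark/python", "", "/tmp/deps.zip")
      .filter(_ != "").mkString(File.pathSeparator)
    // On Unix: merged == "/opt/spark/python:/tmp/deps.zip"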
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceManager.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceManager.scala
index 8ee2325ca1f9..ef18a3c67cf4 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceManager.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceManager.scala
@@ -20,6 +20,7 @@ package org.apache.spark.sql.execution.datasources
import java.io.File
import java.util.Locale
import java.util.concurrent.ConcurrentHashMap
+import java.util.regex.Pattern
import org.apache.spark.api.python.PythonUtils
import org.apache.spark.internal.Logging
@@ -95,7 +96,8 @@ object DataSourceManager extends Logging {
private lazy val shouldLoadPythonDataSources: Boolean = {
Utils.checkCommandAvailable(PythonUtils.defaultPythonExec) &&
// Make sure PySpark zipped files also exist.
- PythonUtils.sparkPythonPaths.forall(new File(_).exists())
+ PythonUtils.sparkPythonPath
+ .split(Pattern.quote(File.separator)).forall(new File(_).exists())
}
private def normalize(name: String): String = name.toLowerCase(Locale.ROOT)
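Note how the two restored hunks interact: sparkPythonPath joins its entries with File.pathSeparator, while the restored check above splits on File.separator; realigning those two was the point of the reverted follow-up (per its title), and this revert restores the earlier pairing wholesale. A minimal sketch of the difference between the two splits, assuming a Unix-like JVM and illustrative paths:

    import java.io.File
    import java.util.regex.Pattern

    val joined = Seq(
      "/opt/spark/python/lib/pyspark.zip",
      "/opt/spark/python/lib/py4j-0.10.9.7-src.zip").mkString(File.pathSeparator)

    // Splitting on the path separator recovers the two archive paths.
    joined.split(Pattern.quote(File.pathSeparator))
    // => Array(".../pyspark.zip", ".../py4j-0.10.9.7-src.zip")

    // Splitting on the file separator yields directory fragments instead.
    joined.split(Pattern.quote(File.separator))
    // => Array("", "opt", "spark", ..., "pyspark.zip:", "opt", ...)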
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]