Repository: spark
Updated Branches:
  refs/heads/master c86e9bc4f -> 5d7fe178b


SPARK-4170 [CORE] Closure problems when running Scala app that "extends App"

Warn against subclassing scala.App, and remove one instance of this in examples

Author: Sean Owen <[email protected]>

Closes #3497 from srowen/SPARK-4170 and squashes the following commits:

4a6131f [Sean Owen] Restore multiline string formatting
a8ca895 [Sean Owen] Warn against subclassing scala.App, and remove one instance of this in examples


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/5d7fe178
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/5d7fe178
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/5d7fe178

Branch: refs/heads/master
Commit: 5d7fe178b303918faa0893cd36963158b420309f
Parents: c86e9bc
Author: Sean Owen <[email protected]>
Authored: Thu Nov 27 09:03:17 2014 -0800
Committer: Aaron Davidson <[email protected]>
Committed: Thu Nov 27 09:03:17 2014 -0800

----------------------------------------------------------------------
 .../org/apache/spark/deploy/SparkSubmit.scala   |  5 ++
 docs/quick-start.md                             |  3 +
 .../spark/examples/mllib/LinearRegression.scala | 70 ++++++++++----------
 3 files changed, 44 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/5d7fe178/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala b/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
index 8a62519..00f2918 100644
--- a/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
@@ -345,6 +345,11 @@ object SparkSubmit {
         System.exit(CLASS_NOT_FOUND_EXIT_STATUS)
     }
 
+    // SPARK-4170
+    if (classOf[scala.App].isAssignableFrom(mainClass)) {
+      printWarning("Subclasses of scala.App may not work correctly. Use a main() method instead.")
+    }
+
     val mainMethod = mainClass.getMethod("main", new Array[String](0).getClass)
     if (!Modifier.isStatic(mainMethod.getModifiers)) {
       throw new IllegalStateException("The main method in the given main class must be static")

http://git-wip-us.apache.org/repos/asf/spark/blob/5d7fe178/docs/quick-start.md
----------------------------------------------------------------------
diff --git a/docs/quick-start.md b/docs/quick-start.md
index 6236de0..bf643bb 100644
--- a/docs/quick-start.md
+++ b/docs/quick-start.md
@@ -244,6 +244,9 @@ object SimpleApp {
 }
 {% endhighlight %}
 
+Note that applications should define a `main()` method instead of extending `scala.App`.
+Subclasses of `scala.App` may not work correctly.
+
 This program just counts the number of lines containing 'a' and the number containing 'b' in the
 Spark README. Note that you'll need to replace YOUR_SPARK_HOME with the location where Spark is
 installed. Unlike the earlier examples with the Spark shell, which initializes its own SparkContext,

http://git-wip-us.apache.org/repos/asf/spark/blob/5d7fe178/examples/src/main/scala/org/apache/spark/examples/mllib/LinearRegression.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/LinearRegression.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/LinearRegression.scala
index 6815b1c..6a456ba 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/LinearRegression.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/LinearRegression.scala
@@ -33,7 +33,7 @@ import org.apache.spark.mllib.optimization.{SimpleUpdater, SquaredL2Updater, L1U
  * A synthetic dataset can be found at `data/mllib/sample_linear_regression_data.txt`.
  * If you use it as a template to create your own app, please use `spark-submit` to submit your app.
  */
-object LinearRegression extends App {
+object LinearRegression {
 
   object RegType extends Enumeration {
     type RegType = Value
@@ -49,40 +49,42 @@ object LinearRegression extends App {
       regType: RegType = L2,
       regParam: Double = 0.01) extends AbstractParams[Params]
 
-  val defaultParams = Params()
-
-  val parser = new OptionParser[Params]("LinearRegression") {
-    head("LinearRegression: an example app for linear regression.")
-    opt[Int]("numIterations")
-      .text("number of iterations")
-      .action((x, c) => c.copy(numIterations = x))
-    opt[Double]("stepSize")
-      .text(s"initial step size, default: ${defaultParams.stepSize}")
-      .action((x, c) => c.copy(stepSize = x))
-    opt[String]("regType")
-      .text(s"regularization type (${RegType.values.mkString(",")}), " +
-      s"default: ${defaultParams.regType}")
-      .action((x, c) => c.copy(regType = RegType.withName(x)))
-    opt[Double]("regParam")
-      .text(s"regularization parameter, default: ${defaultParams.regParam}")
-    arg[String]("<input>")
-      .required()
-      .text("input paths to labeled examples in LIBSVM format")
-      .action((x, c) => c.copy(input = x))
-    note(
-      """
-        |For example, the following command runs this app on a synthetic dataset:
-        |
-        | bin/spark-submit --class org.apache.spark.examples.mllib.LinearRegression \
-        |  examples/target/scala-*/spark-examples-*.jar \
-        |  data/mllib/sample_linear_regression_data.txt
-      """.stripMargin)
-  }
+  def main(args: Array[String]) {
+    val defaultParams = Params()
+
+    val parser = new OptionParser[Params]("LinearRegression") {
+      head("LinearRegression: an example app for linear regression.")
+      opt[Int]("numIterations")
+        .text("number of iterations")
+        .action((x, c) => c.copy(numIterations = x))
+      opt[Double]("stepSize")
+        .text(s"initial step size, default: ${defaultParams.stepSize}")
+        .action((x, c) => c.copy(stepSize = x))
+      opt[String]("regType")
+        .text(s"regularization type (${RegType.values.mkString(",")}), " +
+        s"default: ${defaultParams.regType}")
+        .action((x, c) => c.copy(regType = RegType.withName(x)))
+      opt[Double]("regParam")
+        .text(s"regularization parameter, default: ${defaultParams.regParam}")
+      arg[String]("<input>")
+        .required()
+        .text("input paths to labeled examples in LIBSVM format")
+        .action((x, c) => c.copy(input = x))
+      note(
+        """
+          |For example, the following command runs this app on a synthetic dataset:
+          |
+          | bin/spark-submit --class org.apache.spark.examples.mllib.LinearRegression \
+          |  examples/target/scala-*/spark-examples-*.jar \
+          |  data/mllib/sample_linear_regression_data.txt
+        """.stripMargin)
+    }
 
-  parser.parse(args, defaultParams).map { params =>
-    run(params)
-  } getOrElse {
-    sys.exit(1)
+    parser.parse(args, defaultParams).map { params =>
+      run(params)
+    } getOrElse {
+      sys.exit(1)
+    }
   }
 
   def run(params: Params) {


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to