This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/spark.git

commit e323696accdaa852c1d953b097fdd5ab0599a646
Author: beliefer <[email protected]>
AuthorDate: Thu Mar 12 11:03:47 2020 +0900

    [SPARK-30911][CORE][DOC] Add version information to the configuration of Status
    
    ### What changes were proposed in this pull request?
    1. Add version information to the configuration of `Status`; a minimal sketch of the pattern follows the table below.
    2. Update the docs of `Status`.
    3. Along the way, supplement the documentation for https://github.com/apache/spark/pull/27847.
    
    I sorted out the relevant information, shown below.
    
    Item name | Since version | JIRA ID | Commit ID | Note
    -- | -- | -- | -- | --
    spark.appStateStore.asyncTracking.enable | 2.3.0 | SPARK-20653 | 772e4648d95bda3353723337723543c741ea8476#diff-9ab674b7af7b2097f7d28cb6f5fd1e8c |
    spark.ui.liveUpdate.period | 2.3.0 | SPARK-20644 | c7f38e5adb88d43ef60662c5d6ff4e7a95bff580#diff-9ab674b7af7b2097f7d28cb6f5fd1e8c |
    spark.ui.liveUpdate.minFlushPeriod | 2.4.2 | SPARK-27394 | a8a2ba11ac10051423e58920062b50f328b06421#diff-9ab674b7af7b2097f7d28cb6f5fd1e8c |
    spark.ui.retainedJobs | 1.2.0 | SPARK-2321 | 9530316887612dca060a128fca34dd5a6ab2a9a9#diff-1f32bcb61f51133bd0959a4177a066a5 |
    spark.ui.retainedStages | 0.9.0 | None | 112c0a1776bbc866a1026a9579c6f72f293414c4#diff-1f32bcb61f51133bd0959a4177a066a5 | 0.9.0-incubating-SNAPSHOT
    spark.ui.retainedTasks | 2.0.1 | SPARK-15083 | 55db26245d69bb02b7d7d5f25029b1a1cd571644#diff-6bdad48cfc34314e89599655442ff210 |
    spark.ui.retainedDeadExecutors | 2.0.0 | SPARK-7729 | 9f4263392e492b5bc0acecec2712438ff9a257b7#diff-a0ba36f9b1f9829bf3c4689b05ab6cf2 |
    spark.ui.dagGraph.retainedRootRDDs | 2.1.0 | SPARK-17171 | cc87280fcd065b01667ca7a59a1a32c7ab757355#diff-3f492c527ea26679d4307041b28455b8 |
    spark.metrics.appStatusSource.enabled | 3.0.0 | SPARK-30060 | 60f20e5ea2000ab8f4a593b5e4217fd5637c5e22#diff-9f796ae06b0272c1f0a012652a5b68d0 |
    
    ### Why are the changes needed?
    Supplements the version information for these configurations.
    
    ### Does this PR introduce any user-facing change?
    No
    
    ### How was this patch tested?
    Existing unit tests.
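
    For example, such a suite could assert the stamped version. A hypothetical sketch, assuming `ConfigEntry` exposes the value passed to `version()` as a `version` field:

    ```scala
    // Hypothetical assertion: the entry should carry the release recorded
    // through the builder's version() step.
    assert(Status.LIVE_ENTITY_UPDATE_PERIOD.version == "2.3.0")
    ```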
    
    Closes #27848 from beliefer/add-version-to-status-config.
    
    Lead-authored-by: beliefer <[email protected]>
    Co-authored-by: Jiaan Geng <[email protected]>
    Signed-off-by: HyukjinKwon <[email protected]>
---
 .../org/apache/spark/internal/config/Status.scala   |  9 +++++++++
 docs/configuration.md                               | 21 +++++++++++++++------
 2 files changed, 24 insertions(+), 6 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/internal/config/Status.scala b/core/src/main/scala/org/apache/spark/internal/config/Status.scala
index 3cc00a6..669fa07 100644
--- a/core/src/main/scala/org/apache/spark/internal/config/Status.scala
+++ b/core/src/main/scala/org/apache/spark/internal/config/Status.scala
@@ -22,36 +22,44 @@ import java.util.concurrent.TimeUnit
 private[spark] object Status {
 
   val ASYNC_TRACKING_ENABLED = ConfigBuilder("spark.appStateStore.asyncTracking.enable")
+    .version("2.3.0")
     .booleanConf
     .createWithDefault(true)
 
   val LIVE_ENTITY_UPDATE_PERIOD = ConfigBuilder("spark.ui.liveUpdate.period")
+    .version("2.3.0")
     .timeConf(TimeUnit.NANOSECONDS)
     .createWithDefaultString("100ms")
 
   val LIVE_ENTITY_UPDATE_MIN_FLUSH_PERIOD = ConfigBuilder("spark.ui.liveUpdate.minFlushPeriod")
     .doc("Minimum time elapsed before stale UI data is flushed. This avoids UI 
staleness when " +
       "incoming task events are not fired frequently.")
+    .version("2.4.2")
     .timeConf(TimeUnit.NANOSECONDS)
     .createWithDefaultString("1s")
 
   val MAX_RETAINED_JOBS = ConfigBuilder("spark.ui.retainedJobs")
+    .version("1.2.0")
     .intConf
     .createWithDefault(1000)
 
   val MAX_RETAINED_STAGES = ConfigBuilder("spark.ui.retainedStages")
+    .version("0.9.0")
     .intConf
     .createWithDefault(1000)
 
   val MAX_RETAINED_TASKS_PER_STAGE = ConfigBuilder("spark.ui.retainedTasks")
+    .version("2.0.1")
     .intConf
     .createWithDefault(100000)
 
   val MAX_RETAINED_DEAD_EXECUTORS = ConfigBuilder("spark.ui.retainedDeadExecutors")
+    .version("2.0.0")
     .intConf
     .createWithDefault(100)
 
   val MAX_RETAINED_ROOT_NODES = ConfigBuilder("spark.ui.dagGraph.retainedRootRDDs")
+    .version("2.1.0")
     .intConf
     .createWithDefault(Int.MaxValue)
 
@@ -59,6 +67,7 @@ private[spark] object Status {
     ConfigBuilder("spark.metrics.appStatusSource.enabled")
       .doc("Whether Dropwizard/Codahale metrics " +
         "will be reported for the status of the running spark app.")
+      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 }
diff --git a/docs/configuration.md b/docs/configuration.md
index 7afb715..d22038b 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -516,7 +516,7 @@ Apart from these, the following properties are also available, and may be useful
     backwards-compatibility with older versions of Spark. Users typically should not need to set
     this option.
   </td>
-  <td>1.0.0</td> 
+  <td>1.0.0</td>
 </tr>
 <tr>
   <td><code>spark.executor.defaultJavaOptions</code></td>
@@ -1052,6 +1052,7 @@ Apart from these, the following properties are also available, and may be useful
   <td>
     How many DAG graph nodes the Spark UI and status APIs remember before garbage collecting.
   </td>
+  <td>2.1.0</td>
 </tr>
 <tr>
   <td><code>spark.ui.enabled</code></td>
@@ -1077,6 +1078,7 @@ Apart from these, the following properties are also available, and may be useful
     meaning only the last write will happen. For live applications, this avoids a few
     operations that we can live without when rapidly processing incoming task events.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.ui.liveUpdate.minFlushPeriod</code></td>
@@ -1085,6 +1087,7 @@ Apart from these, the following properties are also available, and may be useful
     Minimum time elapsed before stale UI data is flushed. This avoids UI staleness when incoming
     task events are not fired frequently.
   </td>
+  <td>2.4.2</td>
 </tr>
 <tr>
   <td><code>spark.ui.port</code></td>
@@ -1101,6 +1104,7 @@ Apart from these, the following properties are also available, and may be useful
     How many jobs the Spark UI and status APIs remember before garbage collecting.
     This is a target maximum, and fewer elements may be retained in some circumstances.
   </td>
+  <td>1.2.0</td>
 </tr>
 <tr>
   <td><code>spark.ui.retainedStages</code></td>
@@ -1109,6 +1113,7 @@ Apart from these, the following properties are also available, and may be useful
     How many stages the Spark UI and status APIs remember before garbage collecting.
     This is a target maximum, and fewer elements may be retained in some circumstances.
   </td>
+  <td>0.9.0</td>
 </tr>
 <tr>
   <td><code>spark.ui.retainedTasks</code></td>
@@ -1117,6 +1122,7 @@ Apart from these, the following properties are also available, and may be useful
     How many tasks in one stage the Spark UI and status APIs remember before garbage collecting.
     This is a target maximum, and fewer elements may be retained in some circumstances.
   </td>
+  <td>2.0.1</td>
 </tr>
 <tr>
   <td><code>spark.ui.reverseProxy</code></td>
@@ -1211,6 +1217,7 @@ Apart from these, the following properties are also available, and may be useful
   <td>
     How many dead executors the Spark UI and status APIs remember before garbage collecting.
   </td>
+  <td>2.0.0</td>
 </tr>
 <tr>
   <td><code>spark.ui.filters</code></td>
@@ -1581,13 +1588,15 @@ Apart from these, the following properties are also available, and may be useful
   </td>
 </tr>
 <tr>
-    <td><code>spark.executor.heartbeatInterval</code></td>
-    <td>10s</td>
-    <td>Interval between each executor's heartbeats to the driver.  Heartbeats let
+  <td><code>spark.executor.heartbeatInterval</code></td>
+  <td>10s</td>
+  <td>
+    Interval between each executor's heartbeats to the driver.  Heartbeats let
     the driver know that the executor is still alive and update it with metrics for in-progress
     tasks. spark.executor.heartbeatInterval should be significantly less than
-    spark.network.timeout</td>
-    <td>1.1.0</td>
+    spark.network.timeout
+  </td>
+  <td>1.1.0</td>
 </tr>
 <tr>
   <td><code>spark.files.fetchTimeout</code></td>


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
