This is an automated email from the ASF dual-hosted git repository.

dongjoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new b46cf270abab [SPARK-53324][K8S][FOLLOWUP] Use `invokePrivate` to access `numOutstandingPods`
b46cf270abab is described below

commit b46cf270abab0daaee4c504951fdf3cc2920bb76
Author: Dongjoon Hyun <[email protected]>
AuthorDate: Thu Oct 23 14:11:04 2025 -0700

    [SPARK-53324][K8S][FOLLOWUP] Use `invokePrivate` to access `numOutstandingPods`
    
    ### What changes were proposed in this pull request?
    
    This is a follow-up to the PR below, applying the latest `master` branch change.
    - #51913
    
    ### Why are the changes needed?
    
    The `master` branch compilation is currently broken.
    - https://github.com/apache/spark/actions/runs/18760939731
    
    We recently changed the visibility of `numOutstandingPods`, but I missed that the old open PRs did not include this change (a short sketch of the resulting `invokePrivate` access pattern follows the link below).
    - #52614
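    
    For context, `invokePrivate` comes from ScalaTest's `PrivateMethodTester` trait. Below is a minimal, self-contained sketch of that pattern; the toy `Allocator` class, its `AtomicInteger` result type, and the test name are illustrative assumptions rather than the real suite's declarations (the actual diff below invokes the handle without applying it, so the suite's own declaration may differ slightly):
    
    ```scala
    import java.util.concurrent.atomic.AtomicInteger
    
    import org.scalatest.PrivateMethodTester
    import org.scalatest.funsuite.AnyFunSuite
    
    // Toy stand-in for ExecutorPodsAllocator: the counter is private, so a
    // test cannot read it directly once its visibility is narrowed.
    class Allocator {
      private val counter = new AtomicInteger(4)
      private def numOutstandingPods: AtomicInteger = counter
    }
    
    class AllocatorSuite extends AnyFunSuite with PrivateMethodTester {
      // Reflective handle to the private member, looked up by name.
      private val numOutstandingPods =
        PrivateMethod[AtomicInteger](Symbol("numOutstandingPods"))
    
      test("reads a private counter via invokePrivate") {
        val allocator = new Allocator
        // Applying the handle with no arguments builds an Invocation that
        // invokePrivate resolves reflectively on the target instance.
        assert(allocator.invokePrivate(numOutstandingPods()).get() == 4)
      }
    }
    ```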
    
    ### Does this PR introduce _any_ user-facing change?
    
    No.
    
    ### How was this patch tested?
    
    Pass the CIs.
    
    ### Was this patch authored or co-authored using generative AI tooling?
    
    No.
    
    Closes #52714 from dongjoon-hyun/SPARK-53324.
    
    Authored-by: Dongjoon Hyun <[email protected]>
    Signed-off-by: Dongjoon Hyun <[email protected]>
---
 .../spark/scheduler/cluster/k8s/ExecutorPodsAllocatorSuite.scala  | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsAllocatorSuite.scala b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsAllocatorSuite.scala
index 244a8c96d23f..0f4db9b8e036 100644
--- a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsAllocatorSuite.scala
+++ b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsAllocatorSuite.scala
@@ -273,7 +273,7 @@ class ExecutorPodsAllocatorSuite extends SparkFunSuite with BeforeAndAfter {
     // Request more than the max per rp for one rp
    podsAllocatorUnderTest.setTotalExpectedExecutors(Map(defaultProfile -> 2, rp -> 3))
     // 2 for default, and 2 for rp
-    assert(podsAllocatorUnderTest.numOutstandingPods.get() == 4)
+    assert(podsAllocatorUnderTest.invokePrivate(numOutstandingPods).get() == 4)
    verify(podsWithNamespace).resource(podWithAttachedContainerForId(1, defaultProfile.id))
    verify(podsWithNamespace).resource(podWithAttachedContainerForId(2, defaultProfile.id))
     verify(podsWithNamespace).resource(podWithAttachedContainerForId(3, rp.id))
@@ -285,7 +285,7 @@ class ExecutorPodsAllocatorSuite extends SparkFunSuite with BeforeAndAfter {
     snapshotsStore.updatePod(pendingExecutor(2, defaultProfile.id))
     snapshotsStore.updatePod(pendingExecutor(3, rp.id))
     snapshotsStore.notifySubscribers()
-    assert(podsAllocatorUnderTest.numOutstandingPods.get() == 4)
+    assert(podsAllocatorUnderTest.invokePrivate(numOutstandingPods).get() == 4)
     verify(podResource, times(4)).create()
     verify(labeledPods, never()).delete()
 
@@ -294,14 +294,14 @@ class ExecutorPodsAllocatorSuite extends SparkFunSuite with BeforeAndAfter {
     waitForExecutorPodsClock.advance(executorIdleTimeout * 2)
    podsAllocatorUnderTest.setTotalExpectedExecutors(Map(defaultProfile -> 1, rp -> 3))
     snapshotsStore.notifySubscribers()
-    assert(podsAllocatorUnderTest.numOutstandingPods.get() == 3)
+    assert(podsAllocatorUnderTest.invokePrivate(numOutstandingPods).get() == 3)
     verify(labeledPods, times(1)).delete()
 
    // Make one pod running from non-default rp so we have one more slot for pending pods.
     snapshotsStore.updatePod(runningExecutor(3, rp.id))
     snapshotsStore.updatePod(pendingExecutor(4, rp.id))
     snapshotsStore.notifySubscribers()
-    assert(podsAllocatorUnderTest.numOutstandingPods.get() == 3)
+    assert(podsAllocatorUnderTest.invokePrivate(numOutstandingPods).get() == 3)
     verify(podsWithNamespace).resource(podWithAttachedContainerForId(5, rp.id))
     verify(labeledPods, times(1)).delete()
   }


