This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 104261cf7d9 HDDS-3466. Improve PipelinePlacementPolicy performance. 
(#9633)
104261cf7d9 is described below

commit 104261cf7d9a13c5564eda82ac657f8404e4ccec
Author: Tsz-Wo Nicholas Sze <[email protected]>
AuthorDate: Wed Jan 14 12:37:30 2026 -0800

    HDDS-3466. Improve PipelinePlacementPolicy performance. (#9633)
---
 .../hadoop/hdds/scm/node/NodeStateManager.java     |  12 +-
 .../hadoop/hdds/scm/node/SCMNodeManager.java       |   4 +-
 .../hadoop/hdds/scm/node/states/NodeStateMap.java  |  19 +-
 .../hdds/scm/pipeline/PipelinePlacementPolicy.java |  59 ++---
 .../hdds/scm/pipeline/RatisPipelineProvider.java   |  24 +-
 .../hadoop/hdds/scm/pipeline/SortedList.java       | 270 +++++++++++++++++++++
 .../hdds/scm/node/states/TestNodeStateMap.java     |   2 +-
 .../hadoop/hdds/scm/pipeline/TestSortedList.java   | 167 +++++++++++++
 8 files changed, 483 insertions(+), 74 deletions(-)

diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
index 5d98c31af0c..fb065a0086b 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
@@ -492,15 +492,9 @@ public int getEnteringMaintenanceNodeCount() {
     return getEnteringMaintenanceNodes().size();
   }
 
-  /**
-   * Returns all the nodes with the specified status.
-   *
-   * @param status NodeStatus
-   *
-   * @return list of nodes
-   */
-  public List<DatanodeInfo> getNodes(NodeStatus status) {
-    return nodeStateMap.getDatanodeInfos(status);
+  /** @return a list of datanodes matching the given 
status. */
+  public List<DatanodeDetails> getNodes(NodeStatus status) {
+    return nodeStateMap.getDatanodeDetails(status);
   }
 
   /**
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index e2a17dd1d5c..66aeacee73c 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -238,9 +238,7 @@ protected NodeStateManager getNodeStateManager() {
    */
   @Override
   public List<DatanodeDetails> getNodes(NodeStatus nodeStatus) {
-    return nodeStateManager.getNodes(nodeStatus)
-        .stream()
-        .map(node -> (DatanodeDetails)node).collect(Collectors.toList());
+    return nodeStateManager.getNodes(nodeStatus);
   }
 
   /**
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
index da1b6062858..8aea57b23ab 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
@@ -23,8 +23,10 @@
 import java.util.Set;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.function.Function;
 import java.util.function.Predicate;
 import java.util.stream.Collectors;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.DatanodeID;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
@@ -180,15 +182,9 @@ public List<DatanodeInfo> getAllDatanodeInfos() {
     }
   }
 
-  /**
-   * Returns a list of the nodes as DatanodeInfo objects matching the passed
-   * status.
-   *
-   * @param status - The status of the nodes to return
-   * @return List of DatanodeInfo for the matching nodes
-   */
-  public List<DatanodeInfo> getDatanodeInfos(NodeStatus status) {
-    return filterNodes(matching(status));
+  /** @return a list of datanodes matching the given 
status. */
+  public List<DatanodeDetails> getDatanodeDetails(NodeStatus status) {
+    return filterNodes(matching(status), d -> d);
   }
 
   /**
@@ -370,11 +366,16 @@ private int countNodes(Predicate<DatanodeInfo> filter) {
    * @return a list of all nodes matching the {@code filter}
    */
   private List<DatanodeInfo> filterNodes(Predicate<DatanodeInfo> filter) {
+    return filterNodes(filter, Function.identity());
+  }
+
+  private <T> List<T> filterNodes(Predicate<DatanodeInfo> filter, 
Function<DatanodeInfo, T> converter) {
     lock.readLock().lock();
     try {
       return nodeMap.values().stream()
           .map(DatanodeEntry::getInfo)
           .filter(filter)
+          .map(converter)
           .collect(Collectors.toList());
     } finally {
       lock.readLock().unlock();
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
index 366a33dccf2..203f8cab8d5 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
@@ -20,7 +20,7 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import java.util.ArrayList;
-import java.util.Comparator;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Objects;
 import java.util.stream.Collectors;
@@ -81,7 +81,7 @@ public PipelinePlacementPolicy(final NodeManager nodeManager,
         ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT_DEFAULT);
   }
 
-  public static int currentRatisThreePipelineCount(
+  static int currentRatisThreePipelineCount(
       NodeManager nodeManager,
       PipelineStateManager stateManager,
       DatanodeDetails datanodeDetails) {
@@ -100,6 +100,18 @@ public static int currentRatisThreePipelineCount(
         .count();
   }
 
+  /** Filter the given datanodes to those within their pipeline limits. */
+  List<DatanodeDetails> filterPipelineLimit(Iterable<DatanodeDetails> 
datanodes) {
+    final SortedList<DatanodeDetails> sorted = new 
SortedList<>(DatanodeDetails.class);
+    for (DatanodeDetails d : datanodes) {
+      final int count = currentRatisThreePipelineCount(nodeManager, 
stateManager, d);
+      if (count < nodeManager.pipelineLimit(d)) {
+        sorted.add(d, count);
+      }
+    }
+    return sorted;
+  }
+
   private static boolean isNonClosedRatisThreePipeline(Pipeline p) {
     return p != null && p.getReplicationConfig()
         .equals(RatisReplicationConfig.getInstance(ReplicationFactor.THREE))
@@ -169,16 +181,7 @@ List<DatanodeDetails> filterViableNodes(
     // filter nodes that meet the size and pipeline engagement criteria.
     // Pipeline placement doesn't take node space left into account.
     // Sort the DNs by pipeline load.
-    // TODO check if sorting could cause performance issue: HDDS-3466.
-    List<DatanodeDetails> healthyList = healthyNodes.stream()
-        .map(d ->
-            new DnWithPipelines(d, currentRatisThreePipelineCount(nodeManager,
-                stateManager, d)))
-        .filter(d ->
-            (d.getPipelines() < nodeManager.pipelineLimit(d.getDn())))
-        .sorted(Comparator.comparingInt(DnWithPipelines::getPipelines))
-        .map(d -> d.getDn())
-        .collect(Collectors.toList());
+    final List<DatanodeDetails> healthyList = 
filterPipelineLimit(healthyNodes);
 
     if (healthyList.size() < nodesRequired) {
       if (LOG.isDebugEnabled()) {
@@ -217,12 +220,13 @@ List<DatanodeDetails> filterViableNodes(
    * @return True if there are multiple racks, false otherwise
    */
   private boolean multipleRacksAvailable(List<DatanodeDetails> dns) {
-    if (dns.size() <= 1) {
+    final Iterator<DatanodeDetails> i = dns.iterator();
+    if (!i.hasNext()) {
       return false;
     }
-    String initialRack = dns.get(0).getNetworkLocation();
-    for (DatanodeDetails dn : dns) {
-      if (!dn.getNetworkLocation().equals(initialRack)) {
+    final String initialRack = i.next().getNetworkLocation();
+    while (i.hasNext()) {
+      if (!i.next().getNetworkLocation().equals(initialRack)) {
         return true;
       }
     }
@@ -574,27 +578,4 @@ private boolean checkAllNodesAreEqual(NetworkTopology 
topology) {
   protected int getRequiredRackCount(int numReplicas, int excludedRackCount) {
     return REQUIRED_RACKS;
   }
-
-  /**
-   * static inner utility class for datanodes with pipeline, used for
-   * pipeline engagement checking.
-   */
-  public static class DnWithPipelines {
-    private DatanodeDetails dn;
-    private int pipelines;
-
-    DnWithPipelines(DatanodeDetails dn, int pipelines) {
-      this.dn = dn;
-      this.pipelines = pipelines;
-    }
-
-    public int getPipelines() {
-      return pipelines;
-    }
-
-    public DatanodeDetails getDn() {
-      return dn;
-    }
-  }
-
 }
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
index 800a7ebfd83..8fe6934f1ed 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
@@ -19,6 +19,7 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Set;
@@ -37,7 +38,6 @@
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.node.NodeStatus;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState;
-import 
org.apache.hadoop.hdds.scm.pipeline.PipelinePlacementPolicy.DnWithPipelines;
 import 
org.apache.hadoop.hdds.scm.pipeline.leader.choose.algorithms.LeaderChoosePolicy;
 import 
org.apache.hadoop.hdds.scm.pipeline.leader.choose.algorithms.LeaderChoosePolicyFactory;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
@@ -232,18 +232,16 @@ public Pipeline createForRead(
   }
 
   private List<DatanodeDetails> filterPipelineEngagement() {
-    List<DatanodeDetails> healthyNodes =
-        getNodeManager().getNodes(NodeStatus.inServiceHealthy());
-    List<DatanodeDetails> excluded = healthyNodes.stream()
-        .map(d ->
-            new DnWithPipelines(d,
-                PipelinePlacementPolicy
-                    .currentRatisThreePipelineCount(getNodeManager(),
-                    getPipelineStateManager(), d)))
-        .filter(d ->
-            (d.getPipelines() >= getNodeManager().pipelineLimit(d.getDn())))
-        .map(d -> d.getDn())
-        .collect(Collectors.toList());
+    final NodeManager nodeManager = getNodeManager();
+    final PipelineStateManager stateManager = getPipelineStateManager();
+    final List<DatanodeDetails> healthyNodes = 
nodeManager.getNodes(NodeStatus.inServiceHealthy());
+    final List<DatanodeDetails> excluded = new ArrayList<>();
+    for (DatanodeDetails d : healthyNodes) {
+      final int count = 
PipelinePlacementPolicy.currentRatisThreePipelineCount(nodeManager, 
stateManager, d);
+      if (count >= nodeManager.pipelineLimit(d)) {
+        excluded.add(d);
+      }
+    }
     return excluded;
   }
 
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SortedList.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SortedList.java
new file mode 100644
index 00000000000..35bda45a506
--- /dev/null
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SortedList.java
@@ -0,0 +1,270 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.pipeline;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.Map;
+import java.util.Objects;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.function.BiFunction;
+import java.util.function.Predicate;
+
+/**
+ * A sorted list using bucket-sort with bucket size == 1.
+ * The elements are sorted by their weights
+ * while different elements may have the same weight.
+ * <p>
+ * The number of buckets is assumed to be much smaller than the number of 
elements.
+ * For examples, a cluster may have 5,000 datanodes (elements)
+ * but the number of pipelines (buckets) is mostly less than 100.
+ * Therefore, this class's O(n log b) approach is more efficient than the 
usual O(n log n) sorting,
+ * where n is the number of elements and b is the number of buckets.
+ * <p>
+ * Note that some unused methods in {@link List} are unsupported.
+ * <p>
+ * This class is not threadsafe.
+ */
+final class SortedList<E> implements List<E> {
+  private final Class<E> clazz;
+  private final SortedMap<Integer, List<E>> buckets = new TreeMap<>();
+  private int numElements = 0;
+
+  SortedList(Class<E> clazz) {
+    Objects.requireNonNull(clazz, "clazz == null");
+    this.clazz = clazz;
+  }
+
+  @Override
+  public int size() {
+    return numElements;
+  }
+
+  @Override
+  public boolean isEmpty() {
+    return size() == 0;
+  }
+
+  /** Add the given element with the given weight to this list. */
+  public boolean add(E element, int weight) {
+    Objects.requireNonNull(element, "element == null");
+    buckets.computeIfAbsent(weight, k -> new ArrayList<>(64)).add(element);
+    numElements++;
+    return true;
+  }
+
+  private E getOrRemove(String name, int index, BiFunction<List<E>, Integer, 
E> method) {
+    if (index < 0) {
+      throw new IndexOutOfBoundsException("index = " + index + " < 0");
+    }
+    final int s = size();
+    if (index >= s) {
+      throw new IndexOutOfBoundsException("index = " + index + " >= size = " + 
s);
+    }
+
+    for (Iterator<List<E>> i = buckets.values().iterator(); i.hasNext();) {
+      final List<E> bucket = i.next();
+      final int n = bucket.size();
+      if (index < n) {
+        final E e = method.apply(bucket, index);
+        if (bucket.isEmpty()) {
+          i.remove();
+        }
+        return e;
+      }
+      index -= n;
+    }
+    throw new IllegalStateException("Failed to " + name + " element at index " 
+ index + ", " + this);
+  }
+
+  @Override
+  public E get(int index) {
+    return getOrRemove("get", index, List::get);
+  }
+
+  @Override
+  public E remove(int index) {
+    final E removed = getOrRemove("remove", index, (list, i) -> 
list.remove((int)i));
+    numElements--;
+    return removed;
+  }
+
+  private boolean containsOrRemove(Object element, Predicate<List<E>> method) {
+    if (!clazz.isInstance(element)) {
+      return false;
+    }
+    for (Iterator<List<E>> i = buckets.values().iterator(); i.hasNext();) {
+      final List<E> bucket = i.next();
+      if (method.test(bucket)) {
+        if (bucket.isEmpty()) {
+          i.remove();
+        }
+        return true;
+      }
+    }
+    return false;
+  }
+
+  @Override
+  public boolean contains(Object element) {
+    return containsOrRemove(element, b -> b.contains(element));
+  }
+
+  @Override
+  public boolean remove(Object element) {
+    return containsOrRemove(element, b -> {
+      if (b.remove(element)) {
+        numElements--;
+        return true;
+      }
+      return false;
+    });
+  }
+
+  @Override
+  public boolean removeAll(Collection<?> elements) {
+    boolean changed = false;
+    for (Object e : elements) {
+      changed |= remove(e);
+    }
+    return changed;
+  }
+
+  @Override
+  public void clear() {
+    buckets.clear();
+    numElements = 0;
+  }
+
+  @Override
+  public Iterator<E> iterator() {
+    return new Iterator<E>() {
+      private final Iterator<List<E>> bucketIterator = 
buckets.values().iterator();
+      private Iterator<E> i = bucketIterator.hasNext() ? 
bucketIterator.next().iterator()
+          : Collections.<E>emptyIterator();
+
+      @Override
+      public boolean hasNext() {
+        return i.hasNext();
+      }
+
+      @Override
+      public E next() {
+        final E element = i.next();
+        if (!i.hasNext()) {
+          if (bucketIterator.hasNext()) {
+            i = bucketIterator.next().iterator();
+          }
+        }
+        return element;
+      }
+    };
+  }
+
+  @Override
+  public String toString() {
+    if (numElements == 0) {
+      return "[]";
+    }
+
+    final StringBuilder b = new StringBuilder("[");
+    for (Map.Entry<Integer, List<E>> e : buckets.entrySet()) {
+      final List<E> list = e.getValue();
+      b.append("\n  ").append(e.getKey())
+          .append(" #").append(list.size())
+          .append(": ").append(list);
+    }
+    return b.append("\n] 
(").append("size=").append(size()).append(")\n").toString();
+  }
+
+  // ------- The methods below are unsupported. -------
+  @Override
+  public E set(int index, E element) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean add(E e) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void add(int index, E element) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean addAll(Collection<? extends E> c) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean addAll(int index, Collection<? extends E> c) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean retainAll(Collection<?> c) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean containsAll(Collection<?> c) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public int indexOf(Object o) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public int lastIndexOf(Object o) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public ListIterator<E> listIterator() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public ListIterator<E> listIterator(int index) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<E> subList(int fromIndex, int toIndex) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Object[] toArray() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public <T> T[] toArray(T[] a) {
+    throw new UnsupportedOperationException();
+  }
+}
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNodeStateMap.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNodeStateMap.java
index a9b718ce89b..c079c509ad8 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNodeStateMap.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNodeStateMap.java
@@ -129,7 +129,7 @@ private void runTestGetNode(long opExpiryEpochSeconds)
     }
     final NodeStatus requestedState = NodeStatus.valueOf(
         NodeOperationalState.IN_SERVICE, NodeState.STALE, 
opExpiryEpochSeconds);
-    List<DatanodeInfo> nodes = map.getDatanodeInfos(requestedState);
+    final List<DatanodeDetails> nodes = map.getDatanodeDetails(requestedState);
     assertEquals(1, nodes.size());
     assertEquals(1, map.getNodeCount(requestedState));
 
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSortedList.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSortedList.java
new file mode 100644
index 00000000000..a545d8d0eae
--- /dev/null
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSortedList.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.pipeline;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Random;
+import org.junit.jupiter.api.Test;
+
+/** Test {@link SortedList}. */
+public class TestSortedList {
+  private static final Random RANDOM = new Random();
+  private static final int LOOP = 2_000;
+  private static final int MAX_WEIGHT = LOOP / 100;
+  private static int id = 0;
+
+  static class Element implements Comparable<Element> {
+    private final int weight;
+    private final String value;
+
+    Element(int weight) {
+      this.weight = weight;
+      this.value = String.format("e%04d(%02d)", ++id, weight);
+    }
+
+    int getWeight() {
+      return weight;
+    }
+
+    String getValue() {
+      return value;
+    }
+
+    @Override
+    public int compareTo(Element that) {
+      return Comparator.comparingInt(Element::getWeight)
+          .thenComparing(Element::getValue)
+          .compare(this, that);
+    }
+
+    @Override
+    public int hashCode() {
+      return weight;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      if (obj == this) {
+        return true;
+      } else if (!(obj instanceof Element)) {
+        return false;
+      }
+      final Element that = (Element) obj;
+      if (this.value.equals(that.value)) {
+        assertEquals(this.weight, that.weight);
+        return true;
+      }
+      return false;
+    }
+
+    @Override
+    public String toString() {
+      return value;
+    }
+  }
+
+  enum Method {
+    ADD, REMOVE_JAVA, REMOVE_SORTED;
+
+    static Method random() {
+      final int r = RANDOM.nextInt(100);
+      return r < 60 ? ADD : r < 80 ? REMOVE_JAVA : REMOVE_SORTED;
+    }
+  }
+
+  @Test
+  public void test() {
+    final List<Element> javaList = new ArrayList<>();
+    final SortedList<Element> sortedList = new SortedList<>(Element.class);
+
+    for (int i = 0; i < LOOP; i++) {
+      final int size = javaList.size();
+      final Method method = javaList.isEmpty() ? Method.ADD : Method.random();
+      System.out.printf("%5d (size=%5d): %14s ", i, size, method);
+      switch (method) {
+      case ADD:
+        add(javaList, sortedList);
+        break;
+      case REMOVE_JAVA:
+        remove(javaList, sortedList);
+        break;
+      case REMOVE_SORTED:
+        remove(sortedList, javaList);
+        break;
+      default:
+        throw new AssertionError();
+      }
+    }
+  }
+
+  static void add(List<Element> javaList, SortedList<Element> sortedList) {
+    final Element e = new Element(RANDOM.nextInt(MAX_WEIGHT));
+    System.out.println(e);
+    javaList.add(e);
+    Collections.sort(javaList);
+    sortedList.add(e, e.weight);
+    assertLists(javaList, sortedList);
+  }
+
+  static void remove(List<Element> left, List<Element> right) {
+    final Element e = left.remove(RANDOM.nextInt(left.size()));
+    System.out.println(e);
+    assertTrue(right.remove(e));
+    assertLists(left, right);
+
+    assertFalse(left.remove(e));
+    assertFalse(right.remove(e));
+  }
+
+  static void assertLists(List<Element> left, List<Element> right) {
+    assertEquals(left.isEmpty(), right.isEmpty());
+    assertOrdering(left, right);
+    assertOrdering(right, left);
+  }
+
+  static void assertOrdering(List<Element> ordering, List<Element> containing) 
{
+    final int size = containing.size();
+    assertEquals(size, ordering.size());
+
+    int min = -1;
+    int count = 0;
+    final Iterator<Element> actual = containing.iterator();
+    for (Element e : ordering) {
+      assertThat(e.weight).isGreaterThanOrEqualTo(min);
+      min = e.weight;
+      assertThat(containing).contains(e);
+      assertTrue(actual.hasNext());
+      assertSame(e, actual.next(), "[" + count + "]");
+      count++;
+    }
+    assertEquals(size, count, () -> ordering.getClass().getSimpleName() + " " 
+ ordering);
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to