dsmiley commented on a change in pull request #1366: SOLR-14342: Optimize core loading order in SolrCloud. URL: https://github.com/apache/lucene-solr/pull/1366#discussion_r395421972
########## File path: solr/core/src/test/org/apache/solr/core/CoreSorterTest.java ########## @@ -71,168 +69,141 @@ public void testComparator() { ); + testComparator(expected, l); + + integrationTest(expected, l); + } + + private void testComparator(List<CountsForEachShard> expectedCounts, List<CountsForEachShard> inputCounts) { for (int i = 0; i < 10; i++) { - List<CountsForEachShard> copy = new ArrayList<>(l); + List<CountsForEachShard> copy = new ArrayList<>(inputCounts); Collections.shuffle(copy, random()); Collections.sort(copy, CoreSorter.countsComparator); for (int j = 0; j < copy.size(); j++) { - assertEquals(expected.get(j), copy.get(j)); + assertEquals(expectedCounts.get(j), copy.get(j)); } } } - public void testSort() throws Exception { - CoreContainer mockCC = getMockContainer(); - MockCoreSorter coreSorter = (MockCoreSorter) new MockCoreSorter().init(mockCC); - List<CoreDescriptor> copy = new ArrayList<>(coreSorter.getLocalCores()); - Collections.sort(copy, coreSorter::compare); - List<CountsForEachShard> l = copy.stream() - .map(CoreDescriptor::getCloudDescriptor) - .map(it -> coreSorter.shardsVsReplicaCounts.get(getShardName(it))) - .collect(toList()); - for (int i = 1; i < l.size(); i++) { - CountsForEachShard curr = l.get(i); - CountsForEachShard prev = l.get(i-1); - assertTrue(CoreSorter.countsComparator.compare(prev, curr) < 1); - } - - for (CountsForEachShard c : l) { - System.out.println(c); - } - } - - private CoreContainer getMockContainer() { + private void integrationTest(List<CountsForEachShard> expectedCounts, List<CountsForEachShard> _inputCounts) { assumeWorkingMockito(); - - CoreContainer mockCC = mock(CoreContainer.class); - ZkController mockZKC = mock(ZkController.class); - ClusterState mockClusterState = mock(ClusterState.class); - when(mockCC.isZooKeeperAware()).thenReturn(true); - when(mockCC.getZkController()).thenReturn(mockZKC); - when(mockClusterState.getLiveNodes()).thenReturn(liveNodes); - 
when(mockZKC.getClusterState()).thenReturn(mockClusterState); - return mockCC; - } - static class ReplicaInfo { - final int coll, slice, replica; - final String replicaName; - CloudDescriptor cd; - - ReplicaInfo(int coll, int slice, int replica) { - this.coll = coll; - this.slice = slice; - this.replica = replica; - replicaName = "coll_" + coll + "_" + slice + "_" + replica; - Properties p = new Properties(); - p.setProperty(CoreDescriptor.CORE_SHARD, "shard_" + slice); - p.setProperty(CoreDescriptor.CORE_COLLECTION, "coll_" + slice); - p.setProperty(CoreDescriptor.CORE_NODE_NAME, replicaName); - cd = new CloudDescriptor(null, replicaName, p); - } - - @Override - public boolean equals(Object obj) { - if (obj instanceof ReplicaInfo) { - ReplicaInfo replicaInfo = (ReplicaInfo) obj; - return replicaInfo.replicaName.equals(replicaName); + List<CountsForEachShard> perShardCounts = new ArrayList<>(_inputCounts); + Collections.shuffle(perShardCounts); + + // compute nodes, some live, some down + final int maxNodesOfAType = perShardCounts.stream() // not too important how many we have, but lets have plenty + .mapToInt(c -> c.totalReplicasInLiveNodes + c.totalReplicasInDownNodes + c.myReplicas).max().getAsInt(); + List<String> liveNodes = IntStream.range(0, maxNodesOfAType).mapToObj(i -> "192.168.0." + i + "_8983").collect(Collectors.toList()); + Collections.shuffle(liveNodes, random()); + String thisNode = liveNodes.get(0); + List<String> otherLiveNodes = liveNodes.subList(1, liveNodes.size()); + List<String> downNodes = IntStream.range(0, maxNodesOfAType).mapToObj(i -> "192.168.1." 
+ i + "_8983").collect(Collectors.toList()); + + // divide into two collections + int numCol1 = random().nextInt(perShardCounts.size()); + Map<String,List<CountsForEachShard>> collToCounts = new HashMap<>(); + collToCounts.put("col1", perShardCounts.subList(0, numCol1)); + collToCounts.put("col2", perShardCounts.subList(numCol1, perShardCounts.size())); + + Map<String,DocCollection> collToState = new HashMap<>(); + Map<CountsForEachShard, List<CoreDescriptor>> myCountsToDescs = new HashMap<>(); + for (Map.Entry<String, List<CountsForEachShard>> entry : collToCounts.entrySet()) { + String collection = entry.getKey(); + List<CountsForEachShard> collCounts = entry.getValue(); + Map<String, Slice> sliceMap = new HashMap<>(collCounts.size()); + for (CountsForEachShard shardCounts : collCounts) { + String slice = "s" + shardCounts.hashCode(); + List<Replica> replicas = new ArrayList<>(); + for (int myRepNum = 0; myRepNum < shardCounts.myReplicas; myRepNum++) { + addNewReplica(replicas, collection, slice, Collections.singletonList(thisNode)); + // save this mapping for later + myCountsToDescs.put(shardCounts, replicas.stream().map(this::newCoreDescriptor).collect(Collectors.toList())); + } + for (int myRepNum = 0; myRepNum < shardCounts.totalReplicasInLiveNodes; myRepNum++) { + addNewReplica(replicas, collection, slice, otherLiveNodes); + } + for (int myRepNum = 0; myRepNum < shardCounts.totalReplicasInDownNodes; myRepNum++) { + addNewReplica(replicas, collection, slice, downNodes); + } + Map<String, Replica> replicaMap = replicas.stream().collect(Collectors.toMap(Replica::getName, Function.identity())); + sliceMap.put(slice, new Slice(slice, replicaMap, map(), collection)); } - return false; + DocCollection col = new DocCollection(collection, sliceMap, map(), DocRouter.DEFAULT); + collToState.put(collection, col); } - - - @Override - public int hashCode() { - return replicaName.hashCode(); - } - - CloudDescriptor getCloudDescriptor() { - return cd; - - } - - public 
Replica getReplica(String node) { - return new Replica(replicaName, Utils.makeMap("core", replicaName, "node_name", node), cd.getCollectionName(), cd.getShardId()); - } - - public boolean equals(String coll, String slice) { - return cd.getCollectionName().equals(coll) && slice.equals(cd.getShardId()); + // reverse map + Map<CoreDescriptor, CountsForEachShard> myDescsToCounts = new HashMap<>(); + for (Map.Entry<CountsForEachShard, List<CoreDescriptor>> entry : myCountsToDescs.entrySet()) { + for (CoreDescriptor descriptor : entry.getValue()) { + CountsForEachShard prev = myDescsToCounts.put(descriptor, entry.getKey()); + assert prev == null; Review comment: I deliberately used a plain assert because this assert would mean the test _itself_ is broken. ---------------------------------------------------------------- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: users@infra.apache.org With regards, Apache Git Services --------------------------------------------------------------------- To unsubscribe, e-mail: issues-unsubscribe@lucene.apache.org For additional commands, e-mail: issues-help@lucene.apache.org