vagetablechicken commented on a change in pull request #5010:
URL: https://github.com/apache/incubator-doris/pull/5010#discussion_r539165745



##########
File path: fe/fe-core/src/main/java/org/apache/doris/clone/PartitionRebalancer.java
##########
@@ -0,0 +1,324 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.clone;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Ordering;
+import com.google.common.collect.TreeMultimap;
+import org.apache.doris.catalog.Replica;
+import org.apache.doris.catalog.TabletInvertedIndex;
+import org.apache.doris.catalog.TabletMeta;
+import org.apache.doris.common.Config;
+import org.apache.doris.common.Pair;
+import org.apache.doris.system.SystemInfoService;
+import org.apache.doris.thrift.TStorageMedium;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Collectors;
+
+public class PartitionRebalancer extends Rebalancer {
+    private static final Logger LOG = LogManager.getLogger(PartitionRebalancer.class);
+
+    private final TwoDimensionalGreedyAlgo algo = new TwoDimensionalGreedyAlgo();
+    protected final MovesInProgressCache movesInProgressCache = new MovesInProgressCache();
+
+    private final AtomicLong counterBalanceMoveCreated = new AtomicLong(0);
+    private final AtomicLong counterBalanceMoveSucceeded = new AtomicLong(0);
+
+    public PartitionRebalancer(SystemInfoService infoService, TabletInvertedIndex invertedIndex) {
+        super(infoService, invertedIndex);
+    }
+
+    @Override
+    protected List<TabletSchedCtx> selectAlternativeTabletsForCluster(
+            String clusterName, ClusterLoadStatistic clusterStat, TStorageMedium medium) {
+        MovesInProgressCache.Cell movesInProgress = movesInProgressCache.getCache(clusterName, medium);
+        Preconditions.checkNotNull(movesInProgress, "clusterStat comes from statisticMap, so movesInProgressMap should have the same entry");
+
+        // Iterating through cache.asMap().values() does not reset the access time of the entries retrieved.
+        List<ReplicaMove> movesInProgressList = movesInProgress.get().asMap().values()
+                .stream().map(p -> p.first).collect(Collectors.toList());
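+        // (Illustrative note, assuming the cell wraps a Guava cache built with
+        // expireAfterAccess: asMap().get(key) counts as an access and resets the
+        // entry's access time, but iterating asMap().values(), as above, does not.)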
+        List<Long> toDeleteKeys = Lists.newArrayList();
+
+        // The problematic moves will be found in buildClusterInfo(), so here is simply a completion check
+        // for the moves that have a valid ToDeleteReplica.
+        List<ReplicaMove> movesNeedCheck = movesInProgress.get().asMap().values()
+                .stream().filter(p -> p.second != -1L).map(p -> p.first).collect(Collectors.toList());
+        checkMovesCompleted(movesNeedCheck, toDeleteKeys);
+
+        ClusterBalanceInfo clusterBalanceInfo = new ClusterBalanceInfo();
+        // We should assume the in-progress moves have succeeded, to avoid producing the same moves again.
+        // Apply the in-progress moves to the current cluster stats; use TwoDimensionalGreedyAlgo.applyMove for simplicity.
+        if (!buildClusterInfo(clusterStat, medium, movesInProgressList, clusterBalanceInfo, toDeleteKeys)) {
+            return Lists.newArrayList();
+        }
+
+        // Just delete the completed or problematic moves
+        if (!toDeleteKeys.isEmpty()) {
+            movesInProgress.get().invalidateAll(toDeleteKeys);
+            movesInProgressList = movesInProgressList.stream()
+                    .filter(m -> !toDeleteKeys.contains(m.tabletId)).collect(Collectors.toList());
+        }
+
+        if (movesInProgressCache.size() > Config.max_balancing_tablets) {
+            LOG.debug("Total in-progress moves > {}", Config.max_balancing_tablets);
+            return Lists.newArrayList();
+        }
+
+        NavigableSet<Long> skews = clusterBalanceInfo.partitionInfoBySkew.keySet();
+        LOG.debug("Cluster {}-{}: peek max skew {}, assume {} in-progress moves have succeeded {}", clusterName, medium,
+                skews.isEmpty() ? 0 : skews.last(), movesInProgressList.size(), movesInProgressList);
+
+        List<TwoDimensionalGreedyAlgo.PartitionReplicaMove> moves = algo.getNextMoves(clusterBalanceInfo, Config.rebalance_max_moves_num_per_selection);
+
+        List<TabletSchedCtx> alternativeTablets = Lists.newArrayList();
+        List<Long> inProgressIds = movesInProgressList.stream().map(m -> m.tabletId).collect(Collectors.toList());
+        for (TwoDimensionalGreedyAlgo.PartitionReplicaMove move : moves) {
+            // Find all tablets of the specified partition that have a replica on the source BE
+            // but no replica on the destination BE, to satisfy the restriction of having
+            // no more than one replica of the same tablet per BE.
+            List<Long> tabletIds = invertedIndex.getTabletIdsByBackendIdAndStorageMedium(move.fromBe, medium);
+            List<Long> invalidIds = invertedIndex.getTabletIdsByBackendIdAndStorageMedium(move.toBe, medium);
+            tabletIds.removeAll(invalidIds);
+            // In-progress tablets can't be candidates either.
+            tabletIds.removeAll(inProgressIds);
+
+            Map<Long, TabletMeta> tabletCandidates = Maps.newHashMap();
+            for (long tabletId : tabletIds) {
+                TabletMeta tabletMeta = invertedIndex.getTabletMeta(tabletId);
+                if (tabletMeta != null && tabletMeta.getPartitionId() == move.partitionId
+                        && tabletMeta.getIndexId() == move.indexId) {
+                    tabletCandidates.put(tabletId, tabletMeta);
+                }
+            }
+            LOG.debug("Find {} candidates for move {}", tabletCandidates.size(), move);
+            if (tabletCandidates.isEmpty()) {
+                continue;
+            }
+
+            // Randomly pick one candidate to create a tabletSchedCtx
+            Random rand = new Random();
+            Object[] keys = tabletCandidates.keySet().toArray();
+            long pickedTabletId = (long) keys[rand.nextInt(keys.length)];
+            LOG.debug("Picked tablet id for move {}: {}", move, pickedTabletId);
+
+            TabletMeta tabletMeta = tabletCandidates.get(pickedTabletId);
+            TabletSchedCtx tabletCtx = new TabletSchedCtx(TabletSchedCtx.Type.BALANCE, clusterName,
+                    tabletMeta.getDbId(), tabletMeta.getTableId(), tabletMeta.getPartitionId(),
+                    tabletMeta.getIndexId(), pickedTabletId, System.currentTimeMillis());
+            // A balance task's priority is always LOW
+            tabletCtx.setOrigPriority(TabletSchedCtx.Priority.LOW);
+            alternativeTablets.add(tabletCtx);
+            // Pair<Move, ToDeleteReplicaId>; ToDeleteReplicaId stays -1L until the move is scheduled successfully
+            movesInProgress.get().put(pickedTabletId, new Pair<>(new ReplicaMove(pickedTabletId, move.fromBe, move.toBe), -1L));
+            counterBalanceMoveCreated.incrementAndGet();
+            // Keep in sync with movesInProgress
+            inProgressIds.add(pickedTabletId);
+        }
+
+        if (moves.isEmpty()) {
+            // A balanced cluster should not print too many log messages.
+            LOG.debug("Cluster {}-{}: cluster is balanced.", clusterName, medium);
+        } else {
+            LOG.info("Cluster {}-{}: get {} moves, actually select {} alternative tablets to move. Tablets detail: {}",
+                    clusterName, medium, moves.size(), alternativeTablets.size(),
+                    alternativeTablets.stream().mapToLong(TabletSchedCtx::getTabletId).toArray());
+        }
+        return alternativeTablets;
+    }
+
+    private boolean buildClusterInfo(ClusterLoadStatistic clusterStat, TStorageMedium medium,
+                                     List<ReplicaMove> movesInProgress, ClusterBalanceInfo info, List<Long> toDeleteKeys) {
+        Preconditions.checkState(info.beByTotalReplicaCount.isEmpty() && info.partitionInfoBySkew.isEmpty(), "");
+
+        // If we want to modify the PartitionBalanceInfo in info.beByTotalReplicaCount, deep-copy it first
+        info.beByTotalReplicaCount.putAll(clusterStat.getBeByTotalReplicaMap(medium));
+        info.partitionInfoBySkew.putAll(clusterStat.getSkewMap(medium));
+
+        // Skip the toDeleteKeys
+        List<ReplicaMove> filteredMoves = movesInProgress.stream().filter(m -> !toDeleteKeys.contains(m.tabletId)).collect(Collectors.toList());
+
+        for (ReplicaMove move : filteredMoves) {
+            TabletMeta meta = invertedIndex.getTabletMeta(move.tabletId);
+            if (meta == null) {
+                // The move's tablet is invalid, so delete the move
+                toDeleteKeys.add(move.tabletId);
+                continue;
+            }
+
+            TwoDimensionalGreedyAlgo.PartitionReplicaMove partitionMove = new TwoDimensionalGreedyAlgo.
+                    PartitionReplicaMove(meta.getPartitionId(), meta.getIndexId(), move.fromBe, move.toBe);
+            boolean st = TwoDimensionalGreedyAlgo.applyMove(partitionMove, info.beByTotalReplicaCount, info.partitionInfoBySkew);
+            if (!st) {
+                // Can't apply this move; mark it failed and continue with the next one.
+                toDeleteKeys.add(move.tabletId);
+            }
+        }
+        return true;
+    }
+
+    private void checkMovesCompleted(List<ReplicaMove> moves, List<Long> toDeleteKeys) {
+        for (ReplicaMove move : moves) {
+            // Remove the move if it has completed
+            if (checkMoveCompleted(move)) {
+                toDeleteKeys.add(move.tabletId);
+                LOG.debug("Move {} is completed. The cur dist: {}", move,
+                        invertedIndex.getReplicasByTabletId(move.tabletId).stream().map(Replica::getBackendId).collect(Collectors.toList()));
+                counterBalanceMoveSucceeded.incrementAndGet();
+            }
+        }
+    }
+
+    // Move completed: fromBe no longer has a replica of the tablet and toBe has one
+    private boolean checkMoveCompleted(ReplicaMove move) {
+        Long tabletId = move.tabletId;
+        List<Long> bes = invertedIndex.getReplicasByTabletId(tabletId).stream().map(Replica::getBackendId).collect(Collectors.toList());
+        return !bes.contains(move.fromBe) && bes.contains(move.toBe);
+    }
+
+    @Override
+    protected void completeSchedCtx(TabletSchedCtx tabletCtx, Map<Long, TabletScheduler.PathSlot> backendsWorkingSlots)
+            throws SchedException {
+        MovesInProgressCache.Cell movesInProgress = movesInProgressCache.getCache(tabletCtx.getCluster(), tabletCtx.getStorageMedium());
+        Preconditions.checkNotNull(movesInProgress, "clusterStat comes from statisticMap, so movesInProgressMap should have the same entry");
+
+        try {
+            Pair<ReplicaMove, Long> pair = movesInProgress.get().getIfPresent(tabletCtx.getTabletId());
+            Preconditions.checkNotNull(pair, "No cached move for tablet: " + tabletCtx.getTabletId());
+
+            ReplicaMove move = pair.first;
+            checkMoveValidation(move);
+
+            // Check the src replica's validity
+            Replica srcReplica = tabletCtx.getTablet().getReplicaByBackendId(move.fromBe);
+            Preconditions.checkNotNull(srcReplica);
+            TabletScheduler.PathSlot slot = backendsWorkingSlots.get(srcReplica.getBackendId());
+            Preconditions.checkNotNull(slot, "unable to get fromBe " + srcReplica.getBackendId() + " slot");
+            if (slot.takeBalanceSlot(srcReplica.getPathHash()) != -1) {
+                tabletCtx.setSrc(srcReplica);
+            } else {
+                throw new SchedException(SchedException.Status.SCHEDULE_FAILED, "no slot for src replica " + srcReplica + ", pathHash " + srcReplica.getPathHash());
+            }
+
+            // Choose a path on the destination
+            ClusterLoadStatistic clusterStat = statisticMap.get(tabletCtx.getCluster());
+            Preconditions.checkNotNull(clusterStat, "cluster does not exist: " + tabletCtx.getCluster());
+            BackendLoadStatistic beStat = clusterStat.getBackendLoadStatistic(move.toBe);
+            Preconditions.checkNotNull(beStat);
+            slot = backendsWorkingSlots.get(move.toBe);
+            Preconditions.checkNotNull(slot, "unable to get slot of toBe " + move.toBe);
+
+            List<RootPathLoadStatistic> paths = beStat.getPathStatistics();
+            Set<Long> availPath = paths.stream().filter(path -> path.getStorageMedium() == tabletCtx.getStorageMedium()
+                    && path.isFit(tabletCtx.getTabletSize(), false) == BalanceStatus.OK)
+                    .map(RootPathLoadStatistic::getPathHash).collect(Collectors.toSet());
+            long pathHash = slot.takeAnAvailBalanceSlotFrom(availPath);
+            if (pathHash == -1) {
+                throw new SchedException(SchedException.Status.SCHEDULE_FAILED, "paths have no available balance slot: " + availPath);
+            } else {
+                tabletCtx.setDest(beStat.getBeId(), pathHash);
+            }
+
+            // ToDeleteReplica is the source replica
+            pair.second = srcReplica.getId();
+        } catch (IllegalStateException | NullPointerException e) {
+            // A problematic move should be invalidated immediately
+            movesInProgress.get().invalidate(tabletCtx.getTabletId());
+            throw new SchedException(SchedException.Status.UNRECOVERABLE, e.getMessage());
+        }
+    }
+
+    // The validation check cannot be exact, because the moves are produced in a specific order.
+    // If some moves failed, the current cluster & partition skew differs from the skew at the time
+    // of getNextMoves, so we can't do a skew check here.
+    // Just do some basic checks, e.g. that the backends are available.
+    private void checkMoveValidation(ReplicaMove move) throws IllegalStateException {
+        boolean fromAvailable = infoService.checkBackendAvailable(move.fromBe);
+        boolean toAvailable = infoService.checkBackendAvailable(move.toBe);
+        Preconditions.checkState(fromAvailable && toAvailable, move + "'s BEs are not all available: from " + fromAvailable + ", to " + toAvailable);
+        // To be improved
+    }

Review comment:
       Not all; we just pick one tablet to generate one move, then apply that move to the current balance info. That's done in the algo implementation.
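
       To illustrate the idea for other readers, here is a minimal sketch of that "pick one move, apply it, repeat" loop. All names here are hypothetical (a plain count map stands in for the real ClusterBalanceInfo, and pickAndApplyOneMove is not the actual TwoDimensionalGreedyAlgo API):

```java
import java.util.Map;

public class GreedyMoveSketch {
    /** One simulated move: a replica of a partition from one BE to another. */
    static final class Move {
        final long partitionId, fromBe, toBe;
        Move(long partitionId, long fromBe, long toBe) {
            this.partitionId = partitionId;
            this.fromBe = fromBe;
            this.toBe = toBe;
        }
    }

    /**
     * dist: partitionId -> (beId -> replica count of that partition on that BE).
     * Picks ONE move for the most skewed partition and applies it to the counts
     * immediately, so the next call already sees the simulated effect.
     */
    static Move pickAndApplyOneMove(Map<Long, Map<Long, Integer>> dist) {
        Move best = null;
        int maxSkew = 1; // a skew of 0 or 1 is already balanced
        for (Map.Entry<Long, Map<Long, Integer>> p : dist.entrySet()) {
            long maxBe = -1, minBe = -1;
            int maxCnt = Integer.MIN_VALUE, minCnt = Integer.MAX_VALUE;
            for (Map.Entry<Long, Integer> be : p.getValue().entrySet()) {
                if (be.getValue() > maxCnt) { maxCnt = be.getValue(); maxBe = be.getKey(); }
                if (be.getValue() < minCnt) { minCnt = be.getValue(); minBe = be.getKey(); }
            }
            if (maxCnt - minCnt > maxSkew) { // the partition with the largest skew wins
                maxSkew = maxCnt - minCnt;
                best = new Move(p.getKey(), maxBe, minBe);
            }
        }
        if (best != null) {
            // "Apply" the move to the balance info at once, like applyMove does
            // for the in-progress moves in buildClusterInfo().
            Map<Long, Integer> partDist = dist.get(best.partitionId);
            partDist.merge(best.fromBe, -1, Integer::sum);
            partDist.merge(best.toBe, 1, Integer::sum);
        }
        return best; // null means no partition is skewed enough to move
    }
}
```

       Calling pickAndApplyOneMove in a loop up to rebalance_max_moves_num_per_selection times then reproduces the behavior described above: every later move is chosen against balance info that already reflects the earlier ones.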



