This is an automated email from the ASF dual-hosted git repository.

yiguolei pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/branch-2.1 by this push:
     new 81ea088c962 branch-2.1: [fix](backup) Save snapshot meta during replay #49550 (#49606)
81ea088c962 is described below

commit 81ea088c962fa401bcde3c52ff2e5d64b7e40df3
Author: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
AuthorDate: Wed Apr 9 14:19:19 2025 +0800

    branch-2.1: [fix](backup) Save snapshot meta during replay #49550 (#49606)

    Cherry-picked from #49550

    Co-authored-by: walter <maoch...@selectdb.com>
---
 .../java/org/apache/doris/backup/BackupJob.java    |  12 ++-
 .../test_backup_restore_get_snapshot.groovy        | 102 +++++++++++++++++++++
 2 files changed, 111 insertions(+), 3 deletions(-)

diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java
index fb45f6acf0c..89b0ddaa949 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java
@@ -342,7 +342,9 @@ public class BackupJob extends AbstractJob {
 
     @Override
     public synchronized void replayRun() {
-        // nothing to do
+        if (state == BackupJobState.SAVE_META) {
+            saveMetaInfo(true);
+        }
     }
 
     @Override
@@ -438,7 +440,7 @@ public class BackupJob extends AbstractJob {
                 waitingAllUploadingFinished();
                 break;
             case SAVE_META:
-                saveMetaInfo();
+                saveMetaInfo(false);
                 break;
             case UPLOAD_INFO:
                 uploadMetaAndJobInfoFile();
@@ -827,7 +829,7 @@ public class BackupJob extends AbstractJob {
         }
     }
 
-    private void saveMetaInfo() {
+    private void saveMetaInfo(boolean replay) {
         String createTimeStr = TimeUtils.longToTimeString(createTime,
                 TimeUtils.getDatetimeFormatWithHyphenWithTimeZone());
         // local job dir: backup/repo__repo_id/label__createtime/
@@ -888,6 +890,10 @@ public class BackupJob extends AbstractJob {
             return;
         }
 
+        if (replay) {
+            return;
+        }
+
         state = BackupJobState.UPLOAD_INFO;
 
         // meta info and job info has been saved to local file, this can be cleaned to reduce log size
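For readers skimming the patch: the fix follows a replay-idempotence pattern. During edit-log replay the job re-does the purely local work of the SAVE_META step (writing the backup meta and job info files), so a follower later promoted to master still has the snapshot meta on disk, but it skips the state transition, which only the normal run path drives. Below is a minimal standalone sketch of that pattern; the class, enum, and method names are illustrative only and are not Doris's actual API beyond what the diff above shows.

// Illustrative sketch only (names are hypothetical): a job that persists metadata on
// both the normal path and the replay path, but advances its state machine only when
// it is not replaying -- the same shape as the BackupJob change above.
public class SnapshotJobSketch {
    enum State { SAVE_META, UPLOAD_INFO }

    private State state = State.SAVE_META;

    // Normal execution on the master: persist meta, then move the job forward.
    void run() {
        if (state == State.SAVE_META) {
            saveMetaInfo(false);
        }
    }

    // Replay path: redo the local persistence so this node also has the snapshot
    // meta on disk, but leave the job state untouched.
    void replayRun() {
        if (state == State.SAVE_META) {
            saveMetaInfo(true);
        }
    }

    private void saveMetaInfo(boolean replay) {
        // ... write backup meta and job info files to the local job dir ...
        if (replay) {
            return; // replay only restores local files; the normal path drives the state machine
        }
        state = State.UPLOAD_INFO;
    }

    public static void main(String[] args) {
        SnapshotJobSketch job = new SnapshotJobSketch();
        job.replayRun();                                     // meta files restored, state unchanged
        System.out.println("after replay: " + job.state);    // SAVE_META
        job.run();                                           // normal path advances the job
        System.out.println("after run: " + job.state);       // UPLOAD_INFO
    }
}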
diff --git a/regression-test/suites/backup_restore/test_backup_restore_get_snapshot.groovy b/regression-test/suites/backup_restore/test_backup_restore_get_snapshot.groovy
new file mode 100644
index 00000000000..5afbac3f066
--- /dev/null
+++ b/regression-test/suites/backup_restore/test_backup_restore_get_snapshot.groovy
@@ -0,0 +1,102 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.apache.doris.regression.Config
+import org.apache.doris.regression.suite.ClusterOptions
+import org.apache.doris.regression.suite.client.FrontendClientImpl
+
+suite("test_backup_restore_get_snapshot", "backup_restore,docker") {
+    def options = new ClusterOptions()
+    options.beConfigs += ["enable_java_support=false"]
+    options.feNum = 3
+    options.beNum = 3
+    options.beDisks = ['HDD=1', 'SSD=1']
+
+    docker(options) {
+        def syncer = getSyncer()
+
+        sql """
+            CREATE TABLE backup_table (
+                `id` LARGEINT NOT NULL,
+                `count` LARGEINT SUM DEFAULT "0")
+            AGGREGATE KEY(`id`)
+            DISTRIBUTED BY HASH(`id`) BUCKETS 2
+            PROPERTIES
+            (
+                "replication_num" = "3"
+            )
+            """
+
+        List<String> values = []
+        for (int i = 1; i <= 10; ++i) {
+            values.add("(${i}, ${i})")
+        }
+        sql "INSERT INTO backup_table VALUES ${values.join(",")}"
+        def result = sql "SELECT * FROM backup_table"
+        assertEquals(result.size(), values.size());
+
+        sql """
+            BACKUP SNAPSHOT snapshot_name
+            TO `__keep_on_local__`
+            ON (backup_table)
+        """
+
+        syncer.waitSnapshotFinish()
+
+        // Stop the master FE, to elect a follower as the new master.
+        def stopFeIndex = cluster.getMasterFe().index
+        cluster.stopFrontends(stopFeIndex)
+
+        // Wait for a new master.
+        def masterElected = false
+        for (int i = 0; i < 60; i++) {
+            def frontends = cluster.getFrontends();
+            for (int j = 0; j < frontends.size(); j++) {
+                logger.info("stop fe {}, idx {}, is master {}", stopFeIndex, j, frontends.get(j).isMaster)
+                if (j == stopFeIndex) {
+                    continue
+                }
+                if (frontends.get(j).isMaster) {
+                    masterElected = true
+                    break
+                }
+            }
+            if (masterElected) {
+                break
+            }
+            sleep(1000)
+        }
+
+        assertTrue(masterElected)
+
+        // Run sql against the new master, to force refreshing the underlying connection.
+        def user = context.config.jdbcUser
+        def password = context.config.jdbcPassword
+        def fe = cluster.getMasterFe()
+        def url = String.format(
+                "jdbc:mysql://%s:%s/?useLocalSessionState=true&allowLoadLocalInfile=false",
+                fe.host, fe.queryPort)
+        url = Config.buildUrlWithDb(url, context.dbName)
+        logger.info("connect to docker cluster: suite={}, url={}", name, url)
+        connect(user, password, url) {
+            // The snapshot backupMeta & snapshotInfo must exist.
+            assertTrue(syncer.getSnapshot("snapshot_name", "backup_table"))
+        }
+    }
+}
+
+

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org