ndimiduk commented on code in PR #6370:
URL: https://github.com/apache/hbase/pull/6370#discussion_r1824281854


##########
hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java:
##########
@@ -43,8 +44,13 @@ private BackupRestoreFactory() {
    * @return backup restore job instance
    */
   public static RestoreJob getRestoreJob(Configuration conf) {
+    Class<? extends RestoreJob> defaultCls =
+      conf.getBoolean(RestoreJob.KEEP_ORIGINAL_SPLITS_OPT, false)

Review Comment:
   We want the new behavior by default, don't we? Is there a reason to offer the old behavior at all?
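
   If we do keep the option, a rough sketch of what I have in mind (the new-job class name below is only a placeholder for whatever this PR introduces):

   ```java
   // Sketch only: default the flag to true so original-splits restore is opt-out rather than opt-in.
   Class<? extends RestoreJob> defaultCls =
     conf.getBoolean(RestoreJob.KEEP_ORIGINAL_SPLITS_OPT, true)
       ? MapReduceRestoreToOriginalSplitsJob.class // placeholder name for the new job
       : MapReduceRestoreJob.class;
   ```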



##########
hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java:
##########
@@ -197,55 +202,56 @@ protected List<byte[]> handleBulkLoad(List<TableName> sTableList) throws IOExcep
           }
         }
       }
+      mergeSplitBulkloads(activeFiles, archiveFiles, srcTable);
+      incrementalCopyBulkloadHFiles(tgtFs, srcTable);
     }
-
-    copyBulkLoadedFiles(activeFiles, archiveFiles);
-
     return pair.getSecond();
   }
 
-  private void copyBulkLoadedFiles(List<String> activeFiles, List<String> archiveFiles)
-    throws IOException {
-    try {
-      // Enable special mode of BackupDistCp
-      conf.setInt(MapReduceBackupCopyJob.NUMBER_OF_LEVELS_TO_PRESERVE_KEY, 5);
-      // Copy active files
-      String tgtDest = backupInfo.getBackupRootDir() + Path.SEPARATOR + backupInfo.getBackupId();
-      int attempt = 1;
-      while (activeFiles.size() > 0) {
-        LOG.info("Copy " + activeFiles.size() + " active bulk loaded files. Attempt =" + attempt++);
-        String[] toCopy = new String[activeFiles.size()];
-        activeFiles.toArray(toCopy);
-        // Active file can be archived during copy operation,
-        // we need to handle this properly
-        try {
-          incrementalCopyHFiles(toCopy, tgtDest);
-          break;
-        } catch (IOException e) {
-          // Check if some files got archived
-          // Update active and archived lists
-          // When file is being moved from active to archive
-          // directory, the number of active files decreases
-          int numOfActive = activeFiles.size();
-          updateFileLists(activeFiles, archiveFiles);
-          if (activeFiles.size() < numOfActive) {
-            continue;
-          }
-          // if not - throw exception
-          throw e;
+  private void mergeSplitBulkloads(List<String> activeFiles, List<String> archiveFiles,
+    TableName tn) throws IOException {
+    int attempt = 1;
+
+    while (!activeFiles.isEmpty()) {
+      LOG.info("MergeSplit {} active bulk loaded files. Attempt={}", activeFiles.size(), attempt++);
+      // Active file can be archived during copy operation,
+      // we need to handle this properly
+      try {
+        mergeSplitBulkloads(activeFiles, tn);
+        break;
+      } catch (IOException e) {
+        int numActiveFiles = activeFiles.size();
+        updateFileLists(activeFiles, archiveFiles);
+        if (activeFiles.size() < numActiveFiles) {
+          continue;
         }
+
+        throw e;
       }
-      // If incremental copy will fail for archived files
-      // we will have partially loaded files in backup destination (only files from active data
-      // directory). It is OK, because the backup will marked as FAILED and data will be cleaned up
-      if (archiveFiles.size() > 0) {
-        String[] toCopy = new String[archiveFiles.size()];
-        archiveFiles.toArray(toCopy);
-        incrementalCopyHFiles(toCopy, tgtDest);
+    }
+
+    if (!archiveFiles.isEmpty()) {
+      mergeSplitBulkloads(archiveFiles, tn);
+    }
+  }
+
+  private void mergeSplitBulkloads(List<String> files, TableName tn) throws IOException {
+    MapReduceHFileSplitterJob player = new MapReduceHFileSplitterJob();
+    conf.set(MapReduceHFileSplitterJob.BULK_OUTPUT_CONF_KEY,
+      getBulkOutputDirForTable(tn).toString());
+    player.setConf(conf);
+
+    String inputDirs = StringUtils.join(files, ",");
+    String[] args = { inputDirs, tn.getNameAsString() };

Review Comment:
   Do you need any additional handling for when the table is in a non-`default` namespace?
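
   For instance (a sketch only -- I haven't traced how MapReduceHFileSplitterJob parses its table argument, so this may already be handled), being explicit about the namespace-qualified form would make the intent obvious:

   ```java
   // Sketch: pass "ns:qualifier" explicitly; getNameAsString() omits the namespace
   // only for tables in the default namespace, so the two forms differ just in that case.
   String[] args = { inputDirs, tn.getNameWithNamespaceInclAsString() };
   ```

   Either way, a test covering a table in a non-default namespace would settle it.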



##########
hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotRegionLocator.java:
##########
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import org.apache.commons.lang3.NotImplementedException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
+
+@InterfaceAudience.Private
+public final class SnapshotRegionLocator implements RegionLocator {
+
+  private static final String SNAPSHOT_MANIFEST_DIR_PREFIX =
+    "region.locator.snapshot.manifest.dir.";
+
+  private static final ServerName FAKE_SERVER_NAME =
+    ServerName.parseServerName("www.hbase.com,1234,1212121212");

Review Comment:
   Better to use an `example.net` hostname.
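
   For example (a sketch using a reserved example domain):

   ```java
   // example.net is reserved (RFC 2606), so the fake server name can never collide with a real host.
   private static final ServerName FAKE_SERVER_NAME =
     ServerName.parseServerName("fake.example.net,1234,1212121212");
   ```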



##########
hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java:
##########
@@ -343,6 +350,7 @@ protected void incrementalCopyHFiles(String[] files, String backupDest) throws I
     try {
       LOG.debug("Incremental copy HFiles is starting. dest=" + backupDest);
       // set overall backup phase: incremental_copy
+      // TODO - This should now happen elsewhere maybe

Review Comment:
   Can you elaborate on this idea? Preferably in the comment where it sits in the code.



##########
hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java:
##########
@@ -437,6 +445,29 @@ protected void walToHFiles(List<String> dirPaths, List<String> tableList) throws
     }
   }
 
+  private void incrementalCopyBulkloadHFiles(FileSystem tgtFs, TableName tn) throws IOException {
+    Path bulkOutDir = getBulkOutputDirForTable(tn);
+    FileSystem fs = FileSystem.get(conf);
+
+    if (fs.exists(bulkOutDir)) {

Review Comment:
   When does the output dir not exist? Is that an error case?
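
   If its absence is unexpected, failing loudly seems preferable to silently skipping the copy -- a sketch of what I mean:

   ```java
   // Sketch: treat a missing bulkload staging dir as an error rather than a silent no-op.
   if (!fs.exists(bulkOutDir)) {
     throw new IOException("Expected bulkload output dir does not exist: " + bulkOutDir);
   }
   ```

   If it legitimately may not exist (e.g. no bulk loads for this table), a comment saying so would help the next reader.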



##########
hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java:
##########
@@ -437,6 +445,29 @@ protected void walToHFiles(List<String> dirPaths, List<String> tableList) throws
     }
   }
 
+  private void incrementalCopyBulkloadHFiles(FileSystem tgtFs, TableName tn) throws IOException {
+    Path bulkOutDir = getBulkOutputDirForTable(tn);
+    FileSystem fs = FileSystem.get(conf);
+
+    if (fs.exists(bulkOutDir)) {
+      conf.setInt(MapReduceBackupCopyJob.NUMBER_OF_LEVELS_TO_PRESERVE_KEY, 2);

Review Comment:
   Mutating the conf in-place feels so wrong to me, but copying a conf is _so_ expensive...
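
   For reference, the non-mutating version would look something like the sketch below; whether the copy cost actually matters probably depends on how often this path runs (I haven't checked that the copy job accepts an arbitrary conf at this call site):

   ```java
   // Sketch: copy-on-write so the shared conf object isn't mutated in place.
   Configuration copyOfConf = new Configuration(conf);
   copyOfConf.setInt(MapReduceBackupCopyJob.NUMBER_OF_LEVELS_TO_PRESERVE_KEY, 2);
   // ...then hand copyOfConf (not conf) to the copy job below.
   ```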



##########
hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java:
##########
@@ -235,6 +277,92 @@ public void TestIncBackupRestore() throws Exception {
     }
   }
 
+  @Test
+  public void TestIncBackupRestoreWithOriginalSplits() throws Exception {
+    byte[] fam1 = Bytes.toBytes("f");
+    byte[] mobFam = Bytes.toBytes("mob");
+
+    List<TableName> tables = Lists.newArrayList(table1);
+    TableDescriptor newTable1Desc =
+      TableDescriptorBuilder.newBuilder(table1Desc).setColumnFamily(ColumnFamilyDescriptorBuilder
+        .newBuilder(mobFam).setMobEnabled(true).setMobThreshold(5L).build()).build();
+    TEST_UTIL.getAdmin().modifyTable(newTable1Desc);
+
+    try (Connection conn = ConnectionFactory.createConnection(conf1)) {
+      BackupAdminImpl backupAdmin = new BackupAdminImpl(conn);
+      BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+      String fullBackupId = backupAdmin.backupTables(request);
+      assertTrue(checkSucceeded(fullBackupId));
+
+      TableName[] fromTables = new TableName[] { table1 };
+      TableName[] toTables = new TableName[] { table1_restore };
+      backupAdmin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, fullBackupId, false,
+        fromTables, toTables, true, true));
+
+      Table table = conn.getTable(table1_restore);
+      Assert.assertEquals(HBaseTestingUtil.countRows(table), NB_ROWS_IN_BATCH);
+
+      int ROWS_TO_ADD = 1_000;
+      // different IDs so that rows don't overlap
+      insertIntoTable(conn, table1, fam1, 3, ROWS_TO_ADD);
+      insertIntoTable(conn, table1, mobFam, 4, ROWS_TO_ADD);
+
+      Admin admin = conn.getAdmin();
+      List<HRegion> currentRegions = TEST_UTIL.getHBaseCluster().getRegions(table1);
+      for (HRegion region : currentRegions) {
+        byte[] name = region.getRegionInfo().getEncodedNameAsBytes();
+        admin.splitRegionAsync(name).get();
+      }
+
+      TEST_UTIL.waitTableAvailable(table1);
+
+      // Make sure we've split regions
+      assertNotEquals(currentRegions, TEST_UTIL.getHBaseCluster().getRegions(table1));
+
+      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
+      String incrementalBackupId = backupAdmin.backupTables(request);
+      assertTrue(checkSucceeded(incrementalBackupId));
+      backupAdmin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId,
+        false, fromTables, toTables, true, true));
+      Assert.assertEquals(HBaseTestingUtil.countRows(table),
+        NB_ROWS_IN_BATCH + ROWS_TO_ADD + ROWS_TO_ADD);
+
+      // test bulkloads
+      HRegion regionToBulkload = TEST_UTIL.getHBaseCluster().getRegions(table1).get(0);
+      String regionName = regionToBulkload.getRegionInfo().getEncodedName();
+
+      insertIntoTable(conn, table1, fam1, 5, ROWS_TO_ADD);
+      insertIntoTable(conn, table1, mobFam, 6, ROWS_TO_ADD);
+
+      doBulkload(table1, regionName, famName, mobFam);
+
+      // we need to major compact the regions to make sure there are no references
+      // and the regions are once again splittable
+      TEST_UTIL.compact(true);
+      TEST_UTIL.flush();
+      TEST_UTIL.waitTableAvailable(table1);
+
+      for (HRegion region : TEST_UTIL.getHBaseCluster().getRegions(table1)) {
+        if (region.isSplittable()) {
+          admin.splitRegionAsync(region.getRegionInfo().getEncodedNameAsBytes()).get();
+        }
+      }
+
+      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
+      incrementalBackupId = backupAdmin.backupTables(request);
+      assertTrue(checkSucceeded(incrementalBackupId));
+
+      backupAdmin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId,
+        false, fromTables, toTables, true, true));
+
+      table = conn.getTable(table1);
+      int rowsExpected = HBaseTestingUtil.countRows(table, famName, mobFam);
+      table = conn.getTable(table1_restore);
+
+      Assert.assertEquals(HBaseTestingUtil.countRows(table, famName, mobFam), rowsExpected);
+    }

Review Comment:
   should also assert that the backup files remain in place -- the restore didn't move anything out of the backup directories. In fact, this would be a great assert to include on every test. Or another PR, if you prefer.
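
   A rough sketch of the kind of assertion I mean (`listBackupFiles` is a hypothetical helper, and I'm assuming BACKUP_ROOT_DIR is reachable through FileSystem.get(conf1)):

   ```java
   // Hypothetical helper: snapshot the set of file paths under the backup root.
   private static Set<String> listBackupFiles(FileSystem fs, Path backupRoot) throws IOException {
     Set<String> paths = new HashSet<>();
     RemoteIterator<LocatedFileStatus> it = fs.listFiles(backupRoot, true);
     while (it.hasNext()) {
       paths.add(it.next().getPath().toUri().getPath());
     }
     return paths;
   }

   // Then, around each restore in the test:
   FileSystem fs = FileSystem.get(conf1);
   Set<String> before = listBackupFiles(fs, new Path(BACKUP_ROOT_DIR));
   backupAdmin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, incrementalBackupId,
     false, fromTables, toTables, true, true));
   assertEquals(before, listBackupFiles(fs, new Path(BACKUP_ROOT_DIR)));
   ```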



##########
hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotRegionLocator.java:
##########
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import org.apache.commons.lang3.NotImplementedException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
+
+@InterfaceAudience.Private
+public final class SnapshotRegionLocator implements RegionLocator {
+
+  private static final String SNAPSHOT_MANIFEST_DIR_PREFIX =
+    "region.locator.snapshot.manifest.dir.";
+
+  private static final ServerName FAKE_SERVER_NAME =
+    ServerName.parseServerName("www.hbase.com,1234,1212121212");
+
+  private final TableName tableName;
+  private final TreeMap<byte[], HRegionReplicas> regions;
+
+  private final List<HRegionLocation> rawLocations;
+
+  public static SnapshotRegionLocator create(Configuration conf, TableName table)
+    throws IOException {
+    Path workingDir = new Path(conf.get(getSnapshotManifestDir(table)));
+    FileSystem fs = workingDir.getFileSystem(conf);
+    SnapshotProtos.SnapshotDescription desc =
+      SnapshotDescriptionUtils.readSnapshotInfo(fs, workingDir);
+    SnapshotManifest manifest = SnapshotManifest.open(conf, fs, workingDir, desc);
+
+    TableName tableName = manifest.getTableDescriptor().getTableName();
+    TreeMap<byte[], HRegionReplicas> replicas = new TreeMap<>(Bytes.BYTES_COMPARATOR);
+    List<HRegionLocation> rawLocations = new ArrayList<>();
+
+    for (SnapshotProtos.SnapshotRegionManifest region : manifest.getRegionManifests()) {
+      HBaseProtos.RegionInfo ri = region.getRegionInfo();
+      byte[] key = ri.getStartKey().toByteArray();
+
+      SnapshotHRegionLocation location = toLocation(ri, tableName);
+      rawLocations.add(location);
+      HRegionReplicas hrr = replicas.get(key);
+
+      if (hrr == null) {
+        hrr = new HRegionReplicas(location);
+      } else {
+        hrr.addReplica(location);
+      }
+
+      replicas.put(key, hrr);
+    }
+
+    return new SnapshotRegionLocator(tableName, replicas, rawLocations);
+  }
+
+  private SnapshotRegionLocator(TableName tableName, TreeMap<byte[], HRegionReplicas> regions,
+    List<HRegionLocation> rawLocations) {
+    this.tableName = tableName;
+    this.regions = regions;
+    this.rawLocations = rawLocations;
+  }
+
+  @Override
+  public HRegionLocation getRegionLocation(byte[] row, int replicaId, boolean reload)
+    throws IOException {
+    return regions.floorEntry(row).getValue().getReplica(replicaId);
+  }
+
+  @Override
+  public List<HRegionLocation> getRegionLocations(byte[] row, boolean reload) throws IOException {
+    return List.of(getRegionLocation(row, reload));
+  }
+
+  @Override
+  public void clearRegionLocationCache() {
+
+  }
+
+  @Override
+  public List<HRegionLocation> getAllRegionLocations() throws IOException {
+    return rawLocations;
+  }
+
+  @Override
+  public TableName getName() {
+    return tableName;
+  }
+
+  @Override
+  public void close() throws IOException {
+
+  }
+
+  public static boolean shouldUseSnapshotRegionLocator(Configuration conf, TableName table) {
+    return conf.get(getSnapshotManifestDir(table)) != null;
+  }
+
+  public static void setSnapshotManifestDir(Configuration conf, String dir, TableName table) {
+    conf.set(getSnapshotManifestDir(table), dir);
+  }
+
+  private static String getSnapshotManifestDir(TableName table) {
+    return SNAPSHOT_MANIFEST_DIR_PREFIX + table.getNameAsString().replaceAll("-", "_");

Review Comment:
   Again, we don't honor namespace in the snapshot table path?
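
   A sketch of what I mean, assuming the concern is the `ns:qualifier` form (what `getNameAsString()` returns outside the default namespace) leaking into the conf key unsanitized:

   ```java
   // Sketch: always include the namespace and normalize its ':' delimiter along with '-'.
   private static String getSnapshotManifestDir(TableName table) {
     return SNAPSHOT_MANIFEST_DIR_PREFIX
       + table.getNameWithNamespaceInclAsString().replaceAll("[-:]", "_");
   }
   ```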



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@hbase.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org
