This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch ozone-2.1
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 406622eaee9a9a3f2eb92de57970a1d4daec52fa
Author: Wei-Chiu Chuang <[email protected]>
AuthorDate: Wed Nov 19 12:59:54 2025 -0800

    Revert "HDDS-13070. OM Follower changes to create and place sst files from 
hardlink file. (#8761)"
    
    This reverts commit c98c10b71c02dbc7875a61ad21c73237b9aebd84.
    
     Conflicts:
            hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java
            hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
    
    Change-Id: Ie562cb0b08b60c6451bac23051807b303d77bb9d
---
 .../hadoop/hdds/utils/DBCheckpointServlet.java     |  3 +-
 .../java/org/apache/hadoop/hdds/utils/HAUtils.java | 19 +++++----
 .../hadoop/hdds/utils/RDBSnapshotProvider.java     |  9 ++--
 .../hadoop/hdds/utils/TestRDBSnapshotProvider.java | 18 ++++----
 .../TestOMDbCheckpointServletInodeBasedXfer.java   | 23 +++-------
 .../hadoop/ozone/om/TestOMRatisSnapshots.java      | 37 ++++++++--------
 .../snapshot/TestOzoneManagerSnapshotProvider.java |  2 -
 .../om/OMDBCheckpointServletInodeBasedXfer.java    |  5 ++-
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  2 +-
 .../hadoop/ozone/om/OzoneManagerHttpServer.java    |  2 +-
 .../om/ratis_snapshot/OmRatisSnapshotProvider.java |  2 +-
 .../hadoop/ozone/om/snapshot/OmSnapshotUtils.java  | 28 ++-----------
 .../hadoop/ozone/om/TestOmSnapshotManager.java     | 49 +++++++++++++++++-----
 .../spi/impl/OzoneManagerServiceProviderImpl.java  |  2 -
 14 files changed, 101 insertions(+), 100 deletions(-)

diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointServlet.java
index 26c93cd1792..118a17fbb5d 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointServlet.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointServlet.java
@@ -20,6 +20,7 @@
 import static org.apache.hadoop.hdds.utils.HddsServerUtil.writeDBCheckpointToStream;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_FLUSH;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_TO_EXCLUDE_SST;
+import static org.apache.hadoop.ozone.OzoneConsts.ROCKSDB_SST_SUFFIX;
 
 import com.google.common.annotations.VisibleForTesting;
 import java.io.File;
@@ -281,7 +282,7 @@ protected static Set<String> extractSstFilesToExclude(String[] sstParam) {
     Set<String> receivedSstFiles = new HashSet<>();
     if (sstParam != null) {
       receivedSstFiles.addAll(
-          Arrays.stream(sstParam).distinct().collect(Collectors.toList()));
+          Arrays.stream(sstParam).filter(s -> s.endsWith(ROCKSDB_SST_SUFFIX)).distinct().collect(Collectors.toList()));
       logSstFileList(receivedSstFiles, "Received list of {} SST files to be excluded{}: {}", 5);
     }
     return receivedSstFiles;
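
For reference, the restored exclusion filter only trusts names carrying the RocksDB SST suffix. A minimal standalone sketch of the same filtering, assuming ROCKSDB_SST_SUFFIX resolves to ".sst" (class name is illustrative, not part of the patch):

    import java.util.Arrays;
    import java.util.LinkedHashSet;
    import java.util.Set;

    public class SstExcludeFilterSketch {
      // Assumption: OzoneConsts.ROCKSDB_SST_SUFFIX is ".sst".
      private static final String ROCKSDB_SST_SUFFIX = ".sst";

      // Keep only *.sst names from the raw request parameters; anything
      // else (CURRENT, MANIFEST-*, OPTIONS-*) is dropped. The Set dedupes.
      static Set<String> extractSstFilesToExclude(String[] sstParam) {
        Set<String> received = new LinkedHashSet<>();
        if (sstParam != null) {
          Arrays.stream(sstParam)
              .filter(s -> s.endsWith(ROCKSDB_SST_SUFFIX))
              .forEach(received::add);
        }
        return received;
      }

      public static void main(String[] args) {
        System.out.println(extractSstFilesToExclude(
            new String[] {"000123.sst", "CURRENT", "000123.sst", "MANIFEST-000004"}));
        // prints [000123.sst]
      }
    }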
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java
index 406736f5310..8b194e27b5d 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java
@@ -23,6 +23,7 @@
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_INFO_WAIT_DURATION_DEFAULT;
 import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
 import static org.apache.hadoop.ozone.OzoneConsts.DB_TRANSIENT_MARKER;
+import static org.apache.hadoop.ozone.OzoneConsts.ROCKSDB_SST_SUFFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -318,24 +319,28 @@ public static File getMetaDir(DBDefinition definition,
   }
 
   /**
-   * Scan the DB dir and return the existing files,
-   * including omSnapshot files.
+   * Scan the DB dir and return the existing SST files,
+   * including omSnapshot sst files.
+   * SSTs could be used for avoiding repeated download.
    *
    * @param db the file representing the DB to be scanned
-   * @return the list of file names. If db not exist, will return empty list
+   * @return the list of SST file name. If db not exist, will return empty list
    */
-  public static List<String> getExistingFiles(File db) throws IOException {
+  public static List<String> getExistingSstFiles(File db) throws IOException {
     List<String> sstList = new ArrayList<>();
     if (!db.exists()) {
       return sstList;
     }
+
+    int truncateLength = db.toString().length() + 1;
     // Walk the db dir and get all sst files including omSnapshot files.
     try (Stream<Path> files = Files.walk(db.toPath())) {
-      sstList = files.filter(p -> p.toFile().isFile())
-          .map(p -> p.getFileName().toString()).
+      sstList =
+          files.filter(path -> path.toString().endsWith(ROCKSDB_SST_SUFFIX)).
+              map(p -> p.toString().substring(truncateLength)).
               collect(Collectors.toList());
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Scanned files {} in {}.", sstList, db.getAbsolutePath());
+        LOG.debug("Scanned SST files {} in {}.", sstList, 
db.getAbsolutePath());
       }
     }
     return sstList;
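
The renamed helper also changes what it returns: *.sst paths relative to the DB root (so omSnapshot SSTs under, e.g., db.snapshots subdirectories keep their prefix) rather than bare file names. A self-contained sketch of that walk, assuming the ".sst" suffix constant:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.Collections;
    import java.util.List;
    import java.util.stream.Collectors;
    import java.util.stream.Stream;

    public class ExistingSstFilesSketch {
      // Walk dbDir and return every *.sst path relative to dbDir, e.g.
      // "000042.sst" or "db.snapshots/checkpointState/.../000042.sst".
      static List<String> getExistingSstFiles(Path dbDir) throws IOException {
        if (!Files.exists(dbDir)) {
          return Collections.emptyList();
        }
        int truncateLength = dbDir.toString().length() + 1;  // drop "<dbDir>/"
        try (Stream<Path> files = Files.walk(dbDir)) {
          return files
              .filter(p -> p.toString().endsWith(".sst"))
              .map(p -> p.toString().substring(truncateLength))
              .collect(Collectors.toList());
        }
      }
    }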
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/RDBSnapshotProvider.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/RDBSnapshotProvider.java
index b40a1f84e15..777efcf47ea 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/RDBSnapshotProvider.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/RDBSnapshotProvider.java
@@ -108,15 +108,14 @@ public DBCheckpoint downloadDBSnapshotFromLeader(String leaderNodeID)
     LOG.info("Prepare to download the snapshot from leader OM {} and " +
         "reloading state from the snapshot.", leaderNodeID);
     checkLeaderConsistency(leaderNodeID);
-    int numParts = 0;
 
     while (true) {
       String snapshotFileName = getSnapshotFileName(leaderNodeID);
       File targetFile = new File(snapshotDir, snapshotFileName);
       downloadSnapshot(leaderNodeID, targetFile);
-      LOG.info("Successfully download the latest snapshot {} from leader OM: 
{}, part : {}",
-          targetFile, leaderNodeID, numParts);
-      numParts++;
+      LOG.info(
+          "Successfully download the latest snapshot {} from leader OM: {}",
+          targetFile, leaderNodeID);
 
       numDownloaded.incrementAndGet();
       injectPause();
@@ -154,7 +153,7 @@ void checkLeaderConsistency(String currentLeader) throws IOException {
       return;
     }
 
-    List<String> files = HAUtils.getExistingFiles(candidateDir);
+    List<String> files = HAUtils.getExistingSstFiles(candidateDir);
     if (!files.isEmpty()) {
       LOG.warn("Candidate DB directory {} is not empty when last leader is " +
           "null.", candidateDir);
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRDBSnapshotProvider.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRDBSnapshotProvider.java
index f8a1acf8739..7e141f1072d 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRDBSnapshotProvider.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRDBSnapshotProvider.java
@@ -115,11 +115,9 @@ public void downloadSnapshot(String leaderNodeID, File targetFile)
                 concat(String.valueOf(a.length())))
             .collect(Collectors.toList()));
         try (OutputStream outputStream = Files.newOutputStream(targetFile.toPath())) {
-          Set<String> existingSstFiles = HAUtils.getExistingFiles(rdbSnapshotProvider.getCandidateDir())
-              .stream()
-              .filter(fName -> fName.endsWith(".sst") && !fName.equals(".sst"))
-              .collect(Collectors.toSet());
-          writeDBCheckpointToStream(dbCheckpoint, outputStream, existingSstFiles);
+          writeDBCheckpointToStream(dbCheckpoint, outputStream,
+              new HashSet<>(HAUtils.getExistingSstFiles(
+                  rdbSnapshotProvider.getCandidateDir())));
         }
       }
     };
@@ -142,7 +140,7 @@ public void testDownloadDBSnapshotFromLeader() throws Exception {
     assertTrue(candidateDir.exists());
 
     DBCheckpoint checkpoint;
-    int before = HAUtils.getExistingFiles(
+    int before = HAUtils.getExistingSstFiles(
         rdbSnapshotProvider.getCandidateDir()).size();
     assertEquals(0, before);
 
@@ -150,12 +148,12 @@ public void testDownloadDBSnapshotFromLeader() throws Exception {
     checkpoint = rdbSnapshotProvider.downloadDBSnapshotFromLeader(LEADER_ID);
     File checkpointDir = checkpoint.getCheckpointLocation().toFile();
     assertEquals(candidateDir, checkpointDir);
-    int first = HAUtils.getExistingFiles(
+    int first = HAUtils.getExistingSstFiles(
         rdbSnapshotProvider.getCandidateDir()).size();
 
     // Get second snapshot
     checkpoint = rdbSnapshotProvider.downloadDBSnapshotFromLeader(LEADER_ID);
-    int second = HAUtils.getExistingFiles(
+    int second = HAUtils.getExistingSstFiles(
         rdbSnapshotProvider.getCandidateDir()).size();
     assertThat(second).withFailMessage("The second snapshot should have more SST files")
         .isGreaterThan(first);
@@ -165,7 +163,7 @@ public void testDownloadDBSnapshotFromLeader() throws Exception {
 
     // Get third snapshot
     checkpoint = rdbSnapshotProvider.downloadDBSnapshotFromLeader(LEADER_ID);
-    int third = HAUtils.getExistingFiles(
+    int third = HAUtils.getExistingSstFiles(
         rdbSnapshotProvider.getCandidateDir()).size();
     assertThat(third).withFailMessage("The third snapshot should have more SST files")
         .isGreaterThan(second);
@@ -174,7 +172,7 @@ public void testDownloadDBSnapshotFromLeader() throws Exception {
 
     // Test cleanup candidateDB
     rdbSnapshotProvider.init();
-    assertEquals(0, HAUtils.getExistingFiles(
+    assertEquals(0, HAUtils.getExistingSstFiles(
         rdbSnapshotProvider.getCandidateDir()).size());
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java
index 16d84dd4d0e..c3dd209acba 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java
@@ -228,6 +228,7 @@ void testContentsOfTarballWithSnapshot() throws Exception {
         .forEachRemaining(snapInfo -> snapshotPaths.add(getSnapshotDBPath(snapInfo.getCheckpointDir())));
     Set<String> inodesFromOmDataDir = new HashSet<>();
     Set<String> inodesFromTarball = new HashSet<>();
+    Set<Path> allPathsInTarball = new HashSet<>();
     try (Stream<Path> filesInTarball = Files.list(newDbDir.toPath())) {
       List<Path> files = filesInTarball.collect(Collectors.toList());
       for (Path p : files) {
@@ -237,6 +238,7 @@ void testContentsOfTarballWithSnapshot() throws Exception {
         }
         String inode = getInode(file.getName());
         inodesFromTarball.add(inode);
+        allPathsInTarball.add(p);
       }
     }
     Map<String, List<String>> hardLinkMapFromOmData = new HashMap<>();
@@ -272,23 +274,10 @@ void testContentsOfTarballWithSnapshot() throws Exception {
         "Number of generated YAML files should match the number of snapshots.");
 
     // create hardlinks now
-    OmSnapshotUtils.createHardLinks(newDbDir.toPath(), true);
-
-    if (includeSnapshot) {
-      List<String> yamlRelativePaths = snapshotPaths.stream().map(path -> {
-        int startIndex = path.indexOf("db.snapshots");
-        if (startIndex != -1) {
-          return path.substring(startIndex) + ".yaml";
-        }
-        return path + ".yaml";
-      }).collect(Collectors.toList());
-
-      for (String yamlRelativePath : yamlRelativePaths) {
-        String yamlFileName = Paths.get(newDbDir.getPath(), yamlRelativePath).toString();
-        assertTrue(Files.exists(Paths.get(yamlFileName)));
-      }
+    OmSnapshotUtils.createHardLinks(newDbDir.toPath());
+    for (Path old : allPathsInTarball) {
+      assertTrue(old.toFile().delete());
     }
-
     assertFalse(hardlinkFilePath.toFile().exists());
   }
 
@@ -317,7 +306,7 @@ public void testSnapshotDBConsistency() throws Exception {
     FileUtil.unTar(tempFile, newDbDir);
     Set<Path> allPathsInTarball = getAllPathsInTarball(newDbDir);
     // create hardlinks now
-    OmSnapshotUtils.createHardLinks(newDbDir.toPath(), false);
+    OmSnapshotUtils.createHardLinks(newDbDir.toPath());
     for (Path old : allPathsInTarball) {
       assertTrue(old.toFile().delete());
     }
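
Both tests now exercise the same property: once createHardLinks has run, every unpacked original can be deleted and the linked copies still resolve. A minimal standalone demonstration of that hard-link semantics (temp-dir demo, not the test code):

    import java.nio.file.Files;
    import java.nio.file.Path;

    public class HardLinkSurvivesDeleteSketch {
      public static void main(String[] args) throws Exception {
        Path dir = Files.createTempDirectory("hardlink-demo");
        Path source = Files.write(dir.resolve("000042.sst"), "sst-bytes".getBytes());
        Path link = dir.resolve("linked-000042.sst");

        // A hard link is a second directory entry for the same inode.
        Files.createLink(link, source);

        // Removing the original entry does not free the data; the link
        // still reads back the bytes, which is what the tests assert.
        Files.delete(source);
        System.out.println(new String(Files.readAllBytes(link)));  // sst-bytes
      }
    }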
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
index a6a9e897a91..7e788819374 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
@@ -248,8 +248,8 @@ public void testInstallSnapshot(@TempDir Path tempDir) throws Exception {
     // Wait & for follower to update transactions to leader snapshot index.
     // Timeout error if follower does not load update within 10s
     GenericTestUtils.waitFor(() -> {
-      long index = followerOM.getOmRatisServer().getLastAppliedTermIndex().getIndex();
-      return index >= leaderOMSnapshotIndex - 1;
+      return followerOM.getOmRatisServer().getLastAppliedTermIndex().getIndex()
+          >= leaderOMSnapshotIndex - 1;
     }, 100, 30_000);
 
     long followerOMLastAppliedIndex =
@@ -569,7 +569,7 @@ private IncrementData getNextIncrementalTarball(
     Path increment = Paths.get(tempDir.toString(), "increment" + numKeys);
     assertTrue(increment.toFile().mkdirs());
     unTarLatestTarBall(followerOM, increment);
-    List<String> sstFiles = HAUtils.getExistingFiles(increment.toFile());
+    List<String> sstFiles = HAUtils.getExistingSstFiles(increment.toFile());
     Path followerCandidatePath = followerOM.getOmSnapshotProvider().
         getCandidateDir().toPath();
 
@@ -655,7 +655,7 @@ public void testInstallIncrementalSnapshotWithFailure() throws Exception {
     // Corrupt the mixed checkpoint in the candidate DB dir
     File followerCandidateDir = followerOM.getOmSnapshotProvider().
         getCandidateDir();
-    List<String> sstList = HAUtils.getExistingFiles(followerCandidateDir);
+    List<String> sstList = HAUtils.getExistingSstFiles(followerCandidateDir);
     assertThat(sstList.size()).isGreaterThan(0);
     for (int i = 0; i < sstList.size(); i += 2) {
       File victimSst = new File(followerCandidateDir, sstList.get(i));
@@ -1010,7 +1010,6 @@ public void testInstallCorruptedCheckpointFailure() throws Exception {
     DBCheckpoint leaderDbCheckpoint = leaderOM.getMetadataManager().getStore()
         .getCheckpoint(false);
     Path leaderCheckpointLocation = leaderDbCheckpoint.getCheckpointLocation();
-    OmSnapshotUtils.createHardLinks(leaderCheckpointLocation, true);
     TransactionInfo leaderCheckpointTrxnInfo = OzoneManagerRatisUtils
         .getTrxnInfoFromCheckpoint(conf, leaderCheckpointLocation);
 
@@ -1165,7 +1164,7 @@ public void pause() throws IOException {
       // max size config.  That way next time through, we get multiple
       // tarballs.
       if (count == 1) {
-        long sstSize = getSizeOfFiles(tarball);
+        long sstSize = getSizeOfSstFiles(tarball);
         om.getConfiguration().setLong(
             OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_KEY, sstSize / 2);
         // Now empty the tarball to restart the download
@@ -1174,20 +1173,21 @@ public void pause() throws IOException {
       } else {
         // Each time we get a new tarball add a set of
         // its sst file to the list, (i.e. one per tarball.)
-        sstSetList.add(getFilenames(tarball));
+        sstSetList.add(getSstFilenames(tarball));
       }
     }
 
     // Get Size of sstfiles in tarball.
-    private long getSizeOfFiles(File tarball) throws IOException {
+    private long getSizeOfSstFiles(File tarball) throws IOException {
       FileUtil.unTar(tarball, tempDir.toFile());
-      List<Path> sstPaths = Files.walk(tempDir).
+      List<Path> sstPaths = Files.walk(tempDir).filter(
+          path -> path.toString().endsWith(".sst")).
           collect(Collectors.toList());
-      long totalFileSize = 0;
+      long sstSize = 0;
       for (Path sstPath : sstPaths) {
-        totalFileSize += Files.size(sstPath);
+        sstSize += Files.size(sstPath);
       }
-      return totalFileSize;
+      return sstSize;
     }
 
     private void createEmptyTarball(File dummyTarFile)
@@ -1198,18 +1198,21 @@ private void createEmptyTarball(File dummyTarFile)
       archiveOutputStream.close();
     }
 
-    // Return a list of files in tarball.
-    private Set<String> getFilenames(File tarball)
+    // Return a list of sst files in tarball.
+    private Set<String> getSstFilenames(File tarball)
         throws IOException {
-      Set<String> fileNames = new HashSet<>();
+      Set<String> sstFilenames = new HashSet<>();
       try (TarArchiveInputStream tarInput =
            new TarArchiveInputStream(Files.newInputStream(tarball.toPath()))) {
         TarArchiveEntry entry;
         while ((entry = tarInput.getNextTarEntry()) != null) {
-          fileNames.add(entry.getName());
+          String name = entry.getName();
+          if (name.toLowerCase().endsWith(".sst")) {
+            sstFilenames.add(entry.getName());
+          }
         }
       }
-      return fileNames;
+      return sstFilenames;
     }
 
     // Find the tarball in the dir.
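
The renamed getSstFilenames helper scans tar entries without extracting them. A self-contained sketch of that pattern with commons-compress (hypothetical class; same ".sst" filter as the test):

    import java.io.File;
    import java.io.IOException;
    import java.nio.file.Files;
    import java.util.HashSet;
    import java.util.Set;
    import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
    import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;

    public class TarSstListingSketch {
      // List the *.sst entries in a tarball without unpacking it.
      static Set<String> listSstEntries(File tarball) throws IOException {
        Set<String> sstNames = new HashSet<>();
        try (TarArchiveInputStream in =
                 new TarArchiveInputStream(Files.newInputStream(tarball.toPath()))) {
          TarArchiveEntry entry;
          while ((entry = in.getNextTarEntry()) != null) {
            if (entry.getName().toLowerCase().endsWith(".sst")) {
              sstNames.add(entry.getName());
            }
          }
        }
        return sstNames;
      }
    }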
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java
index 4d5f1dda1c7..bdf1f7d1dab 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java
@@ -124,8 +124,6 @@ public void testDownloadCheckpoint() throws Exception {
   private long getDownloadedSnapshotIndex(DBCheckpoint dbCheckpoint)
       throws Exception {
 
-    OmSnapshotUtils.createHardLinks(dbCheckpoint.getCheckpointLocation(), true);
-
     TransactionInfo trxnInfoFromCheckpoint =
         OzoneManagerRatisUtils.getTrxnInfoFromCheckpoint(conf,
             dbCheckpoint.getCheckpointLocation());
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java
index dee84d4b401..5859b23aa4b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java
@@ -41,11 +41,13 @@
 import java.nio.file.StandardOpenOption;
 import java.time.Duration;
 import java.time.Instant;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.LinkedHashSet;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicLong;
@@ -125,6 +127,7 @@ public void init() throws ServletException {
   @Override
   public void processMetadataSnapshotRequest(HttpServletRequest request, HttpServletResponse response,
       boolean isFormData, boolean flush) {
+    List<String> excludedSstList = new ArrayList<>();
     String[] sstParam = isFormData ?
         parseFormDataParameters(request) : request.getParameterValues(
         OZONE_DB_CHECKPOINT_REQUEST_TO_EXCLUDE_SST);
@@ -145,7 +148,7 @@ public void processMetadataSnapshotRequest(HttpServletRequest request, HttpServl
       long duration = Duration.between(start, end).toMillis();
       LOG.info("Time taken to write the checkpoint to response output " +
           "stream: {} milliseconds", duration);
-      logSstFileList(receivedSstFiles,
+      logSstFileList(excludedSstList,
           "Excluded {} SST files from the latest checkpoint{}: {}", 5);
     } catch (Exception e) {
       LOG.error(
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 56e51cf4026..a4e384890de 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -3988,7 +3988,7 @@ public synchronized TermIndex installSnapshotFromLeader(String leaderId) {
     TermIndex termIndex = null;
     try {
       // Install hard links.
-      OmSnapshotUtils.createHardLinks(omDBCheckpoint.getCheckpointLocation(), false);
+      OmSnapshotUtils.createHardLinks(omDBCheckpoint.getCheckpointLocation());
       termIndex = installCheckpoint(leaderId, omDBCheckpoint);
     } catch (Exception ex) {
       LOG.error("Failed to install snapshot from Leader OM.", ex);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java
index 8836324410b..355d6249806 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java
@@ -36,7 +36,7 @@ public OzoneManagerHttpServer(MutableConfigurationSource conf,
     addServlet("serviceList", OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT,
         ServiceListJSONServlet.class);
     addServlet("dbCheckpoint", OZONE_DB_CHECKPOINT_HTTP_ENDPOINT,
-        OMDBCheckpointServletInodeBasedXfer.class);
+        OMDBCheckpointServlet.class);
     getWebAppContext().setAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE, om);
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis_snapshot/OmRatisSnapshotProvider.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis_snapshot/OmRatisSnapshotProvider.java
index ef0a46548a4..a343232c39e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis_snapshot/OmRatisSnapshotProvider.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis_snapshot/OmRatisSnapshotProvider.java
@@ -153,7 +153,7 @@ public void downloadSnapshot(String leaderNodeID, File targetFile)
       connection.setRequestProperty("Content-Type", contentTypeValue);
       connection.setDoOutput(true);
       writeFormData(connection,
-          HAUtils.getExistingFiles(getCandidateDir()));
+          HAUtils.getExistingSstFiles(getCandidateDir()));
 
       connection.connect();
       int errorCode = connection.getResponseCode();
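
The form body is how the follower advertises the SSTs it already holds, so the leader can exclude them from the tarball. A simplified sketch of that request shape, assuming a urlencoded body and a hypothetical "toExcludeSST" field name (the real code writes multipart form data via writeFormData):

    import java.io.IOException;
    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.net.URLEncoder;
    import java.nio.charset.StandardCharsets;
    import java.util.List;
    import java.util.stream.Collectors;

    public class ExcludeSstPostSketch {
      // POST the candidate dir's existing SSTs; one repeated field per file.
      static int postExclusionList(URL checkpointUrl, List<String> existingSsts)
          throws IOException {
        String body = existingSsts.stream()
            .map(name -> "toExcludeSST=" + encode(name))  // field name assumed
            .collect(Collectors.joining("&"));
        HttpURLConnection conn = (HttpURLConnection) checkpointUrl.openConnection();
        conn.setRequestMethod("POST");
        conn.setRequestProperty("Content-Type", "application/x-www-form-urlencoded");
        conn.setDoOutput(true);
        try (OutputStream out = conn.getOutputStream()) {
          out.write(body.getBytes(StandardCharsets.UTF_8));
        }
        return conn.getResponseCode();
      }

      private static String encode(String s) {
        try {
          return URLEncoder.encode(s, StandardCharsets.UTF_8.name());
        } catch (IOException e) {
          throw new IllegalStateException(e);
        }
      }
    }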
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java
index 497c7a064b8..848384ce3e2 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java
@@ -28,14 +28,11 @@
 import java.nio.file.Paths;
 import java.nio.file.attribute.BasicFileAttributes;
 import java.nio.file.attribute.FileTime;
-import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import org.apache.hadoop.ozone.om.OmSnapshotManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Ozone Manager Snapshot Utilities.
@@ -44,8 +41,6 @@ public final class OmSnapshotUtils {
 
   public static final String DATA_PREFIX = "data";
   public static final String DATA_SUFFIX = "txt";
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OmSnapshotUtils.class);
 
   private OmSnapshotUtils() { }
 
@@ -129,12 +124,10 @@ public static Path createHardLinkList(int truncateLength,
    * Create hard links listed in OM_HARDLINK_FILE.
    *
    * @param dbPath Path to db to have links created.
-   * @param deleteSourceFiles - Whether to delete the source files after creating the links.
    */
-  public static void createHardLinks(Path dbPath, boolean deleteSourceFiles) throws IOException {
+  public static void createHardLinks(Path dbPath) throws IOException {
     File hardLinkFile =
         new File(dbPath.toString(), OmSnapshotManager.OM_HARDLINK_FILE);
-    List<Path> filesToDelete = new ArrayList<>();
     if (hardLinkFile.exists()) {
       // Read file.
       try (Stream<String> s = Files.lines(hardLinkFile.toPath())) {
@@ -142,15 +135,9 @@ public static void createHardLinks(Path dbPath, boolean deleteSourceFiles) throw
 
         // Create a link for each line.
         for (String l : lines) {
-          String[] parts = l.split("\t");
-          if (parts.length != 2) {
-            LOG.warn("Skipping malformed line in hardlink file: {}", l);
-            continue;
-          }
-          String from = parts[1];
-          String to = parts[0];
+          String from = l.split("\t")[1];
+          String to = l.split("\t")[0];
           Path fullFromPath = Paths.get(dbPath.toString(), from);
-          filesToDelete.add(fullFromPath);
           Path fullToPath = Paths.get(dbPath.toString(), to);
           // Make parent dir if it doesn't exist.
           Path parent = fullToPath.getParent();
@@ -167,15 +154,6 @@ public static void createHardLinks(Path dbPath, boolean deleteSourceFiles) throw
         }
       }
     }
-    if (deleteSourceFiles) {
-      for (Path fileToDelete : filesToDelete) {
-        try {
-          Files.deleteIfExists(fileToDelete);
-        } catch (IOException e) {
-          LOG.warn("Couldn't delete source file {} while unpacking the DB", 
fileToDelete, e);
-        }
-      }
-    }
   }
 
   /**
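
After the revert, createHardLinks again assumes every line of OM_HARDLINK_FILE is well formed: a tab-separated pair, link path first and existing source second (as the to/from variable names suggest), both relative to the unpacked DB dir. A standalone sketch of that parsing (hypothetical class, not the method's exact code):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.List;
    import java.util.stream.Collectors;
    import java.util.stream.Stream;

    public class HardLinkFileSketch {
      // Each line is "<link path>\t<source path>", relative to dbPath.
      static void createHardLinks(Path dbPath, Path hardLinkFile) throws IOException {
        try (Stream<String> s = Files.lines(hardLinkFile)) {
          List<String> lines = s.collect(Collectors.toList());
          for (String l : lines) {
            String[] parts = l.split("\t");  // no malformed-line guard, as restored
            Path link = dbPath.resolve(parts[0]);
            Path source = dbPath.resolve(parts[1]);
            Files.createDirectories(link.getParent());  // parent may not exist yet
            Files.createLink(link, source);
          }
        }
      }
    }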
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
index 7f808df3f97..03c3b4e4ef1 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
@@ -19,7 +19,7 @@
 
 import static org.apache.commons.io.file.PathUtils.copyDirectory;
 import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
-import static org.apache.hadoop.hdds.utils.HAUtils.getExistingFiles;
+import static org.apache.hadoop.hdds.utils.HAUtils.getExistingSstFiles;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_CHECKPOINT_DIR;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
@@ -35,6 +35,7 @@
 import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.KEY_TABLE;
 import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.VOLUME_TABLE;
 import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.getINode;
+import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.truncateFileName;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -456,7 +457,7 @@ public void testHardLinkCreation() throws IOException {
     File s1FileLink = new File(followerSnapDir2, "s1.sst");
 
     // Create links on the follower from list.
-    OmSnapshotUtils.createHardLinks(candidateDir.toPath(), false);
+    OmSnapshotUtils.createHardLinks(candidateDir.toPath());
 
     // Confirm expected follower links.
     assertTrue(s1FileLink.exists());
@@ -499,16 +500,44 @@ public void testGetSnapshotInfo() throws IOException {
   @Test
   public void testExcludeUtilities() throws IOException {
     File noLinkFile = new File(followerSnapDir2, "noLink.sst");
-    File nonSstFile = new File(followerSnapDir2, "nonSstFile");
+
     // Confirm that the list of existing sst files is as expected.
-    List<String> existingSstList = getExistingFiles(candidateDir);
+    List<String> existingSstList = getExistingSstFiles(candidateDir);
     Set<String> existingSstFiles = new HashSet<>(existingSstList);
-    Set<String> expectedSstFileNames = new HashSet<>(Arrays.asList(
-        s1File.getName(),
-        noLinkFile.getName(),
-        f1File.getName(),
-        nonSstFile.getName()));
-    assertEquals(expectedSstFileNames, existingSstFiles);
+    int truncateLength = candidateDir.toString().length() + 1;
+    Set<String> expectedSstFiles = new HashSet<>(Arrays.asList(
+        s1File.toString().substring(truncateLength),
+        noLinkFile.toString().substring(truncateLength),
+        f1File.toString().substring(truncateLength)));
+    assertEquals(expectedSstFiles, existingSstFiles);
+
+    // Confirm that the excluded list is normalized as expected.
+    //  (Normalizing means matches the layout on the leader.)
+    File leaderSstBackupDir = new File(leaderDir.toString(), "sstBackup");
+    assertTrue(leaderSstBackupDir.mkdirs());
+    File leaderTmpDir = new File(leaderDir.toString(), "tmp");
+    assertTrue(leaderTmpDir.mkdirs());
+    OMDBCheckpointServlet.DirectoryData sstBackupDir =
+        new OMDBCheckpointServlet.DirectoryData(leaderTmpDir.toPath(),
+        leaderSstBackupDir.toString());
+    Path srcSstBackup = Paths.get(sstBackupDir.getTmpDir().toString(),
+        "backup.sst");
+    Path destSstBackup = Paths.get(sstBackupDir.getOriginalDir().toString(),
+        "backup.sst");
+    truncateLength = leaderDir.toString().length() + 1;
+    existingSstList.add(truncateFileName(truncateLength, destSstBackup));
+    Map<String, Map<Path, Path>> normalizedMap =
+        OMDBCheckpointServlet.normalizeExcludeList(existingSstList,
+        leaderCheckpointDir.toPath(), sstBackupDir);
+    Map<String, Map<Path, Path>> expectedMap = new TreeMap<>();
+    Path s1 = Paths.get(leaderSnapDir1.toString(), "s1.sst");
+    Path noLink = Paths.get(leaderSnapDir2.toString(), "noLink.sst");
+    Path f1 = Paths.get(leaderCheckpointDir.toString(), "f1.sst");
+    expectedMap.put("s1.sst", ImmutableMap.of(s1, s1));
+    expectedMap.put("noLink.sst", ImmutableMap.of(noLink, noLink));
+    expectedMap.put("f1.sst", ImmutableMap.of(f1, f1));
+    expectedMap.put("backup.sst", ImmutableMap.of(srcSstBackup, 
destSstBackup));
+    assertEquals(expectedMap, new TreeMap<>(normalizedMap));
   }
 
   /*
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
index 5409309516a..b4b96ff4d88 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
@@ -84,7 +84,6 @@
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.DBUpdates;
 import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
-import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort.Type;
 import org.apache.hadoop.ozone.recon.ReconContext;
@@ -471,7 +470,6 @@ public DBCheckpoint getOzoneManagerDBSnapshot() {
         try (InputStream inputStream = reconUtils.makeHttpCall(
            connectionFactory, getOzoneManagerSnapshotUrl(), isOmSpnegoEnabled()).getInputStream()) {
           tarExtractor.extractTar(inputStream, untarredDbDir);
-          OmSnapshotUtils.createHardLinks(untarredDbDir, true);
         } catch (IOException | InterruptedException e) {
          reconContext.updateHealthStatus(new AtomicBoolean(false));
          reconContext.updateErrors(ReconContext.ErrorCode.GET_OM_DB_SNAPSHOT_FAILED);


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
