taklwu commented on code in PR #7171: URL: https://github.com/apache/hbase/pull/7171#discussion_r2237799822
########## hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/impl/TestBackupAdminImpl.java: ########## @@ -0,0 +1,719 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup.impl; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anySet; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import 
org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.util.BackupUtils; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.ArgumentCaptor; +import org.mockito.MockedStatic; + +/** + * Unit tests for {@link BackupAdminImpl}. + * <p> + * This class improves test coverage by validating the behavior of key methods in BackupAdminImpl. + * Some methods are made package-private to enable testing. + */ +@Category(SmallTests.class) +public class TestBackupAdminImpl { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestBackupAdminImpl.class); + + private BackupAdminImpl backupAdminImpl; + private BackupSystemTable mockTable; + + @Before + public void setUp() { + backupAdminImpl = new BackupAdminImpl(null); + mockTable = mock(BackupSystemTable.class); + } + + /** + * Scenario: - The initial incremental table set contains "table1" and "table2" - Only "table1" + * still exists in backup history Expectation: - The backup system should delete the existing set + * - Then re-add a filtered set that includes only "table1" + */ + @Test + public void testFinalizeDelete_addsRetainedTablesBack() throws IOException { + String backupRoot = "backupRoot1"; + List<String> backupRoots = Collections.singletonList(backupRoot); + + Set<TableName> initialTableSet = + new HashSet<>(Arrays.asList(TableName.valueOf("ns:table1"), TableName.valueOf("ns:table2"))); + + Map<TableName, List<BackupInfo>> backupHistory = new HashMap<>(); + 
backupHistory.put(TableName.valueOf("ns:table1"), List.of(new BackupInfo())); // Only table1 + // retained + + when(mockTable.getIncrementalBackupTableSet(backupRoot)) + .thenReturn(new HashSet<>(initialTableSet)); + when(mockTable.getBackupHistoryForTableSet(initialTableSet, backupRoot)) + .thenReturn(backupHistory); + + backupAdminImpl.finalizeDelete(backupRoots, mockTable); + + // Always remove existing backup metadata + verify(mockTable).deleteIncrementalBackupTableSet(backupRoot); + + // Re-add only retained tables (should be just table1) + @SuppressWarnings("unchecked") + ArgumentCaptor<Set<TableName>> captor = + (ArgumentCaptor<Set<TableName>>) (ArgumentCaptor<?>) ArgumentCaptor.forClass(Set.class); + verify(mockTable).addIncrementalBackupTableSet(captor.capture(), eq(backupRoot)); + + Set<TableName> retained = captor.getValue(); + assertEquals(1, retained.size()); + assertTrue(retained.contains(TableName.valueOf("ns:table1"))); + } + + /** + * Scenario: - The incremental table set has "tableX" - No backups exist for this table + * Expectation: - Backup metadata should be deleted - Nothing should be re-added since no tables + * are retained + */ + @Test + public void testFinalizeDelete_retainedSetEmpty_doesNotAddBack() throws IOException { + String backupRoot = "backupRoot2"; + List<String> backupRoots = List.of(backupRoot); + + Set<TableName> initialTableSet = Set.of(TableName.valueOf("ns:tableX")); + Map<TableName, List<BackupInfo>> backupHistory = Map.of(); // No overlap + + when(mockTable.getIncrementalBackupTableSet(backupRoot)) + .thenReturn(new HashSet<>(initialTableSet)); + when(mockTable.getBackupHistoryForTableSet(initialTableSet, backupRoot)) + .thenReturn(backupHistory); + + backupAdminImpl.finalizeDelete(backupRoots, mockTable); + + // Delete should be called + verify(mockTable).deleteIncrementalBackupTableSet(backupRoot); + // No add back since retained set is empty + verify(mockTable, never()).addIncrementalBackupTableSet(any(), 
eq(backupRoot)); + } + + /** + * Scenario: - Two backup roots: - root1: one table with valid backup history → should be retained + * - root2: one table with no history → should not be retained Expectation: - root1 metadata + * deleted and re-added - root2 metadata only deleted + */ + @Test + public void testFinalizeDelete_multipleRoots() throws IOException { + String root1 = "root1"; + String root2 = "root2"; + List<String> roots = List.of(root1, root2); + + TableName t1 = TableName.valueOf("ns:table1"); + TableName t2 = TableName.valueOf("ns:table2"); + + // root1 setup + when(mockTable.getIncrementalBackupTableSet(root1)).thenReturn(new HashSet<>(List.of(t1))); + when(mockTable.getBackupHistoryForTableSet(Set.of(t1), root1)) + .thenReturn(Map.of(t1, List.of(new BackupInfo()))); + + // root2 setup + when(mockTable.getIncrementalBackupTableSet(root2)).thenReturn(new HashSet<>(List.of(t2))); + when(mockTable.getBackupHistoryForTableSet(Set.of(t2), root2)).thenReturn(Map.of()); // empty + // history + + backupAdminImpl.finalizeDelete(roots, mockTable); + + // root1: should delete and re-add table + verify(mockTable).deleteIncrementalBackupTableSet(root1); + verify(mockTable).addIncrementalBackupTableSet(Set.of(t1), root1); + + // root2: delete only + verify(mockTable).deleteIncrementalBackupTableSet(root2); + verify(mockTable, never()).addIncrementalBackupTableSet(anySet(), eq(root2)); + } + + /** + * Verifies that {@code cleanupBackupDir} correctly deletes the target backup directory for a + * given table and backup ID using the mocked FileSystem. + * <p> + * This test ensures: - The correct path is constructed using BackupUtils. - FileSystem#delete is + * invoked with that path. 
+ */ + @Test + public void testCleanupBackupDir_deletesTargetDirSuccessfully() throws Exception { + // Setup test input + String backupId = "backup_001"; + String backupRootDir = "/backup/root"; + TableName table = TableName.valueOf("test_table"); + + BackupInfo mockBackupInfo = mock(BackupInfo.class); + when(mockBackupInfo.getBackupRootDir()).thenReturn(backupRootDir); + when(mockBackupInfo.getBackupId()).thenReturn(backupId); + + Configuration conf = new Configuration(); + + // Spy BackupAdminImpl to mock getFileSystem behavior + backupAdminImpl = spy(backupAdminImpl); + + FileSystem mockFs = mock(FileSystem.class); + Path expectedPath = new Path(BackupUtils.getTableBackupDir(backupRootDir, backupId, table)); + + // Mock getFileSystem to return our mock FileSystem + doReturn(mockFs).when(backupAdminImpl).getFileSystem(any(Path.class), eq(conf)); + when(mockFs.delete(expectedPath, true)).thenReturn(true); + + // Call the method under test + backupAdminImpl.cleanupBackupDir(mockBackupInfo, table, conf); + + // Verify the FileSystem delete call with correct path + verify(mockFs).delete(expectedPath, true); + } + + /** + * Verifies that {@code cleanupBackupDir} throws an {@link IOException} if the FileSystem + * retrieval fails. + * <p> + * This test simulates an exception while trying to obtain the FileSystem, and expects the method + * to propagate the exception. 
+ */ + @Test(expected = IOException.class) + public void testCleanupBackupDir_throwsIOException() throws Exception { + // Setup test input + String backupId = "backup_003"; + String backupRootDir = "/backup/root"; + TableName table = TableName.valueOf("test_table"); + + BackupInfo mockBackupInfo = mock(BackupInfo.class); + when(mockBackupInfo.getBackupRootDir()).thenReturn(backupRootDir); + when(mockBackupInfo.getBackupId()).thenReturn(backupId); + + Configuration conf = new Configuration(); + + // Spy BackupAdminImpl to inject failure in getFileSystem + backupAdminImpl = spy(backupAdminImpl); + doThrow(new IOException("FS error")).when(backupAdminImpl).getFileSystem(any(Path.class), + eq(conf)); + + // Call method and expect IOException + backupAdminImpl.cleanupBackupDir(mockBackupInfo, table, conf); + } + + /** + * Tests that when a current incremental backup is found in the history, all later incremental + * backups for the same table are returned. This simulates rolling forward from the current backup + * timestamp, capturing newer incremental backups that depend on it. 
+ */ + @Test + public void testGetAffectedBackupSessions() throws IOException { + BackupInfo current = mock(BackupInfo.class); + TableName table = TableName.valueOf("test_table"); + + when(current.getStartTs()).thenReturn(2000L); + when(current.getBackupId()).thenReturn("backup_002"); + when(current.getBackupRootDir()).thenReturn("/backup/root"); + + BackupInfo b0 = createBackupInfo("backup_000", 500L, BackupType.FULL, table); + BackupInfo b1 = createBackupInfo("backup_001", 1000L, BackupType.INCREMENTAL, table); + BackupInfo b2 = createBackupInfo("backup_002", 2000L, BackupType.INCREMENTAL, table); // current + BackupInfo b3 = createBackupInfo("backup_003", 3000L, BackupType.INCREMENTAL, table); + BackupInfo b4 = createBackupInfo("backup_004", 4000L, BackupType.INCREMENTAL, table); + + when(mockTable.getBackupHistory("/backup/root")).thenReturn(List.of(b4, b3, b2, b1, b0)); + + List<BackupInfo> result = backupAdminImpl.getAffectedBackupSessions(current, table, mockTable); + + assertEquals(2, result.size()); + assertTrue(result.containsAll(List.of(b3, b4))); + } + + /** + * Tests that if a full backup appears after the current backup, the affected list is reset and + * incremental backups following that full backup are not included. This ensures full backups act + * as a reset boundary. 
+ */ + @Test + public void testGetAffectedBackupSessions_resetsOnFullBackup() throws IOException { + BackupInfo current = mock(BackupInfo.class); + TableName table = TableName.valueOf("test_table"); + + when(current.getStartTs()).thenReturn(1000L); + when(current.getBackupId()).thenReturn("backup_001"); + when(current.getBackupRootDir()).thenReturn("/backup/root"); + + BackupInfo b0 = createBackupInfo("backup_000", 500L, BackupType.FULL, table); + BackupInfo b1 = createBackupInfo("backup_001", 1000L, BackupType.INCREMENTAL, table); // current + BackupInfo b2 = createBackupInfo("backup_002", 2000L, BackupType.FULL, table); + BackupInfo b3 = createBackupInfo("backup_003", 3000L, BackupType.INCREMENTAL, table); + + when(mockTable.getBackupHistory("/backup/root")).thenReturn(List.of(b3, b2, b1, b0)); + + List<BackupInfo> result = backupAdminImpl.getAffectedBackupSessions(current, table, mockTable); + + assertTrue(result.isEmpty()); + } + + /** + * Tests that backups for other tables are ignored, even if they are incremental and fall after + * the current backup. Only backups affecting the specified table should be considered. + */ + @Test + public void testGetAffectedBackupSessions_skipsNonMatchingTable() throws IOException { Review Comment: nit: do we have a test for another table with `BackupType.FULL`, and I assume it's not going to include those unmatched tables. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: issues-unsubscr...@hbase.apache.org For queries about this service, please contact Infrastructure at: us...@infra.apache.org