rmdmattingly commented on code in PR #6506:
URL: https://github.com/apache/hbase/pull/6506#discussion_r1987211573
##########
hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupObserver.java:
##########
@@ -106,4 +123,62 @@ private void registerBulkLoad(ObserverContext<? extends
RegionCoprocessorEnviron
}
}
}
+
+ @Override
+ public void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment>
ctx,
+ TableName tableName) throws IOException {
+ Configuration cfg = ctx.getEnvironment().getConfiguration();
+ if (!BackupManager.isBackupEnabled(cfg)) {
+ LOG.debug("Skipping postDeleteTable hook since backup is disabled");
+ return;
+ }
+ deleteBulkLoads(cfg, tableName, (ignored) -> true);
+ }
+
+ @Override
+ public void postTruncateTable(ObserverContext<MasterCoprocessorEnvironment>
ctx,
+ TableName tableName) throws IOException {
+ Configuration cfg = ctx.getEnvironment().getConfiguration();
+ if (!BackupManager.isBackupEnabled(cfg)) {
+ LOG.debug("Skipping postTruncateTable hook since backup is disabled");
+ return;
+ }
+ deleteBulkLoads(cfg, tableName, (ignored) -> true);
+ }
+
+ @Override
+ public void postModifyTable(final
ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final TableName tableName, TableDescriptor oldDescriptor, TableDescriptor
currentDescriptor)
+ throws IOException {
+ Configuration cfg = ctx.getEnvironment().getConfiguration();
+ if (!BackupManager.isBackupEnabled(cfg)) {
+ LOG.debug("Skipping postModifyTable hook since backup is disabled");
+ return;
+ }
+
+ Set<String> oldFamilies = Arrays.stream(oldDescriptor.getColumnFamilies())
+
.map(ColumnFamilyDescriptor::getNameAsString).collect(Collectors.toSet());
+ Set<String> newFamilies =
Arrays.stream(currentDescriptor.getColumnFamilies())
+
.map(ColumnFamilyDescriptor::getNameAsString).collect(Collectors.toSet());
+
+ Set<String> removedFamilies = Sets.difference(oldFamilies, newFamilies);
+ if (!removedFamilies.isEmpty()) {
+ Predicate<BulkLoad> filter = bulkload ->
removedFamilies.contains(bulkload.getColumnFamily());
+ deleteBulkLoads(cfg, tableName, filter);
+ }
+ }
+
+  /**
+   * Deletes the bulk load entries for the given table that match the provided
predicate.
+   */
+ private void deleteBulkLoads(Configuration config, TableName tableName,
+ Predicate<BulkLoad> filter) throws IOException {
+ try (Connection connection = ConnectionFactory.createConnection(config);
+ BackupSystemTable tbl = new BackupSystemTable(connection)) {
+ List<BulkLoad> bulkLoads = tbl.readBulkloadRows(List.of(tableName));
Review Comment:
This is a concern for another time, but it's only a matter of time until the
lack of pagination in `BackupSystemTable#readBulkloadRows` bites us
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]