This is an automated email from the ASF dual-hosted git repository.

kturner pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/accumulo.git
commit 9c6db1f3b63f4acfcd85b83a7ec3c119c7f6f0c8
Merge: fdabdc5133 6f7d3212d0
Author: Keith Turner <ktur...@apache.org>
AuthorDate: Thu May 11 17:14:20 2023 -0400

    Merge remote-tracking branch 'upstream/2.1'

 .../threads/AccumuloUncaughtExceptionHandler.java |  11 ++--
 .../AccumuloUncaughtExceptionHandlerTest.java     |   6 +-
 .../accumulo/tserver/tablet/DatafileManager.java  |  64 +++++++++++-----------
 3 files changed, 40 insertions(+), 41 deletions(-)

diff --cc server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
index eb9f2f8d54,5b349c3f67..beea607422
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
@@@ -330,22 -331,61 +329,21 @@@ class DatafileManager
        long t1, t2;

--      Set<String> unusedWalLogs = tablet.beginClearingUnusedLogs();
-       try {
-         // the order of writing to metadata and walog is important in the face of machine/process
-         // failures need to write to metadata before writing to walog, when things are done in the
-         // reverse order data could be lost... the minor compaction start even should be written
-         // before the following metadata write is made
-         newFile = tablet.updateTabletDataFile(commitSession.getMaxCommittedTime(), newDatafile, dfv,
-             unusedWalLogs, flushId);
-       } finally {
-         tablet.finishClearingUnusedLogs();
-       }
-       @SuppressWarnings("deprecation")
-       boolean replicate = org.apache.accumulo.core.replication.ReplicationConfigurationUtil
-           .isEnabled(tablet.getExtent(), tablet.getTableConfiguration());
-       Set<String> logFileOnly = null;
-       if (replicate) {
-         // unusedWalLogs is of the form host/fileURI, need to strip off the host portion
-         logFileOnly = new HashSet<>();
-         for (String unusedWalLog : unusedWalLogs) {
-           int index = unusedWalLog.indexOf('/');
-           if (index == -1) {
-             log.warn("Could not find host component to strip from DFSLogger representation of WAL");
-           } else {
-             unusedWalLog = unusedWalLog.substring(index + 1);
-           }
-           logFileOnly.add(unusedWalLog);
-         }
--      }
--
        // increment start count before metadata update AND updating in memory map of files
        metadataUpdateCount.updateAndGet(MetadataUpdateCount::incrementStart);

        // do not place any code here between above stmt and try{}finally
        try {
++        Set<String> unusedWalLogs = tablet.beginClearingUnusedLogs();
+         try {
+           // the order of writing to metadata and walog is important in the face of machine/process
+           // failures need to write to metadata before writing to walog, when things are done in the
+           // reverse order data could be lost... the minor compaction start even should be written
+           // before the following metadata write is made
+           newFile = tablet.updateTabletDataFile(commitSession.getMaxCommittedTime(), newDatafile, dfv,
+               unusedWalLogs, flushId);
-
-         // Mark that we have data we want to replicate
-         // This WAL could still be in use by other Tablets *from the same table*, so we can only
-         // mark
-         // that there is data to replicate,
-         // but it is *not* closed. We know it is not closed by the fact that this MinC triggered. A
-         // MinC cannot happen unless the
-         // tablet is online and thus these WALs are referenced by that tablet. Therefore, the WAL
-         // replication status cannot be 'closed'.
-         if (replicate) {
-           if (log.isDebugEnabled()) {
-             log.debug("Recording that data has been ingested into {} using {}", tablet.getExtent(),
-                 logFileOnly);
-           }
-           for (String logFile : logFileOnly) {
-             @SuppressWarnings("deprecation")
-             Status status =
-                 org.apache.accumulo.server.replication.StatusUtil.openWithUnknownLength();
-             ReplicationTableUtil.updateFiles(tablet.getContext(), tablet.getExtent(), logFile,
-                 status);
-           }
-         }
+         } finally {
+           tablet.finishClearingUnusedLogs();
+         }

        do {
          try {
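Note for readers skimming the DatafileManager hunk: the merged result increments the metadata update start count first, then obtains the unused-WAL set and writes the tablet metadata inside a try block whose finally clause always calls finishClearingUnusedLogs(), so the begin/finish pair cannot be left open if the metadata write throws. Below is a minimal sketch of that ordering, under stated assumptions: WalClearingTablet, commitMinorCompaction, and the plain AtomicLong counter are illustrative stand-ins, not the actual Accumulo classes (the real code uses metadataUpdateCount.updateAndGet(MetadataUpdateCount::incrementStart) and passes several more arguments to updateTabletDataFile).

// Illustrative sketch only; names here are hypothetical stand-ins, not Accumulo APIs.
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;

class MinorCompactionOrderingSketch {

  /** Hypothetical collaborator mirroring the begin/finish pairing in the diff. */
  interface WalClearingTablet {
    Set<String> beginClearingUnusedLogs();

    void finishClearingUnusedLogs();

    void updateTabletDataFile(Set<String> unusedWalLogs) throws Exception;
  }

  private final AtomicLong metadataUpdateCount = new AtomicLong();

  void commitMinorCompaction(WalClearingTablet tablet) throws Exception {
    // Bump the start counter before any metadata or in-memory file update,
    // matching the diff's comment: nothing may sit between this statement
    // and the try block that follows.
    metadataUpdateCount.incrementAndGet();

    // Only after the counter is bumped does WAL clearing begin, inside the
    // counted region.
    Set<String> unusedWalLogs = tablet.beginClearingUnusedLogs();
    try {
      // Metadata is written before the WAL bookkeeping is finalized; doing
      // it in the reverse order could lose data after a machine/process failure.
      tablet.updateTabletDataFile(unusedWalLogs);
    } finally {
      // Runs even if the metadata write throws, so the begin/finish pair
      // is never left open.
      tablet.finishClearingUnusedLogs();
    }
  }
}

In the merged code the counted region continues past this point (a do/try retry loop follows, truncated above); the sketch elides that outer structure.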