This is an automated email from the ASF dual-hosted git repository.
kturner pushed a commit to branch elasticity
in repository https://gitbox.apache.org/repos/asf/accumulo.git
The following commit(s) were added to refs/heads/elasticity by this push:
new 92fbdaa228 fixes SplitRecoveryIT to work with newer columns (#4260)
92fbdaa228 is described below
commit 92fbdaa228dec64af4d970e494e51b732113c160
Author: Keith Turner <[email protected]>
AuthorDate: Wed Feb 14 17:12:28 2024 -0500
fixes SplitRecoveryIT to work with newer columns (#4260)
SplitRecoveryIT is a tricky test to maintain because it is testing
upgrade code that deals with older metadata columns that Accumulo
no longer recognizes. This commit adjusts the test to work with
recent changes to the metadata schema in the elasticity branch.
---
.../manager/upgrade/SplitRecovery12to13.java | 52 ---------------
.../accumulo/test/functional/SplitRecoveryIT.java | 73 +++++++++++++++++++---
2 files changed, 63 insertions(+), 62 deletions(-)
diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/SplitRecovery12to13.java b/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/SplitRecovery12to13.java
index 03e4180f8c..b339462503 100644
--- a/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/SplitRecovery12to13.java
+++ b/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/SplitRecovery12to13.java
@@ -22,7 +22,6 @@ import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSec
import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.Upgrade12to13.SPLIT_RATIO_COLUMN;
import java.util.ArrayList;
-import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -33,8 +32,6 @@ import java.util.TreeMap;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.TabletAvailability;
-import org.apache.accumulo.core.client.admin.TimeType;
import org.apache.accumulo.core.clientImpl.ScannerImpl;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
@@ -43,19 +40,12 @@ import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.dataImpl.KeyExtent;
-import org.apache.accumulo.core.fate.FateId;
-import org.apache.accumulo.core.lock.ServiceLock;
-import org.apache.accumulo.core.metadata.ReferencedTabletFile;
import org.apache.accumulo.core.metadata.StoredTabletFile;
-import org.apache.accumulo.core.metadata.TServerInstance;
import org.apache.accumulo.core.metadata.schema.Ample;
-import org.apache.accumulo.core.metadata.schema.Ample.TabletMutator;
import org.apache.accumulo.core.metadata.schema.DataFileValue;
import org.apache.accumulo.core.metadata.schema.ExternalCompactionId;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.metadata.schema.MetadataTime;
-import org.apache.accumulo.core.metadata.schema.TabletMetadata.Location;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.server.ServerContext;
import org.apache.accumulo.server.util.FileUtil;
@@ -68,37 +58,6 @@ public class SplitRecovery12to13 {
private static final Logger log = LoggerFactory.getLogger(SplitRecovery12to13.class);
- public static void addNewTablet(ServerContext context, KeyExtent extent, String dirName,
- TServerInstance tServerInstance, Map<StoredTabletFile,DataFileValue> datafileSizes,
- Map<FateId,? extends Collection<ReferencedTabletFile>> bulkLoadedFiles, MetadataTime time,
- long lastFlushID) {
-
- TabletMutator tablet = context.getAmple().mutateTablet(extent);
- tablet.putPrevEndRow(extent.prevEndRow());
- tablet.putDirName(dirName);
- tablet.putTime(time);
-
- if (lastFlushID > 0) {
- tablet.putFlushId(lastFlushID);
- }
-
- if (tServerInstance != null) {
- tablet.putLocation(Location.current(tServerInstance));
- tablet.deleteLocation(Location.future(tServerInstance));
- }
-
- datafileSizes.forEach((key, value) -> tablet.putFile(key, value));
-
- for (Entry<FateId,? extends Collection<ReferencedTabletFile>> entry : bulkLoadedFiles
- .entrySet()) {
- for (ReferencedTabletFile ref : entry.getValue()) {
- tablet.putBulkFile(ref, entry.getKey());
- }
- }
-
- tablet.mutate();
- }
-
public static KeyExtent fixSplit(ServerContext context, Text metadataEntry)
throws AccumuloException {
var tableId = KeyExtent.fromMetaRow(metadataEntry).tableId();
@@ -286,15 +245,4 @@ public class SplitRecovery12to13 {
finishSplit(extent.toMetaRow(), datafileSizes, highDatafilesToRemove, context);
}
- public static void addTablet(KeyExtent extent, String path, ServerContext context,
- TimeType timeType, ServiceLock zooLock, TabletAvailability tabletAvailability) {
- TabletMutator tablet = context.getAmple().mutateTablet(extent);
- tablet.putPrevEndRow(extent.prevEndRow());
- tablet.putDirName(path);
- tablet.putTime(new MetadataTime(0, timeType));
- tablet.putZooLock(context.getZooKeeperRoot(), zooLock);
- tablet.putTabletAvailability(tabletAvailability);
- tablet.mutate();
-
- }
}
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java b/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
index 082b5de103..64b5bc46a2 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
@@ -38,7 +38,6 @@ import java.util.UUID;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.TabletAvailability;
import org.apache.accumulo.core.client.admin.TimeType;
import org.apache.accumulo.core.clientImpl.ScannerImpl;
import org.apache.accumulo.core.conf.SiteConfiguration;
@@ -165,8 +164,7 @@ public class SplitRecoveryIT extends ConfigurableMacBase {
String dirName = "dir_" + i;
String tdir =
context.getTablesDirs().iterator().next() + "/" + extent.tableId() + "/" + dirName;
- SplitRecovery12to13.addTablet(extent, dirName, context, TimeType.LOGICAL, zl,
- TabletAvailability.ONDEMAND);
+ addTablet(extent, dirName, context, TimeType.LOGICAL, zl);
SortedMap<ReferencedTabletFile,DataFileValue> dataFiles = new TreeMap<>();
dataFiles.put(new ReferencedTabletFile(new Path(tdir + "/" + RFile.EXTENSION + "_000_000")),
new DataFileValue(1000017 + i, 10000 + i));
@@ -191,12 +189,26 @@ public class SplitRecoveryIT extends ConfigurableMacBase {
private static Map<FateId,List<ReferencedTabletFile>> getBulkFilesLoaded(ServerContext context,
KeyExtent extent) {
- Map<FateId,List<ReferencedTabletFile>> bulkFiles = new HashMap<>();
- context.getAmple().readTablet(extent).getLoaded().forEach((path, fateId) -> bulkFiles
- .computeIfAbsent(fateId, k -> new ArrayList<>()).add(path.getTabletFile()));
+ // Ample is not used here because it does not recognize some of the old columns that this
+ // upgrade code is dealing with.
+ try (Scanner scanner =
+ context.createScanner(AccumuloTable.METADATA.tableName(), Authorizations.EMPTY)) {
+ scanner.setRange(extent.toMetaRange());
+
+ Map<FateId,List<ReferencedTabletFile>> bulkFiles = new HashMap<>();
+ for (var entry : scanner) {
+ if (entry.getKey().getColumnFamily().equals(BulkFileColumnFamily.NAME)) {
+ var path = new StoredTabletFile(entry.getKey().getColumnQualifier().toString());
+ var txid = BulkFileColumnFamily.getBulkLoadTid(entry.getValue());
+ bulkFiles.computeIfAbsent(txid, k -> new ArrayList<>()).add(path.getTabletFile());
+ }
+ }
- return bulkFiles;
+ return bulkFiles;
+ } catch (Exception e) {
+ throw new IllegalStateException(e);
+ }
}
private void splitPartiallyAndRecover(ServerContext context, KeyExtent extent, KeyExtent high,
@@ -221,8 +233,8 @@ public class SplitRecoveryIT extends ConfigurableMacBase {
if (steps >= 1) {
Map<FateId,List<ReferencedTabletFile>> bulkFiles = getBulkFilesLoaded(context, high);
- SplitRecovery12to13.addNewTablet(context, low, "lowDir", instance, lowDatafileSizes,
- bulkFiles, new MetadataTime(0, TimeType.LOGICAL), -1L);
+ addNewTablet(context, low, "lowDir", instance, lowDatafileSizes, bulkFiles,
+ new MetadataTime(0, TimeType.LOGICAL), -1L);
}
if (steps >= 2) {
SplitRecovery12to13.finishSplit(high, highDatafileSizes, highDatafilesToRemove, context);
@@ -286,7 +298,6 @@ public class SplitRecoveryIT extends ConfigurableMacBase {
expectedColumnFamilies.add(CurrentLocationColumnFamily.NAME);
expectedColumnFamilies.add(LastLocationColumnFamily.NAME);
expectedColumnFamilies.add(BulkFileColumnFamily.NAME);
- expectedColumnFamilies.add(TabletColumnFamily.NAME);
Iterator<Entry<Key,Value>> iter = scanner.iterator();
@@ -362,4 +373,46 @@ public class SplitRecoveryIT extends ConfigurableMacBase {
public void test() throws Exception {
assertEquals(0, exec(SplitRecoveryIT.class).waitFor());
}
+
+ public static void addTablet(KeyExtent extent, String path, ServerContext context,
+ TimeType timeType, ServiceLock zooLock) {
+ TabletMutator tablet = context.getAmple().mutateTablet(extent);
+ tablet.putPrevEndRow(extent.prevEndRow());
+ tablet.putDirName(path);
+ tablet.putTime(new MetadataTime(0, timeType));
+ tablet.putZooLock(context.getZooKeeperRoot(), zooLock);
+ tablet.mutate();
+
+ }
+
+ public static void addNewTablet(ServerContext context, KeyExtent extent, String dirName,
+ TServerInstance tServerInstance, Map<StoredTabletFile,DataFileValue> datafileSizes,
+ Map<FateId,? extends Collection<ReferencedTabletFile>> bulkLoadedFiles, MetadataTime time,
+ long lastFlushID) {
+
+ TabletMutator tablet = context.getAmple().mutateTablet(extent);
+ tablet.putPrevEndRow(extent.prevEndRow());
+ tablet.putDirName(dirName);
+ tablet.putTime(time);
+
+ if (lastFlushID > 0) {
+ tablet.putFlushId(lastFlushID);
+ }
+
+ if (tServerInstance != null) {
+ tablet.putLocation(Location.current(tServerInstance));
+ tablet.deleteLocation(Location.future(tServerInstance));
+ }
+
+ datafileSizes.forEach((key, value) -> tablet.putFile(key, value));
+
+ for (Entry<FateId,? extends Collection<ReferencedTabletFile>> entry : bulkLoadedFiles
+ .entrySet()) {
+ for (ReferencedTabletFile ref : entry.getValue()) {
+ tablet.putBulkFile(ref, entry.getKey());
+ }
+ }
+
+ tablet.mutate();
+ }
}