This is an automated email from the ASF dual-hosted git repository. edcoleman pushed a commit to branch main in repository https://gitbox.apache.org/repos/asf/accumulo.git
The following commit(s) were added to refs/heads/main by this push: new 227f84fb48 3.1 upgrade process with no chop changes (#3876) 227f84fb48 is described below commit 227f84fb48a10c7c82799f80aa65563860aa6fba Author: EdColeman <d...@etcoleman.com> AuthorDate: Wed Nov 8 19:19:41 2023 -0500 3.1 upgrade process with no chop changes (#3876) 3.1 upgrade process to update file references to include new json encoding with fencing made to support no chop merges. Changes the ZooKeeper root table information and uses an isolated scanner to modify the root table and the metadata table with: - modifies file references to use new json encoding with default fence ranges. - removes obsolete chopped column family references - removes external compaction references to eliminate old file references without range information. The root table info in ZooKeeper is logged before and after the upgrade as an emergency backup if needed. Fixes #3768 --- .../core/metadata/schema/RootTabletMetadata.java | 65 ++++- .../schema/UpgraderDeprecatedConstants.java | 8 +- .../metadata/schema/RootTabletMetadataTest.java | 119 ++++++++ .../accumulo/server/AccumuloDataVersion.java | 6 +- .../server/constraints/MetadataConstraints.java | 5 +- .../apache/accumulo/server/ServerContextTest.java | 5 +- server/manager/pom.xml | 4 + .../manager/upgrade/UpgradeCoordinator.java | 6 +- .../accumulo/manager/upgrade/Upgrader11to12.java | 209 ++++++++++++++ .../manager/upgrade/Upgrader11to12Test.java | 317 +++++++++++++++++++++ 10 files changed, 726 insertions(+), 18 deletions(-) diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/schema/RootTabletMetadata.java b/core/src/main/java/org/apache/accumulo/core/metadata/schema/RootTabletMetadata.java index 7ea03a19a4..8a39e34798 100644 --- a/core/src/main/java/org/apache/accumulo/core/metadata/schema/RootTabletMetadata.java +++ b/core/src/main/java/org/apache/accumulo/core/metadata/schema/RootTabletMetadata.java @@ -36,6 +36,7 @@ import 
org.apache.accumulo.core.data.Key; import org.apache.accumulo.core.data.Mutation; import org.apache.accumulo.core.data.Value; import org.apache.accumulo.core.metadata.RootTable; +import org.apache.accumulo.core.metadata.StoredTabletFile; import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.CurrentLocationColumnFamily; import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.FutureLocationColumnFamily; import org.apache.hadoop.io.Text; @@ -59,7 +60,10 @@ public class RootTabletMetadata { }; // JSON Mapping Version 1. Released with Accumulo version 2.1.0 - private static final int VERSION = 1; + private static final int VERSION_1 = 1; + // JSON Mapping Version 2. Released with Accumulo version 3.1 + private static final int VERSION_2 = 2; + private static final int VERSION = VERSION_2; // This class is used to serialize and deserialize root tablet metadata using GSon. Any changes to // this class must consider persisted data. @@ -73,10 +77,20 @@ public class RootTabletMetadata { */ private final TreeMap<String,TreeMap<String,String>> columnValues; - public Data(int version, TreeMap<String,TreeMap<String,String>> columnValues) { + private Data(int version, TreeMap<String,TreeMap<String,String>> columnValues) { this.version = version; this.columnValues = columnValues; } + + public int getVersion() { + return version; + } + + public static boolean needsUpgrade(final String json) { + var rootData = GSON.get().fromJson(json, Data.class); + int currVersion = rootData.getVersion(); + return currVersion < VERSION; + } } /** @@ -95,8 +109,11 @@ public class RootTabletMetadata { private final Data data; public RootTabletMetadata(String json) { - log.trace("Creating root tablet metadata from stored JSON: {}", json); - this.data = GSON.get().fromJson(json, Data.class); + this(GSON.get().fromJson(json, Data.class)); + } + + private RootTabletMetadata(final Data data) { + this.data = data; checkArgument(data.version == VERSION, 
"Invalid Root Table Metadata JSON version %s", data.version); data.columnValues.forEach((fam, qualVals) -> { @@ -106,7 +123,41 @@ public class RootTabletMetadata { } public RootTabletMetadata() { - this.data = new Data(VERSION, new TreeMap<>()); + data = new Data(VERSION, new TreeMap<>()); + } + + public static RootTabletMetadata upgrade(final String json) { + Data data = GSON.get().fromJson(json, Data.class); + int currVersion = data.getVersion(); + switch (currVersion) { + case VERSION_1: + RootTabletMetadata rtm = new RootTabletMetadata(); + Mutation m = convert1To2(data); + rtm.update(m); + return rtm; + case VERSION_2: + log.debug("no metadata version conversion required for {}", currVersion); + return new RootTabletMetadata(data); + default: + throw new IllegalArgumentException("Unsupported data version: " + currVersion); + } + } + + private static Mutation convert1To2(final Data data) { + Mutation mutation = + MetadataSchema.TabletsSection.TabletColumnFamily.createPrevRowMutation(RootTable.EXTENT); + data.columnValues.forEach((colFam, colQuals) -> { + if (colFam.equals(MetadataSchema.TabletsSection.DataFileColumnFamily.STR_NAME)) { + colQuals.forEach((colQual, value) -> { + mutation.put(colFam, StoredTabletFile.serialize(colQual), value); + }); + } else { + colQuals.forEach((colQual, value) -> { + mutation.put(colFam, colQual, value); + }); + } + }); + return mutation; } /** @@ -158,6 +209,10 @@ public class RootTabletMetadata { EnumSet.allOf(TabletMetadata.ColumnType.class), false); } + public static boolean needsUpgrade(final String json) { + return Data.needsUpgrade(json); + } + /** * @return a JSON representation of the root tablet's data. 
*/ diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/UpgraderConstants.java b/core/src/main/java/org/apache/accumulo/core/metadata/schema/UpgraderDeprecatedConstants.java similarity index 85% rename from server/manager/src/main/java/org/apache/accumulo/manager/upgrade/UpgraderConstants.java rename to core/src/main/java/org/apache/accumulo/core/metadata/schema/UpgraderDeprecatedConstants.java index 51c2a34df1..1d9967cc71 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/UpgraderConstants.java +++ b/core/src/main/java/org/apache/accumulo/core/metadata/schema/UpgraderDeprecatedConstants.java @@ -16,12 +16,16 @@ * specific language governing permissions and limitations * under the License. */ -package org.apache.accumulo.manager.upgrade; +package org.apache.accumulo.core.metadata.schema; import org.apache.accumulo.core.util.ColumnFQ; import org.apache.hadoop.io.Text; -public class UpgraderConstants { +/** + * MetadataSchema constants that are deprecated and should only be used to support removals during + * the upgrade process. + */ +public class UpgraderDeprecatedConstants { /** * ChoppedColumnFamily kept around for cleaning up old entries on upgrade. Currently not used, diff --git a/core/src/test/java/org/apache/accumulo/core/metadata/schema/RootTabletMetadataTest.java b/core/src/test/java/org/apache/accumulo/core/metadata/schema/RootTabletMetadataTest.java new file mode 100644 index 0000000000..6a0a81f93b --- /dev/null +++ b/core/src/test/java/org/apache/accumulo/core/metadata/schema/RootTabletMetadataTest.java @@ -0,0 +1,119 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.accumulo.core.metadata.schema; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.apache.accumulo.core.metadata.StoredTabletFile; +import org.apache.hadoop.fs.Path; +import org.junit.jupiter.api.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class RootTabletMetadataTest { + private static final Logger LOG = LoggerFactory.getLogger(RootTabletMetadataTest.class); + + @Test + public void convertRoot1File() { + String root21ZkData = + "{\"version\":1,\"columnValues\":{\"file\":{\"hdfs://localhost:8020/accumulo/tables/+r/root_tablet/A000000v.rf\":\"1368,61\"},\"last\":{\"100025091780006\":\"localhost:9997\"},\"loc\":{\"100025091780006\":\"localhost:9997\"},\"srv\":{\"dir\":\"root_tablet\",\"flush\":\"3\",\"lock\":\"tservers/localhost:9997/zlock#9db8961a-4ee9-400e-8e80-3353148baadd#0000000000$100025091780006\",\"time\":\"L53\"},\"~tab\":{\"~pr\":\"\\u0000\"}}}"; + + RootTabletMetadata rtm = RootTabletMetadata.upgrade(root21ZkData); + LOG.debug("converted column values: {}", rtm.toTabletMetadata().getFiles()); + + var files = rtm.toTabletMetadata().getFiles(); + LOG.info("FILES: {}", rtm.toTabletMetadata().getFilesMap()); + + assertEquals(1, files.size()); + assertTrue(files.contains(StoredTabletFile + .of(new Path("hdfs://localhost:8020/accumulo/tables/+r/root_tablet/A000000v.rf")))); + } + + 
@Test + public void convertRoot2Files() { + String root212ZkData2Files = + "{\"version\":1,\"columnValues\":{\"file\":{\"hdfs://localhost:8020/accumulo/tables/+r/root_tablet/00000_00000.rf\":\"0,0\",\"hdfs://localhost:8020/accumulo/tables/+r/root_tablet/F000000c.rf\":\"926,18\"},\"last\":{\"10001a84d7d0005\":\"localhost:9997\"},\"loc\":{\"10001a84d7d0005\":\"localhost:9997\"},\"srv\":{\"dir\":\"root_tablet\",\"flush\":\"2\",\"lock\":\"tservers/localhost:9997/zlock#d21adaa4-0f97-4004-9ff8-cce9dbb6687f#0000000000$10001a84d7d0005\",\"time\":\"L6\"},\"~tab\ [...] + + RootTabletMetadata rtm = RootTabletMetadata.upgrade(root212ZkData2Files); + LOG.debug("converted column values: {}", rtm.toTabletMetadata()); + + var files = rtm.toTabletMetadata().getFiles(); + LOG.info("FILES: {}", rtm.toTabletMetadata().getFilesMap()); + + assertEquals(2, files.size()); + assertTrue(files.contains(StoredTabletFile + .of(new Path("hdfs://localhost:8020/accumulo/tables/+r/root_tablet/00000_00000.rf")))); + assertTrue(files.contains(StoredTabletFile + .of(new Path("hdfs://localhost:8020/accumulo/tables/+r/root_tablet/F000000c.rf")))); + } + + @Test + public void needsUpgradeTest() { + String root212ZkData2Files = + "{\"version\":1,\"columnValues\":{\"file\":{\"hdfs://localhost:8020/accumulo/tables/+r/root_tablet/00000_00000.rf\":\"0,0\",\"hdfs://localhost:8020/accumulo/tables/+r/root_tablet/F000000c.rf\":\"926,18\"},\"last\":{\"10001a84d7d0005\":\"localhost:9997\"},\"loc\":{\"10001a84d7d0005\":\"localhost:9997\"},\"srv\":{\"dir\":\"root_tablet\",\"flush\":\"2\",\"lock\":\"tservers/localhost:9997/zlock#d21adaa4-0f97-4004-9ff8-cce9dbb6687f#0000000000$10001a84d7d0005\",\"time\":\"L6\"},\"~tab\ [...] 
+ assertTrue(RootTabletMetadata.needsUpgrade(root212ZkData2Files)); + + String converted = + "{\"version\":2,\"columnValues\":{\"file\":{\"{\\\"path\\\":\\\"hdfs://localhost:8020/accumulo/tables/+r/root_tablet/A0000013.rf\\\",\\\"startRow\\\":\\\"\\\",\\\"endRow\\\":\\\"\\\"}\":\"974,19\",\"{\\\"path\\\":\\\"hdfs://localhost:8020/accumulo/tables/+r/root_tablet/F0000014.rf\\\",\\\"startRow\\\":\\\"\\\",\\\"endRow\\\":\\\"\\\"}\":\"708,8\"},\"last\":{\"100024ec6110005\":\"localhost:9997\"},\"srv\":{\"dir\":\"root_tablet\",\"flush\":\"6\",\"lock\":\"tservers/localhost:9997/ [...] + + assertFalse(RootTabletMetadata.needsUpgrade(converted)); + } + + @Test + public void ignoresConvertedTest() { + String converted = + "{\"version\":2,\"columnValues\":{\"file\":{\"{\\\"path\\\":\\\"hdfs://localhost:8020/accumulo/tables/+r/root_tablet/A0000013.rf\\\",\\\"startRow\\\":\\\"\\\",\\\"endRow\\\":\\\"\\\"}\":\"974,19\",\"{\\\"path\\\":\\\"hdfs://localhost:8020/accumulo/tables/+r/root_tablet/F0000014.rf\\\",\\\"startRow\\\":\\\"\\\",\\\"endRow\\\":\\\"\\\"}\":\"708,8\"},\"last\":{\"100024ec6110005\":\"localhost:9997\"},\"srv\":{\"dir\":\"root_tablet\",\"flush\":\"6\",\"lock\":\"tservers/localhost:9997/ [...] 
+ + assertFalse(RootTabletMetadata.needsUpgrade(converted)); + + RootTabletMetadata rtm = RootTabletMetadata.upgrade(converted); + var files = rtm.toTabletMetadata().getFiles(); + assertEquals(2, files.size()); + assertTrue(files.contains(StoredTabletFile + .of(new Path("hdfs://localhost:8020/accumulo/tables/+r/root_tablet/A0000013.rf")))); + assertTrue(files.contains(StoredTabletFile + .of(new Path("hdfs://localhost:8020/accumulo/tables/+r/root_tablet/F0000014.rf")))); + + } + + @Test + public void invalidVersionTest() { + String valid = + "{\"version\":1,\"columnValues\":{\"file\":{\"hdfs://localhost:8020/accumulo/tables/+r/root_tablet/A000000v.rf\":\"1368,61\"},\"last\":{\"100025091780006\":\"localhost:9997\"},\"loc\":{\"100025091780006\":\"localhost:9997\"},\"srv\":{\"dir\":\"root_tablet\",\"flush\":\"3\",\"lock\":\"tservers/localhost:9997/zlock#9db8961a-4ee9-400e-8e80-3353148baadd#0000000000$100025091780006\",\"time\":\"L53\"},\"~tab\":{\"~pr\":\"\\u0000\"}}}"; + // only version changed to invalid value + String invalid = + "{\"version\":-1,\"columnValues\":{\"file\":{\"hdfs://localhost:8020/accumulo/tables/+r/root_tablet/A000000v.rf\":\"1368,61\"},\"last\":{\"100025091780006\":\"localhost:9997\"},\"loc\":{\"100025091780006\":\"localhost:9997\"},\"srv\":{\"dir\":\"root_tablet\",\"flush\":\"3\",\"lock\":\"tservers/localhost:9997/zlock#9db8961a-4ee9-400e-8e80-3353148baadd#0000000000$100025091780006\",\"time\":\"L53\"},\"~tab\":{\"~pr\":\"\\u0000\"}}}"; + + assertTrue(RootTabletMetadata.needsUpgrade(valid)); + + RootTabletMetadata rtm = RootTabletMetadata.upgrade(valid); + var files = rtm.toTabletMetadata().getFiles(); + assertEquals(1, files.size()); + assertTrue(files.contains(StoredTabletFile + .of(new Path("hdfs://localhost:8020/accumulo/tables/+r/root_tablet/A000000v.rf")))); + + // valid json with files, so try conversion + assertTrue(RootTabletMetadata.needsUpgrade(invalid)); + + assertThrows(IllegalArgumentException.class, () -> 
RootTabletMetadata.upgrade(invalid)); + } +} diff --git a/server/base/src/main/java/org/apache/accumulo/server/AccumuloDataVersion.java b/server/base/src/main/java/org/apache/accumulo/server/AccumuloDataVersion.java index 7c547dcb4e..4aae44a974 100644 --- a/server/base/src/main/java/org/apache/accumulo/server/AccumuloDataVersion.java +++ b/server/base/src/main/java/org/apache/accumulo/server/AccumuloDataVersion.java @@ -77,10 +77,8 @@ public class AccumuloDataVersion { return CURRENT_VERSION; } - // TODO - this disables upgrades until https://github.com/apache/accumulo/issues/3768 is done - // public static final Set<Integer> CAN_RUN = Set.of(ROOT_TABLET_META_CHANGES, - // REMOVE_DEPRECATIONS_FOR_VERSION_3, CURRENT_VERSION); - public static final Set<Integer> CAN_RUN = Set.of(CURRENT_VERSION); + public static final Set<Integer> CAN_RUN = + Set.of(ROOT_TABLET_META_CHANGES, REMOVE_DEPRECATIONS_FOR_VERSION_3, CURRENT_VERSION); /** * Get the stored, current working version. diff --git a/server/base/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java b/server/base/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java index 97dd31f203..936d7f16ef 100644 --- a/server/base/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java +++ b/server/base/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java @@ -51,6 +51,7 @@ import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.Sc import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily; import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.SuspendLocationColumn; import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.TabletColumnFamily; +import org.apache.accumulo.core.metadata.schema.UpgraderDeprecatedConstants; import org.apache.accumulo.core.util.ColumnFQ; import org.apache.accumulo.core.util.cleaner.CleanerUtil; import 
org.apache.accumulo.server.ServerContext; @@ -96,7 +97,9 @@ public class MetadataConstraints implements Constraint { LastLocationColumnFamily.NAME, FutureLocationColumnFamily.NAME, ClonedColumnFamily.NAME, - ExternalCompactionColumnFamily.NAME); + ExternalCompactionColumnFamily.NAME, + UpgraderDeprecatedConstants.ChoppedColumnFamily.NAME + ); // @formatter:on private static boolean isValidColumn(ColumnUpdate cu) { diff --git a/server/base/src/test/java/org/apache/accumulo/server/ServerContextTest.java b/server/base/src/test/java/org/apache/accumulo/server/ServerContextTest.java index fbce43443d..5df5668804 100644 --- a/server/base/src/test/java/org/apache/accumulo/server/ServerContextTest.java +++ b/server/base/src/test/java/org/apache/accumulo/server/ServerContextTest.java @@ -135,10 +135,7 @@ public class ServerContextTest { // ensure this fails with older versions; the oldest supported version is hard-coded here // to ensure we don't unintentionally break upgrade support; changing this should be a conscious // decision and this check will ensure we don't overlook it - - // TODO basically disable check until upgrade to 3.1 is supported. 
Should be: - // final int oldestSupported = AccumuloDataVersion.ROOT_TABLET_META_CHANGES; - final int oldestSupported = AccumuloDataVersion.METADATA_FILE_JSON_ENCODING; + final int oldestSupported = 10; // AccumuloDataVersion.ROOT_TABLET_META_CHANGES; final int currentVersion = AccumuloDataVersion.get(); IntConsumer shouldPass = ServerContext::ensureDataVersionCompatible; IntConsumer shouldFail = v -> assertThrows(IllegalStateException.class, diff --git a/server/manager/pom.xml b/server/manager/pom.xml index 5b84e1d579..0511f60527 100644 --- a/server/manager/pom.xml +++ b/server/manager/pom.xml @@ -100,6 +100,10 @@ <groupId>org.apache.zookeeper</groupId> <artifactId>zookeeper-jute</artifactId> </dependency> + <dependency> + <groupId>org.checkerframework</groupId> + <artifactId>checker-qual</artifactId> + </dependency> <dependency> <groupId>org.slf4j</groupId> <artifactId>slf4j-api</artifactId> diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/UpgradeCoordinator.java b/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/UpgradeCoordinator.java index c4a58dee55..f5fd84c0e1 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/UpgradeCoordinator.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/UpgradeCoordinator.java @@ -18,6 +18,7 @@ */ package org.apache.accumulo.manager.upgrade; +import static org.apache.accumulo.server.AccumuloDataVersion.REMOVE_DEPRECATIONS_FOR_VERSION_3; import static org.apache.accumulo.server.AccumuloDataVersion.ROOT_TABLET_META_CHANGES; import java.io.IOException; @@ -112,8 +113,9 @@ public class UpgradeCoordinator { private int currentVersion; // map of "current version" -> upgrader to next version. 
// Sorted so upgrades execute in order from the oldest supported data version to current - private final Map<Integer,Upgrader> upgraders = Collections - .unmodifiableMap(new TreeMap<>(Map.of(ROOT_TABLET_META_CHANGES, new Upgrader10to11()))); + private final Map<Integer,Upgrader> upgraders = + Collections.unmodifiableMap(new TreeMap<>(Map.of(ROOT_TABLET_META_CHANGES, + new Upgrader10to11(), REMOVE_DEPRECATIONS_FOR_VERSION_3, new Upgrader11to12()))); private volatile UpgradeStatus status; diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/Upgrader11to12.java b/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/Upgrader11to12.java new file mode 100644 index 0000000000..2d9f6b4a07 --- /dev/null +++ b/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/Upgrader11to12.java @@ -0,0 +1,209 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.accumulo.manager.upgrade; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.accumulo.core.metadata.RootTable.ZROOT_TABLET; +import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily; +import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ExternalCompactionColumnFamily; +import static org.apache.accumulo.core.metadata.schema.UpgraderDeprecatedConstants.ChoppedColumnFamily; +import static org.apache.accumulo.server.AccumuloDataVersion.METADATA_FILE_JSON_ENCODING; + +import org.apache.accumulo.core.client.Accumulo; +import org.apache.accumulo.core.client.AccumuloClient; +import org.apache.accumulo.core.client.BatchWriter; +import org.apache.accumulo.core.client.IsolatedScanner; +import org.apache.accumulo.core.client.MutationsRejectedException; +import org.apache.accumulo.core.client.Scanner; +import org.apache.accumulo.core.data.Key; +import org.apache.accumulo.core.data.Mutation; +import org.apache.accumulo.core.data.Value; +import org.apache.accumulo.core.fate.zookeeper.ZooUtil; +import org.apache.accumulo.core.metadata.StoredTabletFile; +import org.apache.accumulo.core.metadata.schema.Ample; +import org.apache.accumulo.core.metadata.schema.RootTabletMetadata; +import org.apache.accumulo.core.security.Authorizations; +import org.apache.accumulo.server.ServerContext; +import org.apache.hadoop.fs.Path; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.data.Stat; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; + +public class Upgrader11to12 implements Upgrader { + + private static final Logger log = LoggerFactory.getLogger(Upgrader11to12.class); + + @Override + public void upgradeZookeeper(@NonNull ServerContext context) { + log.debug("Upgrade ZooKeeper: upgrading to data version 
{}", METADATA_FILE_JSON_ENCODING); + var rootBase = ZooUtil.getRoot(context.getInstanceID()) + ZROOT_TABLET; + + try { + var zrw = context.getZooReaderWriter(); + Stat stat = new Stat(); + byte[] rootData = zrw.getData(rootBase, stat); + + String json = new String(rootData, UTF_8); + if (RootTabletMetadata.needsUpgrade(json)) { + log.info("Root metadata in ZooKeeper before upgrade: {}", json); + RootTabletMetadata rtm = RootTabletMetadata.upgrade(json); + zrw.overwritePersistentData(rootBase, rtm.toJson().getBytes(UTF_8), stat.getVersion()); + log.info("Root metadata in ZooKeeper after upgrade: {}", rtm.toJson()); + } + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + throw new IllegalStateException( + "Could not read root metadata from ZooKeeper due to interrupt", ex); + } catch (KeeperException ex) { + throw new IllegalStateException( + "Could not read or write root metadata in ZooKeeper because of ZooKeeper exception", ex); + } + } + + @Override + public void upgradeRoot(@NonNull ServerContext context) { + log.debug("Upgrade root: upgrading to data version {}", METADATA_FILE_JSON_ENCODING); + var rootName = Ample.DataLevel.METADATA.metaTable(); + processReferences(context, rootName); + } + + @Override + public void upgradeMetadata(@NonNull ServerContext context) { + log.debug("Upgrade metadata: upgrading to data version {}", METADATA_FILE_JSON_ENCODING); + var metaName = Ample.DataLevel.USER.metaTable(); + processReferences(context, metaName); + } + + private void processReferences(ServerContext context, String tableName) { + // not using ample to avoid StoredTabletFile because old file ref is incompatible + try (AccumuloClient c = Accumulo.newClient().from(context.getProperties()).build(); + BatchWriter batchWriter = c.createBatchWriter(tableName); Scanner scanner = + new IsolatedScanner(context.createScanner(tableName, Authorizations.EMPTY))) { + + scanner.fetchColumnFamily(DataFileColumnFamily.NAME); + 
scanner.fetchColumnFamily(ChoppedColumnFamily.NAME); + scanner.fetchColumnFamily(ExternalCompactionColumnFamily.NAME); + scanner.forEach((k, v) -> { + var family = k.getColumnFamily(); + if (family.equals(DataFileColumnFamily.NAME)) { + upgradeDataFileCF(k, v, batchWriter, tableName); + } else if (family.equals(ChoppedColumnFamily.NAME)) { + removeChoppedCF(k, batchWriter, tableName); + } else if (family.equals(ExternalCompactionColumnFamily.NAME)) { + removeExternalCompactionCF(k, batchWriter, tableName); + } else { + throw new IllegalStateException("Processing: " + tableName + + " Received unexpected column family processing references: " + family); + } + }); + } catch (MutationsRejectedException mex) { + log.warn("Failed to update reference for table: " + tableName); + log.warn("Constraint violations: {}", mex.getConstraintViolationSummaries()); + throw new IllegalStateException("Failed to process table: " + tableName, mex); + } catch (Exception ex) { + throw new IllegalStateException("Failed to process table: " + tableName, ex); + } + } + + @VisibleForTesting + void upgradeDataFileCF(final Key key, final Value value, final BatchWriter batchWriter, + final String tableName) { + String file = key.getColumnQualifier().toString(); + // filter out references if they are in the correct format already. 
+ if (fileNeedsConversion(file)) { + var fileJson = StoredTabletFile.of(new Path(file)).getMetadataText(); + try { + Mutation update = new Mutation(key.getRow()); + update.at().family(DataFileColumnFamily.STR_NAME).qualifier(fileJson).put(value); + log.trace("table: {}, adding: {}", tableName, update.prettyPrint()); + batchWriter.addMutation(update); + + Mutation delete = new Mutation(key.getRow()); + delete.at().family(DataFileColumnFamily.STR_NAME).qualifier(file).delete(); + log.trace("table {}: deleting: {}", tableName, delete.prettyPrint()); + batchWriter.addMutation(delete); + } catch (MutationsRejectedException ex) { + // include constraint violation info in log - but stop upgrade + log.warn( + "Failed to update file reference for table: " + tableName + ". row: " + key.getRow()); + log.warn("Constraint violations: {}", ex.getConstraintViolationSummaries()); + throw new IllegalStateException("File conversion failed. Aborting upgrade", ex); + } + } + } + + @VisibleForTesting + void removeChoppedCF(final Key key, final BatchWriter batchWriter, final String tableName) { + Mutation delete = null; + try { + delete = new Mutation(key.getRow()).at().family(ChoppedColumnFamily.STR_NAME) + .qualifier(ChoppedColumnFamily.STR_NAME).delete(); + log.warn( + "Deleting chopped reference from:{}. Previous split or delete may not have completed cleanly. Ref: {}", + tableName, delete.prettyPrint()); + batchWriter.addMutation(delete); + } catch (MutationsRejectedException ex) { + log.warn("Failed to delete obsolete chopped CF reference for table: " + tableName + ". Ref: " + + delete.prettyPrint() + ". Will try to continue. 
Ref may need to be manually removed"); + log.warn("Constraint violations: {}", ex.getConstraintViolationSummaries()); + throw new IllegalStateException( + "Failed to delete obsolete chopped CF reference for table: " + tableName, ex); + } + } + + @VisibleForTesting + void removeExternalCompactionCF(final Key key, final BatchWriter batchWriter, + final String tableName) { + Mutation delete = null; + try { + delete = new Mutation(key.getRow()).at().family(ExternalCompactionColumnFamily.NAME) + .qualifier(key.getColumnQualifier()).delete(); + log.debug( + "Deleting external compaction reference from:{}. Previous compaction may not have completed. Ref: {}", + tableName, delete.prettyPrint()); + batchWriter.addMutation(delete); + } catch (MutationsRejectedException ex) { + log.warn("Failed to delete obsolete external compaction CF reference for table: " + tableName + + ". Ref: " + delete.prettyPrint() + + ". Will try to continue. Ref may need to be manually removed"); + log.warn("Constraint violations: {}", ex.getConstraintViolationSummaries()); + throw new IllegalStateException( + "Failed to delete obsolete external compaction CF reference for table: " + tableName, ex); + } + } + + /** + * Quick validation to see if value has been converted by checking if the candidate looks like + * json by checking the candidate starts with "{" and ends with "}". + * + * @param candidate a possible file: reference. 
+ * @return false if likely a json object, true if not a likely json object + */ + @VisibleForTesting + boolean fileNeedsConversion(@NonNull final String candidate) { + String trimmed = candidate.trim(); + boolean needsConversion = !trimmed.startsWith("{") || !trimmed.endsWith("}"); + log.trace("file: {} needs conversion: {}", candidate, needsConversion); + return needsConversion; + } +} diff --git a/server/manager/src/test/java/org/apache/accumulo/manager/upgrade/Upgrader11to12Test.java b/server/manager/src/test/java/org/apache/accumulo/manager/upgrade/Upgrader11to12Test.java new file mode 100644 index 0000000000..da12f4ede2 --- /dev/null +++ b/server/manager/src/test/java/org/apache/accumulo/manager/upgrade/Upgrader11to12Test.java @@ -0,0 +1,317 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.accumulo.manager.upgrade; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily; +import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ExternalCompactionColumnFamily; +import static org.apache.accumulo.core.metadata.schema.UpgraderDeprecatedConstants.ChoppedColumnFamily; +import static org.easymock.EasyMock.capture; +import static org.easymock.EasyMock.createMock; +import static org.easymock.EasyMock.eq; +import static org.easymock.EasyMock.expect; +import static org.easymock.EasyMock.expectLastCall; +import static org.easymock.EasyMock.newCapture; +import static org.easymock.EasyMock.replay; +import static org.easymock.EasyMock.verify; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import org.apache.accumulo.core.client.BatchWriter; +import org.apache.accumulo.core.client.MutationsRejectedException; +import org.apache.accumulo.core.data.ColumnUpdate; +import org.apache.accumulo.core.data.InstanceId; +import org.apache.accumulo.core.data.Key; +import org.apache.accumulo.core.data.Mutation; +import org.apache.accumulo.core.data.Value; +import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter; +import org.apache.accumulo.core.metadata.StoredTabletFile; +import org.apache.accumulo.server.ServerContext; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.Text; +import org.apache.zookeeper.data.Stat; +import org.easymock.Capture; +import org.junit.jupiter.api.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** Unit tests for the Upgrader11to12 no-chop upgrade helpers; the BatchWriter and ZooKeeper collaborators are EasyMock mocks, so no running cluster is needed. */ +public class Upgrader11to12Test { + + private static final Logger LOG = 
LoggerFactory.getLogger(Upgrader11to12Test.class); + + @Test + void upgradeDataFileCFTest() throws Exception { + Upgrader11to12 upgrader = new Upgrader11to12(); + + BatchWriter bw = createMock(BatchWriter.class); + Capture<Mutation> capturedAdd = newCapture(); + bw.addMutation(capture(capturedAdd)); + expectLastCall(); + + Capture<Mutation> capturedDelete = newCapture(); + bw.addMutation(capture(capturedDelete)); + expectLastCall(); + + replay(bw); + + String fileName = "hdfs://localhost:8020/accumulo/tables/12/default_tablet/A000000v.rf"; + Key k = Key.builder().row(new Text("12;")).family(DataFileColumnFamily.NAME) + .qualifier(new Text(fileName)).build(); + Value v = new Value("1234,5678"); + + upgrader.upgradeDataFileCF(k, v, bw, "aTable"); + + StoredTabletFile stf = StoredTabletFile.of(new Path(fileName)); + Mutation add = new Mutation(k.getRow()).at().family(DataFileColumnFamily.NAME) + .qualifier(stf.getMetadataText()).put(v); + LOG.debug("add mutation to be expected: {}", add.prettyPrint()); + + Mutation delete = new Mutation(k.getRow()).at().family(DataFileColumnFamily.NAME) + .qualifier(new Text(fileName)).delete(); + LOG.debug("delete mutation to be expected: {}", delete.prettyPrint()); + + assertEquals(add, capturedAdd.getValue()); + assertEquals(delete, capturedDelete.getValue()); + + verify(bw); + } + + @Test + void upgradeDataFileCFSkipConvertedTest() { + Upgrader11to12 upgrader = new Upgrader11to12(); + + BatchWriter bw = createMock(BatchWriter.class); + + replay(bw); + + String fileName = "hdfs://localhost:8020/accumulo/tables/12/default_tablet/A000000v.rf"; + StoredTabletFile stf = StoredTabletFile.of(new Path(fileName)); + + Key k = Key.builder().row(new Text("12;")).family(DataFileColumnFamily.NAME) + .qualifier(stf.getMetadataText()).build(); + Value v = new Value("1234,5678"); + + upgrader.upgradeDataFileCF(k, v, bw, "aTable"); + + // with file entry in correct format, no mutations are expected. 
+ verify(bw); + } + + @Test + void upgradeDataFileCFInvalidMutationTest() throws Exception { + Upgrader11to12 upgrader = new Upgrader11to12(); + + BatchWriter bw = createMock(BatchWriter.class); + Capture<Mutation> capturedAdd = newCapture(); + bw.addMutation(capture(capturedAdd)); + expectLastCall().andThrow(new MutationsRejectedException(null, List.of(), Map.of(), List.of(), + 0, new NullPointerException())); + + replay(bw); + + String fileName = "hdfs://localhost:8020/accumulo/tables/12/default_tablet/A000000v.rf"; + Key k = Key.builder().row(new Text("12;")).family(DataFileColumnFamily.NAME) + .qualifier(new Text(fileName)).build(); + Value v = new Value("1234,5678"); + + assertThrows(IllegalStateException.class, () -> upgrader.upgradeDataFileCF(k, v, bw, "aTable")); + + verify(bw); + } + + @Test + void upgradeDataFileCFInvalidPathTest() { + Upgrader11to12 upgrader = new Upgrader11to12(); + + BatchWriter bw = createMock(BatchWriter.class); + + replay(bw); + + String invalidPath = "badPath"; + + Key k = Key.builder().row(new Text("12;")).family(DataFileColumnFamily.NAME) + .qualifier(new Text(invalidPath)).build(); + Value v = new Value("1234,5678"); + + assertThrows(IllegalArgumentException.class, + () -> upgrader.upgradeDataFileCF(k, v, bw, "aTable")); + + verify(bw); + } + + @Test + void removeChoppedCFTest() throws Exception { + Upgrader11to12 upgrader = new Upgrader11to12(); + + /* NOTE(review): the key is built with ExternalCompactionColumnFamily while the expected delete targets ChoppedColumnFamily — presumably removeChoppedCF keys only on the row; confirm against the upgrader implementation. */ + Key k = Key.builder().row(new Text("12;")).family(ExternalCompactionColumnFamily.NAME) + .qualifier(ExternalCompactionColumnFamily.NAME).build(); + + BatchWriter bw = createMock(BatchWriter.class); + Capture<Mutation> captured = newCapture(); + bw.addMutation(capture(captured)); + expectLastCall(); + + replay(bw); + + upgrader.removeChoppedCF(k, bw, "aTable"); + + Mutation delete = new Mutation(k.getRow()).at().family(ChoppedColumnFamily.NAME) + .qualifier(ChoppedColumnFamily.NAME).delete(); + + assertEquals(delete, captured.getValue()); + + verify(bw); + } + + @Test + void 
removeChoppedCFContinuesTest() throws Exception { + Upgrader11to12 upgrader = new Upgrader11to12(); + + Key k = Key.builder().row(new Text("12;")).family(ExternalCompactionColumnFamily.NAME) + .qualifier(ExternalCompactionColumnFamily.NAME).build(); + + BatchWriter bw = createMock(BatchWriter.class); + Capture<Mutation> captured = newCapture(); + bw.addMutation(capture(captured)); + expectLastCall().andThrow(new MutationsRejectedException(null, List.of(), Map.of(), List.of(), + 0, new NullPointerException())); + + replay(bw); + + assertThrows(IllegalStateException.class, () -> upgrader.removeChoppedCF(k, bw, "aTable")); + + verify(bw); + } + + @Test + void removeExternalCompactionCFTest() throws Exception { + Upgrader11to12 upgrader = new Upgrader11to12(); + + Key k = Key.builder().row(new Text("12;")).family(ExternalCompactionColumnFamily.NAME) + .qualifier(new Text("ECID:1234")).build(); + + BatchWriter bw = createMock(BatchWriter.class); + Capture<Mutation> captured = newCapture(); + bw.addMutation(capture(captured)); + expectLastCall(); + + replay(bw); + + upgrader.removeExternalCompactionCF(k, bw, "aTable"); + + Mutation delete = new Mutation(k.getRow()).at().family(ExternalCompactionColumnFamily.NAME) + .qualifier(new Text("ECID:1234")).delete(); + + assertEquals(delete, captured.getValue()); + + for (ColumnUpdate update : captured.getValue().getUpdates()) { + assertEquals(ExternalCompactionColumnFamily.STR_NAME, + new String(update.getColumnFamily(), UTF_8)); + assertEquals("ECID:1234", new String(update.getColumnQualifier(), UTF_8)); + assertTrue(update.isDeleted()); + } + verify(bw); + } + + @Test + void removeExternalCompactionCFContinuesTest() throws Exception { + Upgrader11to12 upgrader = new Upgrader11to12(); + + Key k = Key.builder().row(new Text("12;")).family(ExternalCompactionColumnFamily.NAME) + .qualifier(new Text("ECID:1234")).build(); + + BatchWriter bw = createMock(BatchWriter.class); + Capture<Mutation> captured = newCapture(); + 
bw.addMutation(capture(captured)); + expectLastCall().andThrow(new MutationsRejectedException(null, List.of(), Map.of(), List.of(), + 0, new NullPointerException())); + + replay(bw); + + assertThrows(IllegalStateException.class, + () -> upgrader.removeExternalCompactionCF(k, bw, "aTable")); + + verify(bw); + } + + @Test + public void upgradeZooKeeperTest() throws Exception { + + /* NOTE(review): the two fixture strings below were truncated ("[...]") by the mail formatter in this copy of the patch — restore the full values from the original commit before compiling. */ + // taken from an uno instance. + final byte[] zKRootV1 = + "{\"version\":1,\"columnValues\":{\"file\":{\"hdfs://localhost:8020/accumulo/tables/+r/root_tablet/A0000030.rf\":\"856,15\",\"hdfs://localhost:8020/accumulo/tables/+r/root_tablet/F000000r.rf\":\"308,2\"},\"last\":{\"100017f46240004\":\"localhost:9997\"},\"loc\":{\"100017f46240004\":\"localhost:9997\"},\"srv\":{\"dir\":\"root_tablet\",\"flush\":\"16\",\"lock\":\"tservers/localhost:9997/zlock#f6a582b9-9583-4553-b179-a7a3852c8332#0000000000$100017f46240004\",\"time\":\"L42\"},\"~tab [...] + .getBytes(UTF_8); + final String zKRootV2 = + "{\"version\":2,\"columnValues\":{\"file\":{\"{\\\"path\\\":\\\"hdfs://localhost:8020/accumulo/tables/+r/root_tablet/A0000030.rf\\\",\\\"startRow\\\":\\\"\\\",\\\"endRow\\\":\\\"\\\"}\":\"856,15\",\"{\\\"path\\\":\\\"hdfs://localhost:8020/accumulo/tables/+r/root_tablet/F000000r.rf\\\",\\\"startRow\\\":\\\"\\\",\\\"endRow\\\":\\\"\\\"}\":\"308,2\"},\"last\":{\"100017f46240004\":\"localhost:9997\"},\"loc\":{\"100017f46240004\":\"localhost:9997\"},\"srv\":{\"dir\":\"root_tablet\",\" [...] 
+ + InstanceId iid = InstanceId.of(UUID.randomUUID()); + Upgrader11to12 upgrader = new Upgrader11to12(); + + ServerContext context = createMock(ServerContext.class); + ZooReaderWriter zrw = createMock(ZooReaderWriter.class); + + expect(context.getInstanceID()).andReturn(iid).anyTimes(); + expect(context.getZooReaderWriter()).andReturn(zrw).anyTimes(); + + Capture<Stat> statCapture = newCapture(); + expect(zrw.getData(eq("/accumulo/" + iid.canonical() + "/root_tablet"), capture(statCapture))) + .andAnswer(() -> { + Stat stat = statCapture.getValue(); + stat.setCtime(System.currentTimeMillis()); + stat.setMtime(System.currentTimeMillis()); + stat.setVersion(123); // default version + stat.setDataLength(zKRootV1.length); + statCapture.setValue(stat); + return zKRootV1; + }).once(); + + Capture<byte[]> byteCapture = newCapture(); + expect(zrw.overwritePersistentData(eq("/accumulo/" + iid.canonical() + "/root_tablet"), + capture(byteCapture), eq(123))).andReturn(true).once(); + + replay(context, zrw); + + upgrader.upgradeZookeeper(context); + + assertEquals(zKRootV2, new String(byteCapture.getValue(), UTF_8)); + + verify(context, zrw); + } + + @Test + public void fileConversionTest() { + String s21 = "hdfs://localhost:8020/accumulo/tables/1/t-0000000/A000003v.rf"; + String s31 = + "{\"path\":\"hdfs://localhost:8020/accumulo/tables/1/t-0000000/A000003v.rf\",\"startRow\":\"\",\"endRow\":\"\"}"; + String s31_untrimmed = + " { \"path\":\"hdfs://localhost:8020/accumulo/tables/1/t-0000000/A000003v.rf\",\"startRow\":\"\",\"endRow\":\"\" } "; + + Upgrader11to12 upgrader = new Upgrader11to12(); + + assertTrue(upgrader.fileNeedsConversion(s21)); + assertFalse(upgrader.fileNeedsConversion(s31)); + assertFalse(upgrader.fileNeedsConversion(s31_untrimmed)); + } +}