This is an automated email from the ASF dual-hosted git repository.
kturner pushed a commit to branch elasticity
in repository https://gitbox.apache.org/repos/asf/accumulo.git
The following commit(s) were added to refs/heads/elasticity by this push:
new eb71e3e4a6 FateId to use UUID instead of long (#4388)
eb71e3e4a6 is described below
commit eb71e3e4a6af3c37ee27c9f5cb3b26277d74b05b
Author: Kevin Rathbun <[email protected]>
AuthorDate: Sat Mar 16 12:25:26 2024 -0400
FateId to use UUID instead of long (#4388)
- FateId now uses a 128bit UUID instead of a (64bit) long
- FateIds created by ZooStore and AccumuloStore now use a v4 UUID
(random UUID)
- FateIds created by FateIdGenerator now use a v3 UUID (name based)
so the same FateKey gives the same UUID
- Necessary updates to classes and tests which used the long id
---
.../accumulo/core/fate/AbstractFateStore.java | 12 +-
.../java/org/apache/accumulo/core/fate/FateId.java | 56 ++++---
.../org/apache/accumulo/core/fate/ZooStore.java | 8 +-
.../accumulo/core/fate/accumulo/AccumuloStore.java | 8 +-
.../core/fate/zookeeper/ZooReservation.java | 2 +-
.../accumulo/core/manager/thrift/TFateId.java | 125 ++++++++--------
core/src/main/thrift/manager.thrift | 2 +-
.../org/apache/accumulo/core/fate/TestStore.java | 4 +-
.../core/metadata/schema/SelectedFilesTest.java | 19 ++-
.../core/metadata/schema/TabletMetadataTest.java | 61 ++++----
.../server/compaction/CompactionConfigStorage.java | 4 +-
.../constraints/MetadataConstraintsTest.java | 52 +++----
.../server/manager/state/TabletManagementTest.java | 8 +-
.../server/util/fateCommand/SummaryReportTest.java | 3 +-
.../server/util/fateCommand/TxnDetailsTest.java | 14 +-
.../accumulo/manager/FateServiceHandler.java | 8 +-
.../coordinator/CompactionCoordinator.java | 3 +-
.../compaction/CompactionCoordinatorTest.java | 6 +-
.../manager/tableOps/ShutdownTServerTest.java | 3 +-
.../manager/tableOps/merge/MergeTabletsTest.java | 4 +-
.../manager/tableOps/split/UpdateTabletsTest.java | 16 +-
.../org/apache/accumulo/test/ScanServerIT.java | 3 +-
.../test/fate/accumulo/AccumuloFateIT.java | 2 +-
.../test/fate/accumulo/AccumuloStoreIT.java | 19 ++-
.../test/fate/accumulo/FateMutatorImplIT.java | 10 +-
.../accumulo/test/fate/accumulo/FateStoreIT.java | 12 +-
.../test/fate/zookeeper/ZooStoreFateIT.java | 2 +-
.../test/fate/zookeeper/ZookeeperFateIT.java | 2 +-
.../test/functional/AmpleConditionalWriterIT.java | 161 ++++++++++-----------
.../test/functional/ManagerAssignmentIT.java | 3 +-
.../apache/accumulo/test/functional/MergeIT.java | 4 +-
.../accumulo/test/functional/SplitRecoveryIT.java | 3 +-
.../functional/TabletManagementIteratorIT.java | 5 +-
33 files changed, 334 insertions(+), 310 deletions(-)
diff --git
a/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java
b/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java
index d805b230b2..0bec78d196 100644
--- a/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java
+++ b/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java
@@ -35,6 +35,7 @@ import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
+import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
@@ -48,8 +49,6 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
-import com.google.common.hash.HashCode;
-import com.google.common.hash.Hashing;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
@@ -64,9 +63,8 @@ public abstract class AbstractFateStore<T> implements
FateStore<T> {
public static final FateIdGenerator DEFAULT_FATE_ID_GENERATOR = new
FateIdGenerator() {
@Override
public FateId fromTypeAndKey(FateInstanceType instanceType, FateKey
fateKey) {
- HashCode hashCode =
Hashing.murmur3_128().hashBytes(fateKey.getSerialized());
- long tid = hashCode.asLong() & 0x7fffffffffffffffL;
- return FateId.from(instanceType, tid);
+ UUID txUUID = UUID.nameUUIDFromBytes(fateKey.getSerialized());
+ return FateId.from(instanceType, txUUID);
}
};
@@ -271,9 +269,9 @@ public abstract class AbstractFateStore<T> implements
FateStore<T> {
// mean a collision
if (status == TStatus.NEW) {
Preconditions.checkState(tFateKey.isPresent(), "Tx Key is missing from
tid %s",
- fateId.getTid());
+ fateId.getTxUUIDStr());
Preconditions.checkState(fateKey.equals(tFateKey.orElseThrow()),
- "Collision detected for tid %s", fateId.getTid());
+ "Collision detected for tid %s", fateId.getTxUUIDStr());
// Case 2: Status is some other state which means already in progress
// so we can just log and return empty optional
} else {
diff --git a/core/src/main/java/org/apache/accumulo/core/fate/FateId.java
b/core/src/main/java/org/apache/accumulo/core/fate/FateId.java
index 8907c6879c..e398ef4efe 100644
--- a/core/src/main/java/org/apache/accumulo/core/fate/FateId.java
+++ b/core/src/main/java/org/apache/accumulo/core/fate/FateId.java
@@ -18,6 +18,7 @@
*/
package org.apache.accumulo.core.fate;
+import java.util.UUID;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@@ -25,21 +26,21 @@ import java.util.stream.Stream;
import org.apache.accumulo.core.data.AbstractId;
import org.apache.accumulo.core.manager.thrift.TFateId;
import org.apache.accumulo.core.manager.thrift.TFateInstanceType;
-import org.apache.accumulo.core.util.FastFormat;
/**
* A strongly typed FATE Transaction ID. This is used to uniquely identify a
FATE transaction.
- * Consists of its {@link FateInstanceType} and its transaction id (long). The
canonical string is
- * of the form "FATE:[FateInstanceType]:[hex long tid]" (without the brackets).
+ * Consists of its {@link FateInstanceType} and its transaction {@link UUID}.
The canonical string
+ * is of the form "FATE:[FateInstanceType]:[UUID]" (without the brackets).
*/
public class FateId extends AbstractId<FateId> {
private static final long serialVersionUID = 1L;
private static final String PREFIX = "FATE:";
- private static final Pattern HEX_PATTERN = Pattern.compile("^[0-9a-fA-F]+$");
+ private static final String UUID_REGEX =
"[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}";
+ private static final Pattern UUID_PATTERN = Pattern.compile("^" + UUID_REGEX
+ "$");
private static final Pattern FATEID_PATTERN = Pattern.compile("^" + PREFIX +
"("
- +
Stream.of(FateInstanceType.values()).map(Enum::name).collect(Collectors.joining("|"))
- + "):[0-9a-fA-F]+$");
+ +
Stream.of(FateInstanceType.values()).map(Enum::name).collect(Collectors.joining("|"))
+ "):"
+ + UUID_REGEX + "$");
private FateId(String canonical) {
super(canonical);
@@ -53,16 +54,16 @@ public class FateId extends AbstractId<FateId> {
}
/**
- * @return the decimal value of the transaction id
+ * @return the transaction {@link UUID}
*/
- public long getTid() {
- return Long.parseLong(getHexTid(), 16);
+ public UUID getTxUUID() {
+ return UUID.fromString(getTxUUIDStr());
}
/**
- * @return the hexadecimal value of the transaction id
+ * @return the transaction {@link UUID} as a String
*/
- public String getHexTid() {
+ public String getTxUUIDStr() {
return canonical().split(":")[2];
}
@@ -70,25 +71,25 @@ public class FateId extends AbstractId<FateId> {
* Creates a new FateId object from the given parameters
*
* @param type the {@link FateInstanceType}
- * @param tid the decimal transaction id
+ * @param txUUID the {@link UUID}
* @return a new FateId object
*/
- public static FateId from(FateInstanceType type, long tid) {
- return new FateId(PREFIX + type + ":" + formatTid(tid));
+ public static FateId from(FateInstanceType type, UUID txUUID) {
+ return new FateId(PREFIX + type + ":" + txUUID);
}
/**
* Creates a new FateId object from the given parameters
*
* @param type the {@link FateInstanceType}
- * @param hexTid the hexadecimal transaction id
+ * @param txUUIDStr the transaction {@link UUID} as a String
* @return a new FateId object
*/
- public static FateId from(FateInstanceType type, String hexTid) {
- if (HEX_PATTERN.matcher(hexTid).matches()) {
- return new FateId(PREFIX + type + ":" + hexTid);
+ public static FateId from(FateInstanceType type, String txUUIDStr) {
+ if (UUID_PATTERN.matcher(txUUIDStr).matches()) {
+ return new FateId(PREFIX + type + ":" + txUUIDStr);
} else {
- throw new IllegalArgumentException("Invalid Hex Transaction ID: " +
hexTid);
+ throw new IllegalArgumentException("Invalid Transaction UUID: " +
txUUIDStr);
}
}
@@ -118,7 +119,7 @@ public class FateId extends AbstractId<FateId> {
*/
public static FateId fromThrift(TFateId tFateId) {
FateInstanceType type;
- long tid = tFateId.getTid();
+ String txUUIDStr = tFateId.getTxUUIDStr();
switch (tFateId.getType()) {
case USER:
@@ -131,7 +132,11 @@ public class FateId extends AbstractId<FateId> {
throw new IllegalArgumentException("Invalid TFateInstanceType: " +
tFateId.getType());
}
- return new FateId(PREFIX + type + ":" + formatTid(tid));
+ if (UUID_PATTERN.matcher(txUUIDStr).matches()) {
+ return new FateId(PREFIX + type + ":" + txUUIDStr);
+ } else {
+ throw new IllegalArgumentException("Invalid Transaction UUID: " +
txUUIDStr);
+ }
}
/**
@@ -151,13 +156,6 @@ public class FateId extends AbstractId<FateId> {
default:
throw new IllegalArgumentException("Invalid FateInstanceType: " +
type);
}
- return new TFateId(thriftType, getTid());
- }
-
- /**
- * Returns the hex string equivalent of the tid
- */
- public static String formatTid(long tid) {
- return FastFormat.toHexString(tid);
+ return new TFateId(thriftType, getTxUUIDStr());
}
}
diff --git a/core/src/main/java/org/apache/accumulo/core/fate/ZooStore.java
b/core/src/main/java/org/apache/accumulo/core/fate/ZooStore.java
index af6fd233de..77f7a50a6e 100644
--- a/core/src/main/java/org/apache/accumulo/core/fate/ZooStore.java
+++ b/core/src/main/java/org/apache/accumulo/core/fate/ZooStore.java
@@ -20,7 +20,6 @@ package org.apache.accumulo.core.fate;
import static
com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
-import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
@@ -32,6 +31,7 @@ import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
+import java.util.UUID;
import java.util.function.Supplier;
import java.util.stream.Stream;
@@ -62,7 +62,7 @@ public class ZooStore<T> extends AbstractFateStore<T> {
private ZooReaderWriter zk;
private String getTXPath(FateId fateId) {
- return path + "/tx_" + fateId.getHexTid();
+ return path + "/tx_" + fateId.getTxUUIDStr();
}
public ZooStore(String path, ZooReaderWriter zk) throws KeeperException,
InterruptedException {
@@ -88,9 +88,7 @@ public class ZooStore<T> extends AbstractFateStore<T> {
public FateId create() {
while (true) {
try {
- // looking at the code for SecureRandom, it appears to be thread safe
- long tid = RANDOM.get().nextLong() & 0x7fffffffffffffffL;
- FateId fateId = FateId.from(fateInstanceType, tid);
+ FateId fateId = FateId.from(fateInstanceType, UUID.randomUUID());
zk.putPersistentData(getTXPath(fateId), new
NodeValue(TStatus.NEW).serialize(),
NodeExistsPolicy.FAIL);
return fateId;
diff --git
a/core/src/main/java/org/apache/accumulo/core/fate/accumulo/AccumuloStore.java
b/core/src/main/java/org/apache/accumulo/core/fate/accumulo/AccumuloStore.java
index 24ccb43baf..86438326c1 100644
---
a/core/src/main/java/org/apache/accumulo/core/fate/accumulo/AccumuloStore.java
+++
b/core/src/main/java/org/apache/accumulo/core/fate/accumulo/AccumuloStore.java
@@ -18,13 +18,12 @@
*/
package org.apache.accumulo.core.fate.accumulo;
-import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
-
import java.io.Serializable;
import java.util.List;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Optional;
+import java.util.UUID;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@@ -116,8 +115,7 @@ public class AccumuloStore<T> extends AbstractFateStore<T> {
}
public FateId getFateId() {
- long tid = RANDOM.get().nextLong() & 0x7fffffffffffffffL;
- return FateId.from(fateInstanceType, tid);
+ return FateId.from(fateInstanceType, UUID.randomUUID());
}
@Override
@@ -250,7 +248,7 @@ public class AccumuloStore<T> extends AbstractFateStore<T> {
}
public static String getRowId(FateId fateId) {
- return "tx_" + fateId.getHexTid();
+ return "tx_" + fateId.getTxUUIDStr();
}
private FateMutatorImpl<T> newMutator(FateId fateId) {
diff --git
a/core/src/main/java/org/apache/accumulo/core/fate/zookeeper/ZooReservation.java
b/core/src/main/java/org/apache/accumulo/core/fate/zookeeper/ZooReservation.java
index 8c6a918301..88d7cbedef 100644
---
a/core/src/main/java/org/apache/accumulo/core/fate/zookeeper/ZooReservation.java
+++
b/core/src/main/java/org/apache/accumulo/core/fate/zookeeper/ZooReservation.java
@@ -30,7 +30,7 @@ import org.slf4j.LoggerFactory;
public class ZooReservation {
- private static final String DELIMITER = "-";
+ private static final String DELIMITER = "_";
public static boolean attempt(ZooReaderWriter zk, String path, FateId
fateId, String debugInfo)
throws KeeperException, InterruptedException {
diff --git
a/core/src/main/thrift-gen-java/org/apache/accumulo/core/manager/thrift/TFateId.java
b/core/src/main/thrift-gen-java/org/apache/accumulo/core/manager/thrift/TFateId.java
index d781407397..eab7a23068 100644
---
a/core/src/main/thrift-gen-java/org/apache/accumulo/core/manager/thrift/TFateId.java
+++
b/core/src/main/thrift-gen-java/org/apache/accumulo/core/manager/thrift/TFateId.java
@@ -29,7 +29,7 @@ public class TFateId implements
org.apache.thrift.TBase<TFateId, TFateId._Fields
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new
org.apache.thrift.protocol.TStruct("TFateId");
private static final org.apache.thrift.protocol.TField TYPE_FIELD_DESC = new
org.apache.thrift.protocol.TField("type", org.apache.thrift.protocol.TType.I32,
(short)1);
- private static final org.apache.thrift.protocol.TField TID_FIELD_DESC = new
org.apache.thrift.protocol.TField("tid", org.apache.thrift.protocol.TType.I64,
(short)2);
+ private static final org.apache.thrift.protocol.TField TX_UUIDSTR_FIELD_DESC
= new org.apache.thrift.protocol.TField("txUUIDStr",
org.apache.thrift.protocol.TType.STRING, (short)2);
private static final org.apache.thrift.scheme.SchemeFactory
STANDARD_SCHEME_FACTORY = new TFateIdStandardSchemeFactory();
private static final org.apache.thrift.scheme.SchemeFactory
TUPLE_SCHEME_FACTORY = new TFateIdTupleSchemeFactory();
@@ -39,7 +39,7 @@ public class TFateId implements
org.apache.thrift.TBase<TFateId, TFateId._Fields
* @see TFateInstanceType
*/
public @org.apache.thrift.annotation.Nullable TFateInstanceType type; //
required
- public long tid; // required
+ public @org.apache.thrift.annotation.Nullable java.lang.String txUUIDStr; //
required
/** The set of fields this struct contains, along with convenience methods
for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -48,7 +48,7 @@ public class TFateId implements
org.apache.thrift.TBase<TFateId, TFateId._Fields
* @see TFateInstanceType
*/
TYPE((short)1, "type"),
- TID((short)2, "tid");
+ TX_UUIDSTR((short)2, "txUUIDStr");
private static final java.util.Map<java.lang.String, _Fields> byName = new
java.util.HashMap<java.lang.String, _Fields>();
@@ -66,8 +66,8 @@ public class TFateId implements
org.apache.thrift.TBase<TFateId, TFateId._Fields
switch(fieldId) {
case 1: // TYPE
return TYPE;
- case 2: // TID
- return TID;
+ case 2: // TX_UUIDSTR
+ return TX_UUIDSTR;
default:
return null;
}
@@ -111,15 +111,13 @@ public class TFateId implements
org.apache.thrift.TBase<TFateId, TFateId._Fields
}
// isset id assignments
- private static final int __TID_ISSET_ID = 0;
- private byte __isset_bitfield = 0;
public static final java.util.Map<_Fields,
org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
new java.util.EnumMap<_Fields,
org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.TYPE, new
org.apache.thrift.meta_data.FieldMetaData("type",
org.apache.thrift.TFieldRequirementType.DEFAULT,
new
org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM,
TFateInstanceType.class)));
- tmpMap.put(_Fields.TID, new
org.apache.thrift.meta_data.FieldMetaData("tid",
org.apache.thrift.TFieldRequirementType.DEFAULT,
- new
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+ tmpMap.put(_Fields.TX_UUIDSTR, new
org.apache.thrift.meta_data.FieldMetaData("txUUIDStr",
org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TFateId.class,
metaDataMap);
}
@@ -129,23 +127,23 @@ public class TFateId implements
org.apache.thrift.TBase<TFateId, TFateId._Fields
public TFateId(
TFateInstanceType type,
- long tid)
+ java.lang.String txUUIDStr)
{
this();
this.type = type;
- this.tid = tid;
- setTidIsSet(true);
+ this.txUUIDStr = txUUIDStr;
}
/**
* Performs a deep copy on <i>other</i>.
*/
public TFateId(TFateId other) {
- __isset_bitfield = other.__isset_bitfield;
if (other.isSetType()) {
this.type = other.type;
}
- this.tid = other.tid;
+ if (other.isSetTxUUIDStr()) {
+ this.txUUIDStr = other.txUUIDStr;
+ }
}
@Override
@@ -156,8 +154,7 @@ public class TFateId implements
org.apache.thrift.TBase<TFateId, TFateId._Fields
@Override
public void clear() {
this.type = null;
- setTidIsSet(false);
- this.tid = 0;
+ this.txUUIDStr = null;
}
/**
@@ -193,27 +190,29 @@ public class TFateId implements
org.apache.thrift.TBase<TFateId, TFateId._Fields
}
}
- public long getTid() {
- return this.tid;
+ @org.apache.thrift.annotation.Nullable
+ public java.lang.String getTxUUIDStr() {
+ return this.txUUIDStr;
}
- public TFateId setTid(long tid) {
- this.tid = tid;
- setTidIsSet(true);
+ public TFateId setTxUUIDStr(@org.apache.thrift.annotation.Nullable
java.lang.String txUUIDStr) {
+ this.txUUIDStr = txUUIDStr;
return this;
}
- public void unsetTid() {
- __isset_bitfield =
org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TID_ISSET_ID);
+ public void unsetTxUUIDStr() {
+ this.txUUIDStr = null;
}
- /** Returns true if field tid is set (has been assigned a value) and false
otherwise */
- public boolean isSetTid() {
- return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield,
__TID_ISSET_ID);
+ /** Returns true if field txUUIDStr is set (has been assigned a value) and
false otherwise */
+ public boolean isSetTxUUIDStr() {
+ return this.txUUIDStr != null;
}
- public void setTidIsSet(boolean value) {
- __isset_bitfield =
org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TID_ISSET_ID, value);
+ public void setTxUUIDStrIsSet(boolean value) {
+ if (!value) {
+ this.txUUIDStr = null;
+ }
}
@Override
@@ -227,11 +226,11 @@ public class TFateId implements
org.apache.thrift.TBase<TFateId, TFateId._Fields
}
break;
- case TID:
+ case TX_UUIDSTR:
if (value == null) {
- unsetTid();
+ unsetTxUUIDStr();
} else {
- setTid((java.lang.Long)value);
+ setTxUUIDStr((java.lang.String)value);
}
break;
@@ -245,8 +244,8 @@ public class TFateId implements
org.apache.thrift.TBase<TFateId, TFateId._Fields
case TYPE:
return getType();
- case TID:
- return getTid();
+ case TX_UUIDSTR:
+ return getTxUUIDStr();
}
throw new java.lang.IllegalStateException();
@@ -262,8 +261,8 @@ public class TFateId implements
org.apache.thrift.TBase<TFateId, TFateId._Fields
switch (field) {
case TYPE:
return isSetType();
- case TID:
- return isSetTid();
+ case TX_UUIDSTR:
+ return isSetTxUUIDStr();
}
throw new java.lang.IllegalStateException();
}
@@ -290,12 +289,12 @@ public class TFateId implements
org.apache.thrift.TBase<TFateId, TFateId._Fields
return false;
}
- boolean this_present_tid = true;
- boolean that_present_tid = true;
- if (this_present_tid || that_present_tid) {
- if (!(this_present_tid && that_present_tid))
+ boolean this_present_txUUIDStr = true && this.isSetTxUUIDStr();
+ boolean that_present_txUUIDStr = true && that.isSetTxUUIDStr();
+ if (this_present_txUUIDStr || that_present_txUUIDStr) {
+ if (!(this_present_txUUIDStr && that_present_txUUIDStr))
return false;
- if (this.tid != that.tid)
+ if (!this.txUUIDStr.equals(that.txUUIDStr))
return false;
}
@@ -310,7 +309,9 @@ public class TFateId implements
org.apache.thrift.TBase<TFateId, TFateId._Fields
if (isSetType())
hashCode = hashCode * 8191 + type.getValue();
- hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(tid);
+ hashCode = hashCode * 8191 + ((isSetTxUUIDStr()) ? 131071 : 524287);
+ if (isSetTxUUIDStr())
+ hashCode = hashCode * 8191 + txUUIDStr.hashCode();
return hashCode;
}
@@ -333,12 +334,12 @@ public class TFateId implements
org.apache.thrift.TBase<TFateId, TFateId._Fields
return lastComparison;
}
}
- lastComparison = java.lang.Boolean.compare(isSetTid(), other.isSetTid());
+ lastComparison = java.lang.Boolean.compare(isSetTxUUIDStr(),
other.isSetTxUUIDStr());
if (lastComparison != 0) {
return lastComparison;
}
- if (isSetTid()) {
- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tid,
other.tid);
+ if (isSetTxUUIDStr()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txUUIDStr,
other.txUUIDStr);
if (lastComparison != 0) {
return lastComparison;
}
@@ -375,8 +376,12 @@ public class TFateId implements
org.apache.thrift.TBase<TFateId, TFateId._Fields
}
first = false;
if (!first) sb.append(", ");
- sb.append("tid:");
- sb.append(this.tid);
+ sb.append("txUUIDStr:");
+ if (this.txUUIDStr == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.txUUIDStr);
+ }
first = false;
sb.append(")");
return sb.toString();
@@ -397,8 +402,6 @@ public class TFateId implements
org.apache.thrift.TBase<TFateId, TFateId._Fields
private void readObject(java.io.ObjectInputStream in) throws
java.io.IOException, java.lang.ClassNotFoundException {
try {
- // it doesn't seem like you should have to do this, but java
serialization is wacky, and doesn't call the default constructor.
- __isset_bitfield = 0;
read(new org.apache.thrift.protocol.TCompactProtocol(new
org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
@@ -433,10 +436,10 @@ public class TFateId implements
org.apache.thrift.TBase<TFateId, TFateId._Fields
org.apache.thrift.protocol.TProtocolUtil.skip(iprot,
schemeField.type);
}
break;
- case 2: // TID
- if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
- struct.tid = iprot.readI64();
- struct.setTidIsSet(true);
+ case 2: // TX_UUIDSTR
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.txUUIDStr = iprot.readString();
+ struct.setTxUUIDStrIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot,
schemeField.type);
}
@@ -462,9 +465,11 @@ public class TFateId implements
org.apache.thrift.TBase<TFateId, TFateId._Fields
oprot.writeI32(struct.type.getValue());
oprot.writeFieldEnd();
}
- oprot.writeFieldBegin(TID_FIELD_DESC);
- oprot.writeI64(struct.tid);
- oprot.writeFieldEnd();
+ if (struct.txUUIDStr != null) {
+ oprot.writeFieldBegin(TX_UUIDSTR_FIELD_DESC);
+ oprot.writeString(struct.txUUIDStr);
+ oprot.writeFieldEnd();
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -487,15 +492,15 @@ public class TFateId implements
org.apache.thrift.TBase<TFateId, TFateId._Fields
if (struct.isSetType()) {
optionals.set(0);
}
- if (struct.isSetTid()) {
+ if (struct.isSetTxUUIDStr()) {
optionals.set(1);
}
oprot.writeBitSet(optionals, 2);
if (struct.isSetType()) {
oprot.writeI32(struct.type.getValue());
}
- if (struct.isSetTid()) {
- oprot.writeI64(struct.tid);
+ if (struct.isSetTxUUIDStr()) {
+ oprot.writeString(struct.txUUIDStr);
}
}
@@ -508,8 +513,8 @@ public class TFateId implements
org.apache.thrift.TBase<TFateId, TFateId._Fields
struct.setTypeIsSet(true);
}
if (incoming.get(1)) {
- struct.tid = iprot.readI64();
- struct.setTidIsSet(true);
+ struct.txUUIDStr = iprot.readString();
+ struct.setTxUUIDStrIsSet(true);
}
}
}
diff --git a/core/src/main/thrift/manager.thrift
b/core/src/main/thrift/manager.thrift
index f87298e301..f9b5882f07 100644
--- a/core/src/main/thrift/manager.thrift
+++ b/core/src/main/thrift/manager.thrift
@@ -171,7 +171,7 @@ enum TFateInstanceType {
struct TFateId {
1:TFateInstanceType type
- 2:i64 tid
+ 2:string txUUIDStr
}
service FateService {
diff --git a/core/src/test/java/org/apache/accumulo/core/fate/TestStore.java
b/core/src/test/java/org/apache/accumulo/core/fate/TestStore.java
index 50046a4b9b..db2d7da770 100644
--- a/core/src/test/java/org/apache/accumulo/core/fate/TestStore.java
+++ b/core/src/test/java/org/apache/accumulo/core/fate/TestStore.java
@@ -27,6 +27,7 @@ import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
+import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;
@@ -43,7 +44,6 @@ import org.apache.accumulo.core.util.Pair;
*/
public class TestStore implements FateStore<String> {
- private long nextId = 1;
private final Map<FateId,Pair<TStatus,Optional<FateKey>>> statuses = new
HashMap<>();
private final Map<FateId,Map<Fate.TxInfo,Serializable>> txInfos = new
HashMap<>();
private final Set<FateId> reserved = new HashSet<>();
@@ -51,7 +51,7 @@ public class TestStore implements FateStore<String> {
@Override
public FateId create() {
- FateId fateId = FateId.from(fateInstanceType, nextId++);
+ FateId fateId = FateId.from(fateInstanceType, UUID.randomUUID());
statuses.put(fateId, new Pair<>(TStatus.NEW, Optional.empty()));
return fateId;
}
diff --git
a/core/src/test/java/org/apache/accumulo/core/metadata/schema/SelectedFilesTest.java
b/core/src/test/java/org/apache/accumulo/core/metadata/schema/SelectedFilesTest.java
index a0a02f3d1c..a8bd1e67da 100644
---
a/core/src/test/java/org/apache/accumulo/core/metadata/schema/SelectedFilesTest.java
+++
b/core/src/test/java/org/apache/accumulo/core/metadata/schema/SelectedFilesTest.java
@@ -29,6 +29,7 @@ import java.util.List;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
+import java.util.UUID;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@@ -53,7 +54,7 @@ public class SelectedFilesTest {
@Test
public void testSerializationDeserialization() {
Set<StoredTabletFile> files = getStoredTabletFiles(2);
- FateId fateId = FateId.from(FateInstanceType.META, 12345L);
+ FateId fateId = FateId.from(FateInstanceType.META, UUID.randomUUID());
SelectedFiles original = new SelectedFiles(files, true, fateId);
@@ -70,7 +71,7 @@ public class SelectedFilesTest {
@Test
public void testEqualSerialization() {
Set<StoredTabletFile> files = getStoredTabletFiles(16);
- FateId fateId = FateId.from(FateInstanceType.META, 12345L);
+ FateId fateId = FateId.from(FateInstanceType.META, UUID.randomUUID());
SelectedFiles sf1 = new SelectedFiles(files, true, fateId);
SelectedFiles sf2 = new SelectedFiles(files, true, fateId);
@@ -87,7 +88,7 @@ public class SelectedFilesTest {
public void testDifferentFilesOrdering() {
Set<StoredTabletFile> files = getStoredTabletFiles(16);
SortedSet<StoredTabletFile> sortedFiles = new TreeSet<>(files);
- FateId fateId = FateId.from(FateInstanceType.META, 654123L);
+ FateId fateId = FateId.from(FateInstanceType.META, UUID.randomUUID());
assertEquals(files, sortedFiles, "Entries in test file sets should be the
same");
assertNotEquals(files.toString(), sortedFiles.toString(),
@@ -108,7 +109,7 @@ public class SelectedFilesTest {
public void testJsonSuperSetSubset() {
Set<StoredTabletFile> filesSuperSet = getStoredTabletFiles(3);
Set<StoredTabletFile> filesSubSet = new HashSet<>(filesSuperSet);
- FateId fateId = FateId.from(FateInstanceType.META, 123456L);
+ FateId fateId = FateId.from(FateInstanceType.META, UUID.randomUUID());
// Remove an element to create a subset
filesSubSet.remove(filesSubSet.iterator().next());
@@ -133,9 +134,11 @@ public class SelectedFilesTest {
}
private static Stream<Arguments> provideTestJsons() {
- return Stream.of(Arguments.of("FATE:META:123456", true, 12),
- Arguments.of("FATE:META:123456", false, 12),
Arguments.of("FATE:META:123456", false, 23),
- Arguments.of("FATE:META:654321", false, 23),
Arguments.of("FATE:META:AE56E", false, 23));
+ return
Stream.of(Arguments.of("FATE:META:12345678-9abc-def1-2345-6789abcdef12", true,
12),
+ Arguments.of("FATE:META:12345678-9abc-def1-2345-6789abcdef12", false,
12),
+ Arguments.of("FATE:META:12345678-9abc-def1-2345-6789abcdef12", false,
23),
+ Arguments.of("FATE:META:abcdef12-3456-789a-bcde-f123456789ab", false,
23),
+ Arguments.of("FATE:META:41b40c7c-55e5-4d3b-8d21-1b70d1e7f3fb", false,
23));
}
/**
@@ -175,7 +178,7 @@ public class SelectedFilesTest {
*
* <pre>
* {
- * "fateId": "FATE:META:123456",
+ * "fateId": "FATE:META:12345678-9abc-def1-2345-6789abcdef12",
* "selAll": true,
* "files": ["/path/to/file1.rf", "/path/to/file2.rf"]
* }
diff --git
a/core/src/test/java/org/apache/accumulo/core/metadata/schema/TabletMetadataTest.java
b/core/src/test/java/org/apache/accumulo/core/metadata/schema/TabletMetadataTest.java
index 3e1f45786e..eda2f2f2ba 100644
---
a/core/src/test/java/org/apache/accumulo/core/metadata/schema/TabletMetadataTest.java
+++
b/core/src/test/java/org/apache/accumulo/core/metadata/schema/TabletMetadataTest.java
@@ -99,8 +99,8 @@ public class TabletMetadataTest {
Mutation mutation = TabletColumnFamily.createPrevRowMutation(extent);
FateInstanceType type = FateInstanceType.fromTableId(extent.tableId());
- FateId fateId56L = FateId.from(type, 56L);
- FateId fateId59L = FateId.from(type, 59L);
+ FateId fateId1 = FateId.from(type, UUID.randomUUID());
+ FateId fateId2 = FateId.from(type, UUID.randomUUID());
DIRECTORY_COLUMN.put(mutation, new Value("t-0001757"));
FLUSH_COLUMN.put(mutation, new Value("6"));
@@ -108,8 +108,8 @@ public class TabletMetadataTest {
String bf1 = serialize("hdfs://nn1/acc/tables/1/t-0001/bf1");
String bf2 = serialize("hdfs://nn1/acc/tables/1/t-0001/bf2");
-
mutation.at().family(BulkFileColumnFamily.NAME).qualifier(bf1).put(fateId56L.canonical());
-
mutation.at().family(BulkFileColumnFamily.NAME).qualifier(bf2).put(fateId59L.canonical());
+
mutation.at().family(BulkFileColumnFamily.NAME).qualifier(bf1).put(fateId1.canonical());
+
mutation.at().family(BulkFileColumnFamily.NAME).qualifier(bf2).put(fateId2.canonical());
mutation.at().family(ClonedColumnFamily.NAME).qualifier("").put("OK");
@@ -135,8 +135,8 @@ public class TabletMetadataTest {
mutation.at().family(ScanFileColumnFamily.NAME).qualifier(sf2.getMetadata()).put("");
MERGED_COLUMN.put(mutation, new Value());
- mutation.put(UserCompactionRequestedColumnFamily.STR_NAME,
FateId.from(type, 17).canonical(),
- "");
+ FateId userCompactFateId = FateId.from(type, UUID.randomUUID());
+ mutation.put(UserCompactionRequestedColumnFamily.STR_NAME,
userCompactFateId.canonical(), "");
var unsplittableMeta =
UnSplittableMetadata.toUnSplittable(extent, 100, 110, 120, Set.of(sf1,
sf2));
SplitColumnFamily.UNSPLITTABLE_COLUMN.put(mutation, new
Value(unsplittableMeta.toBase64()));
@@ -156,7 +156,7 @@ public class TabletMetadataTest {
tm.getFileSize());
assertEquals(6L, tm.getFlushId().getAsLong());
assertEquals(rowMap, tm.getKeyValues());
- assertEquals(Map.of(new StoredTabletFile(bf1), fateId56L, new
StoredTabletFile(bf2), fateId59L),
+ assertEquals(Map.of(new StoredTabletFile(bf1), fateId1, new
StoredTabletFile(bf2), fateId2),
tm.getLoaded());
assertEquals(HostAndPort.fromParts("server1", 8555),
tm.getLocation().getHostAndPort());
assertEquals("s001", tm.getLocation().getSession());
@@ -172,7 +172,7 @@ public class TabletMetadataTest {
assertEquals("M123456789", tm.getTime().encode());
assertEquals(Set.of(sf1, sf2), Set.copyOf(tm.getScans()));
assertTrue(tm.hasMerged());
-    assertTrue(tm.getUserCompactionsRequested().contains(FateId.from(type, 17)));
+    assertTrue(tm.getUserCompactionsRequested().contains(userCompactFateId));
assertEquals(unsplittableMeta, tm.getUnSplittable());
}
@@ -344,19 +344,19 @@ public class TabletMetadataTest {
public void testCompactionRequestedColumn() {
KeyExtent extent = new KeyExtent(TableId.of("5"), new Text("df"), new
Text("da"));
FateInstanceType type = FateInstanceType.fromTableId(extent.tableId());
+ FateId userCompactFateId1 = FateId.from(type, UUID.randomUUID());
+ FateId userCompactFateId2 = FateId.from(type, UUID.randomUUID());
// Test column set
Mutation mutation = TabletColumnFamily.createPrevRowMutation(extent);
-    mutation.put(UserCompactionRequestedColumnFamily.STR_NAME, FateId.from(type, 17).canonical(),
-        "");
-    mutation.put(UserCompactionRequestedColumnFamily.STR_NAME, FateId.from(type, 18).canonical(),
-        "");
+    mutation.put(UserCompactionRequestedColumnFamily.STR_NAME, userCompactFateId1.canonical(), "");
+    mutation.put(UserCompactionRequestedColumnFamily.STR_NAME, userCompactFateId2.canonical(), "");
TabletMetadata tm =
TabletMetadata.convertRow(toRowMap(mutation).entrySet().iterator(),
EnumSet.of(USER_COMPACTION_REQUESTED), true, false);
assertEquals(2, tm.getUserCompactionsRequested().size());
-    assertTrue(tm.getUserCompactionsRequested().contains(FateId.from(type, 17)));
-    assertTrue(tm.getUserCompactionsRequested().contains(FateId.from(type, 18)));
+    assertTrue(tm.getUserCompactionsRequested().contains(userCompactFateId1));
+    assertTrue(tm.getUserCompactionsRequested().contains(userCompactFateId2));
// Column not set
mutation = TabletColumnFamily.createPrevRowMutation(extent);
@@ -503,11 +503,16 @@ public class TabletMetadataTest {
StoredTabletFile sf4 =
new ReferencedTabletFile(new
Path("hdfs://nn1/acc/tables/1/t-0001/sf4.rf")).insert();
+ FateId loadedFateId1 = FateId.from(type, UUID.randomUUID());
+ FateId loadedFateId2 = FateId.from(type, UUID.randomUUID());
+ FateId compactFateId1 = FateId.from(type, UUID.randomUUID());
+ FateId compactFateId2 = FateId.from(type, UUID.randomUUID());
+
TabletMetadata tm = TabletMetadata.builder(extent)
.putTabletAvailability(TabletAvailability.UNHOSTED).putLocation(Location.future(ser1))
-        .putFile(sf1, dfv1).putFile(sf2, dfv2).putBulkFile(rf1, FateId.from(type, 25))
-        .putBulkFile(rf2, FateId.from(type, 35)).putFlushId(27).putDirName("dir1").putScan(sf3)
-        .putScan(sf4).putCompacted(FateId.from(type, 17)).putCompacted(FateId.from(type, 23))
+        .putFile(sf1, dfv1).putFile(sf2, dfv2).putBulkFile(rf1, loadedFateId1)
+        .putBulkFile(rf2, loadedFateId2).putFlushId(27).putDirName("dir1").putScan(sf3).putScan(sf4)
+        .putCompacted(compactFateId1).putCompacted(compactFateId2)
.build(ECOMP, HOSTING_REQUESTED, MERGED, USER_COMPACTION_REQUESTED,
UNSPLITTABLE);
assertEquals(extent, tm.getExtent());
@@ -517,12 +522,11 @@ public class TabletMetadataTest {
assertEquals(Map.of(sf1, dfv1, sf2, dfv2), tm.getFilesMap());
assertEquals(tm.getFilesMap().values().stream().mapToLong(DataFileValue::getSize).sum(),
tm.getFileSize());
-    assertEquals(Map.of(rf1.insert(), FateId.from(type, 25L), rf2.insert(), FateId.from(type, 35L)),
-        tm.getLoaded());
+    assertEquals(Map.of(rf1.insert(), loadedFateId1, rf2.insert(), loadedFateId2), tm.getLoaded());
assertEquals("dir1", tm.getDirName());
assertEquals(Set.of(sf3, sf4), Set.copyOf(tm.getScans()));
assertEquals(Set.of(), tm.getExternalCompactions().keySet());
-    assertEquals(Set.of(FateId.from(type, 17L), FateId.from(type, 23L)), tm.getCompacted());
+    assertEquals(Set.of(compactFateId1, compactFateId2), tm.getCompacted());
assertFalse(tm.getHostingRequested());
assertTrue(tm.getUserCompactionsRequested().isEmpty());
assertFalse(tm.hasMerged());
@@ -532,7 +536,7 @@ public class TabletMetadataTest {
assertThrows(IllegalStateException.class, tm::getTime);
TabletOperationId opid1 =
-        TabletOperationId.from(TabletOperationType.SPLITTING, FateId.from(type, 55));
+        TabletOperationId.from(TabletOperationType.SPLITTING, FateId.from(type, UUID.randomUUID()));
TabletMetadata tm2 =
TabletMetadata.builder(extent).putOperation(opid1).build(LOCATION);
assertEquals(extent, tm2.getExtent());
@@ -555,21 +559,22 @@ public class TabletMetadataTest {
assertThrows(IllegalStateException.class, tm2::getUnSplittable);
var ecid1 = ExternalCompactionId.generate(UUID.randomUUID());
-    CompactionMetadata ecm = new CompactionMetadata(Set.of(sf1, sf2), rf1, "cid1",
-        CompactionKind.USER, (short) 3, CompactorGroupId.of("Q1"), true, FateId.from(type, 99L));
+    CompactionMetadata ecm =
+        new CompactionMetadata(Set.of(sf1, sf2), rf1, "cid1", CompactionKind.USER, (short) 3,
+            CompactorGroupId.of("Q1"), true, FateId.from(type, UUID.randomUUID()));
LogEntry le1 = LogEntry.fromPath("localhost+8020/" + UUID.randomUUID());
LogEntry le2 = LogEntry.fromPath("localhost+8020/" + UUID.randomUUID());
-    SelectedFiles selFiles = new SelectedFiles(Set.of(sf1, sf4), false, FateId.from(type, 159L));
+    FateId selFilesFateId = FateId.from(type, UUID.randomUUID());
+    SelectedFiles selFiles = new SelectedFiles(Set.of(sf1, sf4), false, selFilesFateId);
var unsplittableMeta =
UnSplittableMetadata.toUnSplittable(extent, 100, 110, 120, Set.of(sf1,
sf2));
TabletMetadata tm3 =
TabletMetadata.builder(extent).putExternalCompaction(ecid1, ecm)
.putSuspension(ser1, 45L).putTime(new MetadataTime(479,
TimeType.LOGICAL)).putWal(le1)
.putWal(le2).setHostingRequested().putSelectedFiles(selFiles).setMerged()
-        .putUserCompactionRequested(FateId.from(type, 159L)).setUnSplittable(unsplittableMeta)
-        .build();
+        .putUserCompactionRequested(selFilesFateId).setUnSplittable(unsplittableMeta).build();
assertEquals(Set.of(ecid1), tm3.getExternalCompactions().keySet());
assertEquals(Set.of(sf1, sf2),
tm3.getExternalCompactions().get(ecid1).getJobFiles());
@@ -580,11 +585,11 @@ public class TabletMetadataTest {
assertEquals(Stream.of(le1, le2).map(LogEntry::toString).collect(toSet()),
tm3.getLogs().stream().map(LogEntry::toString).collect(toSet()));
assertEquals(Set.of(sf1, sf4), tm3.getSelectedFiles().getFiles());
- assertEquals(FateId.from(type, 159L), tm3.getSelectedFiles().getFateId());
+ assertEquals(selFilesFateId, tm3.getSelectedFiles().getFateId());
assertFalse(tm3.getSelectedFiles().initiallySelectedAll());
assertEquals(selFiles.getMetadataValue(),
tm3.getSelectedFiles().getMetadataValue());
assertTrue(tm3.hasMerged());
-    assertTrue(tm3.getUserCompactionsRequested().contains(FateId.from(type, 159L)));
+    assertTrue(tm3.getUserCompactionsRequested().contains(selFilesFateId));
assertEquals(unsplittableMeta, tm3.getUnSplittable());
}
diff --git
a/server/base/src/main/java/org/apache/accumulo/server/compaction/CompactionConfigStorage.java
b/server/base/src/main/java/org/apache/accumulo/server/compaction/CompactionConfigStorage.java
index da0233a215..0b0bedb645 100644
---
a/server/base/src/main/java/org/apache/accumulo/server/compaction/CompactionConfigStorage.java
+++
b/server/base/src/main/java/org/apache/accumulo/server/compaction/CompactionConfigStorage.java
@@ -42,11 +42,11 @@ import org.apache.zookeeper.KeeperException;
import com.google.common.base.Preconditions;
public class CompactionConfigStorage {
- static final String DELIMITER = "-";
+ static final String DELIMITER = "_";
private static String createPath(ServerContext context, FateId fateId) {
return context.getZooKeeperRoot() + Constants.ZCOMPACTIONS + "/" +
fateId.getType() + DELIMITER
- + fateId.getHexTid();
+ + fateId.getTxUUIDStr();
}
public static byte[] encodeConfig(CompactionConfig config, TableId tableId) {
diff --git
a/server/base/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
b/server/base/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
index 5f3132acd3..b36fb31583 100644
---
a/server/base/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
+++
b/server/base/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
@@ -28,6 +28,7 @@ import java.lang.reflect.Method;
import java.util.Base64;
import java.util.List;
import java.util.Set;
+import java.util.UUID;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
@@ -147,15 +148,15 @@ public class MetadataConstraintsTest {
MetadataConstraints mc = new MetadataConstraints();
Mutation m;
List<Short> violations;
- FateId fateId5L = FateId.from(FateInstanceType.META, 5L);
- FateId fateId7L = FateId.from(FateInstanceType.META, 7L);
+ FateId fateId1 = FateId.from(FateInstanceType.META, UUID.randomUUID());
+ FateId fateId2 = FateId.from(FateInstanceType.META, UUID.randomUUID());
// loaded marker w/ file
m = new Mutation(new Text("0;foo"));
m.put(
BulkFileColumnFamily.NAME, StoredTabletFile
.of(new
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
- new Value(fateId5L.canonical()));
+ new Value(fateId1.canonical()));
m.put(
DataFileColumnFamily.NAME, StoredTabletFile
.of(new
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
@@ -168,7 +169,7 @@ public class MetadataConstraintsTest {
m.put(
BulkFileColumnFamily.NAME, StoredTabletFile
.of(new
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
- new Value(fateId5L.canonical()));
+ new Value(fateId1.canonical()));
assertViolation(mc, m, (short) 8);
// two files w/ same txid
@@ -176,7 +177,7 @@ public class MetadataConstraintsTest {
m.put(
BulkFileColumnFamily.NAME, StoredTabletFile
.of(new
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
- new Value(fateId5L.canonical()));
+ new Value(fateId1.canonical()));
m.put(
DataFileColumnFamily.NAME, StoredTabletFile
.of(new
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
@@ -184,7 +185,7 @@ public class MetadataConstraintsTest {
m.put(
BulkFileColumnFamily.NAME, StoredTabletFile
.of(new
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile2")).getMetadataText(),
- new Value(fateId5L.canonical()));
+ new Value(fateId1.canonical()));
m.put(
DataFileColumnFamily.NAME, StoredTabletFile
.of(new
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile2")).getMetadataText(),
@@ -197,7 +198,7 @@ public class MetadataConstraintsTest {
m.put(
BulkFileColumnFamily.NAME, StoredTabletFile
.of(new
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
- new Value(fateId5L.canonical()));
+ new Value(fateId1.canonical()));
m.put(
DataFileColumnFamily.NAME, StoredTabletFile
.of(new
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
@@ -205,7 +206,7 @@ public class MetadataConstraintsTest {
m.put(
BulkFileColumnFamily.NAME, StoredTabletFile
.of(new
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile2")).getMetadataText(),
- new Value(fateId7L.canonical()));
+ new Value(fateId2.canonical()));
m.put(
DataFileColumnFamily.NAME, StoredTabletFile
.of(new
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile2")).getMetadataText(),
@@ -217,7 +218,7 @@ public class MetadataConstraintsTest {
m.put(
BulkFileColumnFamily.NAME, StoredTabletFile
.of(new
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
- new Value(fateId5L.canonical()));
+ new Value(fateId1.canonical()));
m.put(
DataFileColumnFamily.NAME, StoredTabletFile
.of(new
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
@@ -225,7 +226,7 @@ public class MetadataConstraintsTest {
m.put(
BulkFileColumnFamily.NAME, StoredTabletFile
.of(new
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile2")).getMetadataText(),
- new Value(fateId5L.canonical()));
+ new Value(fateId1.canonical()));
assertViolation(mc, m, (short) 8);
// mutation that looks like split
@@ -233,7 +234,7 @@ public class MetadataConstraintsTest {
m.put(
BulkFileColumnFamily.NAME, StoredTabletFile
.of(new
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
- new Value(fateId5L.canonical()));
+ new Value(fateId1.canonical()));
ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value("/t1"));
violations = mc.check(createEnv(), m);
assertNull(violations);
@@ -243,7 +244,7 @@ public class MetadataConstraintsTest {
m.put(
BulkFileColumnFamily.NAME, StoredTabletFile
.of(new
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
- new Value(fateId5L.canonical()));
+ new Value(fateId1.canonical()));
m.put(CurrentLocationColumnFamily.NAME, new Text("789"), new
Value("127.0.0.1:9997"));
violations = mc.check(createEnv(), m);
assertNull(violations);
@@ -261,7 +262,7 @@ public class MetadataConstraintsTest {
new Text(StoredTabletFile.of(new
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"))
.getMetadata()
.replace("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile",
"/someFile")),
- new Value(fateId5L.canonical()));
+ new Value(fateId1.canonical()));
assertViolation(mc, m, (short) 12);
// Missing tables directory in path
@@ -270,20 +271,20 @@ public class MetadataConstraintsTest {
new Text(StoredTabletFile.of(new
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"))
.getMetadata().replace("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile",
"hdfs://1.2.3.4/accumulo/2a/t-0003/someFile")),
- new Value(fateId5L.canonical()));
+ new Value(fateId1.canonical()));
assertViolation(mc, m, (short) 12);
m = new Mutation(new Text("0;foo"));
m.put(
BulkFileColumnFamily.NAME, StoredTabletFile
.of(new
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
- new Value(fateId5L.canonical()));
+ new Value(fateId1.canonical()));
assertViolation(mc, m, (short) 8);
// Bad Json - only path (old format) so should fail parsing
m = new Mutation(new Text("0;foo"));
m.put(BulkFileColumnFamily.NAME, new
Text("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"),
- new Value(fateId5L.canonical()));
+ new Value(fateId1.canonical()));
assertViolation(mc, m, (short) 12);
// Bad Json - test startRow key is missing so validation should fail
@@ -292,7 +293,7 @@ public class MetadataConstraintsTest {
m.put(BulkFileColumnFamily.NAME,
new Text(
"{\"path\":\"hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile\",\"endRow\":\"\"}"),
- new Value(fateId5L.canonical()));
+ new Value(fateId1.canonical()));
assertViolation(mc, m, (short) 12);
// Bad Json - test path key replaced with empty string so validation
should fail
@@ -301,7 +302,7 @@ public class MetadataConstraintsTest {
m.put(
BulkFileColumnFamily.NAME, new Text(StoredTabletFile
.serialize("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile").replace("path",
"")),
- new Value(fateId5L.canonical()));
+ new Value(fateId1.canonical()));
assertViolation(mc, m, (short) 12);
// Bad Json - test path value missing
@@ -310,7 +311,7 @@ public class MetadataConstraintsTest {
m.put(BulkFileColumnFamily.NAME,
new Text(StoredTabletFile.of(new
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"))
.getMetadata().replaceFirst("\"path\":\".*\",\"startRow",
"\"path\":\"\",\"startRow")),
- new Value(fateId5L.canonical()));
+ new Value(fateId1.canonical()));
assertViolation(mc, m, (short) 12);
// Bad Json - test startRow key replaced with empty string so validation
should fail
@@ -318,7 +319,7 @@ public class MetadataConstraintsTest {
m = new Mutation(new Text("0;foo"));
m.put(BulkFileColumnFamily.NAME, new Text(StoredTabletFile
.serialize("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile").replace("startRow",
"")),
- new Value(fateId5L.canonical()));
+ new Value(fateId1.canonical()));
assertViolation(mc, m, (short) 12);
// Bad Json - test endRow key missing so validation should fail
@@ -326,7 +327,7 @@ public class MetadataConstraintsTest {
m.put(
BulkFileColumnFamily.NAME, new Text(StoredTabletFile
.serialize("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile").replace("endRow",
"")),
- new Value(fateId5L.canonical()));
+ new Value(fateId1.canonical()));
assertViolation(mc, m, (short) 12);
// Bad Json - endRow will be replaced with encoded row without the
exclusive byte 0x00 which is
@@ -337,7 +338,7 @@ public class MetadataConstraintsTest {
.of(new Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"),
new Range("a", "b"))
.getMetadata().replaceFirst("\"endRow\":\".*\"",
"\"endRow\":\"" + encodeRowForMetadata("bad") + "\"")),
- new Value(fateId5L.canonical()));
+ new Value(fateId1.canonical()));
assertViolation(mc, m, (short) 12);
}
@@ -468,7 +469,8 @@ public class MetadataConstraintsTest {
assertViolation(mc, m, (short) 9);
m = new Mutation(new Text("0;foo"));
-    ServerColumnFamily.OPID_COLUMN.put(m, new Value("MERGING:FATE:META:123abc"));
+    ServerColumnFamily.OPID_COLUMN.put(m,
+        new Value("MERGING:FATE:META:12345678-9abc-def1-2345-6789abcdef12"));
violations = mc.check(createEnv(), m);
assertNull(violations);
}
@@ -478,7 +480,7 @@ public class MetadataConstraintsTest {
MetadataConstraints mc = new MetadataConstraints();
Mutation m;
List<Short> violations;
- FateId fateId = FateId.from(FateInstanceType.META, 42L);
+ FateId fateId = FateId.from(FateInstanceType.META, UUID.randomUUID());
m = new Mutation(new Text("0;foo"));
ServerColumnFamily.SELECTED_COLUMN.put(m, new Value("bad id"));
@@ -512,7 +514,7 @@ public class MetadataConstraintsTest {
MetadataConstraints mc = new MetadataConstraints();
Mutation m;
List<Short> violations;
- FateId fateId = FateId.from(FateInstanceType.META, 45L);
+ FateId fateId = FateId.from(FateInstanceType.META, UUID.randomUUID());
m = new Mutation(new Text("0;foo"));
m.put(column, fateId.canonical(), "");
diff --git
a/server/base/src/test/java/org/apache/accumulo/server/manager/state/TabletManagementTest.java
b/server/base/src/test/java/org/apache/accumulo/server/manager/state/TabletManagementTest.java
index 49729c6d4b..9bc99cd0c4 100644
---
a/server/base/src/test/java/org/apache/accumulo/server/manager/state/TabletManagementTest.java
+++
b/server/base/src/test/java/org/apache/accumulo/server/manager/state/TabletManagementTest.java
@@ -73,8 +73,8 @@ public class TabletManagementTest {
Mutation mutation = TabletColumnFamily.createPrevRowMutation(extent);
FateInstanceType type = FateInstanceType.fromTableId(extent.tableId());
- FateId fateId56L = FateId.from(type, 56L);
- FateId fateId59L = FateId.from(type, 59L);
+ FateId fateId1 = FateId.from(type, UUID.randomUUID());
+ FateId fateId2 = FateId.from(type, UUID.randomUUID());
DIRECTORY_COLUMN.put(mutation, new Value("t-0001757"));
FLUSH_COLUMN.put(mutation, new Value("6"));
@@ -85,9 +85,9 @@ public class TabletManagementTest {
StoredTabletFile bf2 =
new ReferencedTabletFile(new
Path("hdfs://nn1/acc/tables/1/t-0001/bf2")).insert();
mutation.at().family(BulkFileColumnFamily.NAME).qualifier(bf1.getMetadata())
- .put(fateId56L.canonical());
+ .put(fateId1.canonical());
mutation.at().family(BulkFileColumnFamily.NAME).qualifier(bf2.getMetadata())
- .put(fateId59L.canonical());
+ .put(fateId2.canonical());
mutation.at().family(ClonedColumnFamily.NAME).qualifier("").put("OK");
diff --git
a/server/base/src/test/java/org/apache/accumulo/server/util/fateCommand/SummaryReportTest.java
b/server/base/src/test/java/org/apache/accumulo/server/util/fateCommand/SummaryReportTest.java
index 4618715fbb..8b880b8767 100644
---
a/server/base/src/test/java/org/apache/accumulo/server/util/fateCommand/SummaryReportTest.java
+++
b/server/base/src/test/java/org/apache/accumulo/server/util/fateCommand/SummaryReportTest.java
@@ -29,6 +29,7 @@ import static org.junit.jupiter.api.Assertions.assertNotNull;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.UUID;
import java.util.concurrent.TimeUnit;
import org.apache.accumulo.core.fate.AdminUtil;
@@ -73,7 +74,7 @@ class SummaryReportTest {
expect(status1.getStatus()).andReturn(ReadOnlyFateStore.TStatus.IN_PROGRESS).anyTimes();
expect(status1.getTop()).andReturn(null).anyTimes();
expect(status1.getTxName()).andReturn(null).anyTimes();
-    expect(status1.getFateId()).andReturn(FateId.from("FATE:USER:abcdabcd")).anyTimes();
+    expect(status1.getFateId()).andReturn(FateId.from("FATE:USER:" + UUID.randomUUID())).anyTimes();
expect(status1.getHeldLocks()).andReturn(List.of()).anyTimes();
expect(status1.getWaitingLocks()).andReturn(List.of()).anyTimes();
diff --git
a/server/base/src/test/java/org/apache/accumulo/server/util/fateCommand/TxnDetailsTest.java
b/server/base/src/test/java/org/apache/accumulo/server/util/fateCommand/TxnDetailsTest.java
index 35be83fce8..fb3d77a706 100644
---
a/server/base/src/test/java/org/apache/accumulo/server/util/fateCommand/TxnDetailsTest.java
+++
b/server/base/src/test/java/org/apache/accumulo/server/util/fateCommand/TxnDetailsTest.java
@@ -30,6 +30,7 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
+import java.util.UUID;
import java.util.concurrent.TimeUnit;
import org.apache.accumulo.core.fate.AdminUtil;
@@ -47,6 +48,9 @@ class TxnDetailsTest {
void orderingByDuration() {
Map<String,String> idMap = Map.of("1", "ns1", "2", "tbl1");
+ UUID uuid1 = UUID.randomUUID();
+ UUID uuid2 = UUID.randomUUID();
+
long now = System.currentTimeMillis();
AdminUtil.TransactionStatus status1 =
createMock(AdminUtil.TransactionStatus.class);
@@ -54,7 +58,7 @@ class TxnDetailsTest {
expect(status1.getStatus()).andReturn(ReadOnlyFateStore.TStatus.IN_PROGRESS).anyTimes();
expect(status1.getTop()).andReturn("step1").anyTimes();
expect(status1.getTxName()).andReturn("runningTx1").anyTimes();
-    expect(status1.getFateId()).andReturn(FateId.from("FATE:USER:abcdabcd")).anyTimes();
+    expect(status1.getFateId()).andReturn(FateId.from("FATE:USER:" + uuid1)).anyTimes();
expect(status1.getHeldLocks()).andReturn(List.of()).anyTimes();
expect(status1.getWaitingLocks()).andReturn(List.of()).anyTimes();
@@ -63,7 +67,7 @@ class TxnDetailsTest {
expect(status2.getStatus()).andReturn(ReadOnlyFateStore.TStatus.IN_PROGRESS).anyTimes();
expect(status2.getTop()).andReturn("step2").anyTimes();
expect(status2.getTxName()).andReturn("runningTx2").anyTimes();
-    expect(status2.getFateId()).andReturn(FateId.from("FATE:USER:123456789")).anyTimes();
+    expect(status2.getFateId()).andReturn(FateId.from("FATE:USER:" + uuid2)).anyTimes();
expect(status2.getHeldLocks()).andReturn(List.of()).anyTimes();
expect(status2.getWaitingLocks()).andReturn(List.of()).anyTimes();
@@ -80,8 +84,8 @@ class TxnDetailsTest {
Iterator<FateTxnDetails> itor = sorted.iterator();
- assertTrue(itor.next().toString().contains("123456789"));
- assertTrue(itor.next().toString().contains("abcdabcd"));
+ assertTrue(itor.next().toString().contains(uuid2.toString()));
+ assertTrue(itor.next().toString().contains(uuid1.toString()));
verify(status1, status2);
}
@@ -97,7 +101,7 @@ class TxnDetailsTest {
expect(status1.getStatus()).andReturn(ReadOnlyFateStore.TStatus.IN_PROGRESS).anyTimes();
expect(status1.getTop()).andReturn("step1").anyTimes();
expect(status1.getTxName()).andReturn("runningTx").anyTimes();
-    expect(status1.getFateId()).andReturn(FateId.from("FATE:USER:abcdabcd")).anyTimes();
+    expect(status1.getFateId()).andReturn(FateId.from("FATE:USER:" + UUID.randomUUID())).anyTimes();
// incomplete lock info (W unknown ns id, no table))
expect(status1.getHeldLocks()).andReturn(List.of("R:1", "R:2",
"W:a")).anyTimes();
// blank names
diff --git
a/server/manager/src/main/java/org/apache/accumulo/manager/FateServiceHandler.java
b/server/manager/src/main/java/org/apache/accumulo/manager/FateServiceHandler.java
index a530d80a15..5c2ae952d0 100644
---
a/server/manager/src/main/java/org/apache/accumulo/manager/FateServiceHandler.java
+++
b/server/manager/src/main/java/org/apache/accumulo/manager/FateServiceHandler.java
@@ -127,7 +127,7 @@ class FateServiceHandler implements FateService.Iface {
throws ThriftSecurityException {
authenticate(credentials);
return new TFateId(type,
-        manager.fate(FateInstanceType.fromThrift(type)).startTransaction().getTid());
+        manager.fate(FateInstanceType.fromThrift(type)).startTransaction().getTxUUIDStr());
}
@Override
@@ -136,9 +136,9 @@ class FateServiceHandler implements FateService.Iface {
throws ThriftSecurityException, ThriftTableOperationException,
ThriftPropertyException {
authenticate(c);
String goalMessage = op.toString() + " ";
- long tid = opid.getTid();
+ String txUUIDStr = opid.getTxUUIDStr();
FateInstanceType type = FateInstanceType.fromThrift(opid.getType());
- FateId fateId = FateId.from(type, tid);
+ FateId fateId = FateId.from(type, txUUIDStr);
switch (op) {
case NAMESPACE_CREATE: {
@@ -949,7 +949,7 @@ class FateServiceHandler implements FateService.Iface {
public Path mkTempDir(TFateId opid) throws IOException {
Volume vol = manager.getVolumeManager().getFirst();
FateId fateId = FateId.fromThrift(opid);
-    Path p = vol.prefixChild("/tmp/fate-" + fateId.getType() + "-" + fateId.getHexTid());
+    Path p = vol.prefixChild("/tmp/fate-" + fateId.getType() + "-" + fateId.getTxUUIDStr());
FileSystem fs = vol.getFileSystem();
if (fs.exists(p)) {
fs.delete(p, true);
diff --git
a/server/manager/src/main/java/org/apache/accumulo/manager/compaction/coordinator/CompactionCoordinator.java
b/server/manager/src/main/java/org/apache/accumulo/manager/compaction/coordinator/CompactionCoordinator.java
index 97397c019b..174d741abb 100644
---
a/server/manager/src/main/java/org/apache/accumulo/manager/compaction/coordinator/CompactionCoordinator.java
+++
b/server/manager/src/main/java/org/apache/accumulo/manager/compaction/coordinator/CompactionCoordinator.java
@@ -42,6 +42,7 @@ import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.Set;
+import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
@@ -584,7 +585,7 @@ public class CompactionCoordinator
}).collect(toList());
FateInstanceType type =
FateInstanceType.fromTableId(metaJob.getTabletMetadata().getTableId());
- FateId fateId = FateId.from(type, 0);
+ FateId fateId = FateId.from(type, UUID.randomUUID());
if (metaJob.getJob().getKind() == CompactionKind.USER) {
fateId = metaJob.getTabletMetadata().getSelectedFiles().getFateId();
}
diff --git
a/server/manager/src/test/java/org/apache/accumulo/manager/compaction/CompactionCoordinatorTest.java
b/server/manager/src/test/java/org/apache/accumulo/manager/compaction/CompactionCoordinatorTest.java
index 4c0b2b1d52..3619215985 100644
---
a/server/manager/src/test/java/org/apache/accumulo/manager/compaction/CompactionCoordinatorTest.java
+++
b/server/manager/src/test/java/org/apache/accumulo/manager/compaction/CompactionCoordinatorTest.java
@@ -179,7 +179,7 @@ public class CompactionCoordinatorTest {
Set<StoredTabletFile> jobFiles, TabletMetadata tablet, String
compactorAddress,
ExternalCompactionId externalCompactionId) {
FateInstanceType type =
FateInstanceType.fromTableId(tablet.getExtent().tableId());
- FateId fateId = FateId.from(type, 1L);
+ FateId fateId = FateId.from(type, UUID.randomUUID());
return new CompactionMetadata(jobFiles,
new ReferencedTabletFile(new
Path("file:///accumulo/tables/1/default_tablet/F00001.rf")),
compactorAddress, job.getKind(), job.getPriority(), job.getGroup(),
true, fateId);
@@ -195,7 +195,7 @@ public class CompactionCoordinatorTest {
TCompactionKind.valueOf(ecm.getKind().name()),
FateId
.from(FateInstanceType.fromTableId(metaJob.getTabletMetadata().getExtent().tableId()),
- 1L)
+ UUID.randomUUID())
.toThrift(),
Map.of());
}
@@ -402,7 +402,7 @@ public class CompactionCoordinatorTest {
EasyMock.expect(context.getTableState(tableId1)).andReturn(TableState.ONLINE).atLeastOnce();
EasyMock.expect(context.getTableState(tableId2)).andReturn(TableState.OFFLINE).atLeastOnce();
- FateId fateId1 = FateId.from(FateInstanceType.USER, 1234L);
+ FateId fateId1 = FateId.from(FateInstanceType.USER, UUID.randomUUID());
CompactorGroupId cgid = CompactorGroupId.of("G1");
ReferencedTabletFile tmp1 =
diff --git
a/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/ShutdownTServerTest.java
b/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/ShutdownTServerTest.java
index f73a94d170..874e7424e6 100644
---
a/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/ShutdownTServerTest.java
+++
b/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/ShutdownTServerTest.java
@@ -24,6 +24,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.Collections;
import java.util.HashMap;
+import java.util.UUID;
import org.apache.accumulo.core.fate.FateId;
import org.apache.accumulo.core.fate.FateInstanceType;
@@ -50,7 +51,7 @@ public class ShutdownTServerTest {
final ShutdownTServer op = new ShutdownTServer(tserver, force);
final Manager manager = EasyMock.createMock(Manager.class);
- final FateId fateId = FateId.from(FateInstanceType.USER, 1L);
+    final FateId fateId = FateId.from(FateInstanceType.USER, UUID.randomUUID());
final TServerConnection tserverCnxn =
EasyMock.createMock(TServerConnection.class);
final TabletServerStatus status = new TabletServerStatus();
diff --git
a/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/merge/MergeTabletsTest.java
b/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/merge/MergeTabletsTest.java
index eae4dff9a6..9c52e7f51e 100644
---
a/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/merge/MergeTabletsTest.java
+++
b/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/merge/MergeTabletsTest.java
@@ -90,7 +90,7 @@ import org.junit.jupiter.api.Test;
public class MergeTabletsTest {
private static final TableId tableId = TableId.of("789");
-  private static final FateId fateId = FateId.from(FateInstanceType.USER, 1234L);
+  private static final FateId fateId = FateId.from(FateInstanceType.USER, UUID.randomUUID());
private static final TabletOperationId opid =
TabletOperationId.from(TabletOperationType.MERGING, fateId);
@@ -296,7 +296,7 @@ public class MergeTabletsTest {
var currLoc = TabletMetadata.Location.current(tserver);
testUnexpectedColumn(tmb -> tmb.putLocation(currLoc), "had location",
currLoc.toString());
- var otherFateId = FateId.from(FateInstanceType.USER, 4321L);
+ var otherFateId = FateId.from(FateInstanceType.USER, UUID.randomUUID());
var otherOpid = TabletOperationId.from(TabletOperationType.MERGING,
otherFateId);
testUnexpectedColumn(tmb -> tmb.putOperation(otherOpid), "had unexpected
opid",
otherOpid.toString());
diff --git
a/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/split/UpdateTabletsTest.java
b/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/split/UpdateTabletsTest.java
index aec3835875..1e9c018785 100644
---
a/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/split/UpdateTabletsTest.java
+++
b/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/split/UpdateTabletsTest.java
@@ -193,8 +193,8 @@ public class UpdateTabletsTest {
var loaded1 = newSTF(5);
var loaded2 = newSTF(6);
- var flid1 = FateId.from(FateInstanceType.USER, 11L);
- var flid2 = FateId.from(FateInstanceType.USER, 22L);
+ var flid1 = FateId.from(FateInstanceType.USER, UUID.randomUUID());
+ var flid2 = FateId.from(FateInstanceType.USER, UUID.randomUUID());
var loaded = Map.of(loaded1, flid1, loaded2, flid2);
var dfv1 = new DataFileValue(1000, 100, 20);
@@ -208,7 +208,7 @@ public class UpdateTabletsTest {
var cid2 = ExternalCompactionId.generate(UUID.randomUUID());
var cid3 = ExternalCompactionId.generate(UUID.randomUUID());
- var fateId = FateId.from(FateInstanceType.USER, 42L);
+ var fateId = FateId.from(FateInstanceType.USER, UUID.randomUUID());
var opid = TabletOperationId.from(TabletOperationType.SPLITTING, fateId);
var tabletTime = MetadataTime.parse("L30");
var flushID = OptionalLong.of(40);
@@ -248,11 +248,11 @@ public class UpdateTabletsTest {
SelectedFiles selectedFiles = EasyMock.mock(SelectedFiles.class);
EasyMock.expect(selectedFiles.getFateId()).andReturn(null);
EasyMock.expect(tabletMeta.getSelectedFiles()).andReturn(selectedFiles).atLeastOnce();
- FateId ucfid1 = FateId.from(FateInstanceType.USER, 55L);
- FateId ucfid2 = FateId.from(FateInstanceType.USER, 66L);
+ FateId ucfid1 = FateId.from(FateInstanceType.USER, UUID.randomUUID());
+ FateId ucfid2 = FateId.from(FateInstanceType.USER, UUID.randomUUID());
EasyMock.expect(tabletMeta.getUserCompactionsRequested()).andReturn(Set.of(ucfid1,
ucfid2))
.atLeastOnce();
- FateId ucfid3 = FateId.from(FateInstanceType.USER, 77L);
+ FateId ucfid3 = FateId.from(FateInstanceType.USER, UUID.randomUUID());
EasyMock.expect(tabletMeta.getCompacted()).andReturn(Set.of(ucfid1,
ucfid3)).atLeastOnce();
EasyMock.expect(tabletMeta.getScans()).andReturn(List.of(file1,
file2)).atLeastOnce();
EasyMock.expect(tabletMeta.getTime()).andReturn(tabletTime).atLeastOnce();
@@ -376,7 +376,7 @@ public class UpdateTabletsTest {
TableId tableId = TableId.of("123");
KeyExtent origExtent = new KeyExtent(tableId, new Text("m"), null);
- var fateId = FateId.from(FateInstanceType.USER, 42L);
+ var fateId = FateId.from(FateInstanceType.USER, UUID.randomUUID());
var opid = TabletOperationId.from(TabletOperationType.SPLITTING, fateId);
// Test splitting a tablet with a location
@@ -393,7 +393,7 @@ public class UpdateTabletsTest {
assertTrue(e.getMessage().contains("null"));
// Test splitting a tablet with an unexpected operation id
- var fateId2 = FateId.from(FateInstanceType.USER, 24L);
+ var fateId2 = FateId.from(FateInstanceType.USER, UUID.randomUUID());
var opid2 = TabletOperationId.from(TabletOperationType.SPLITTING, fateId2);
var tablet3 =
TabletMetadata.builder(origExtent).putOperation(opid2).build();
e = assertThrows(IllegalStateException.class, () -> testError(origExtent,
tablet3, fateId));
diff --git a/test/src/main/java/org/apache/accumulo/test/ScanServerIT.java
b/test/src/main/java/org/apache/accumulo/test/ScanServerIT.java
index 3ecb58bd6c..8d1270ab31 100644
--- a/test/src/main/java/org/apache/accumulo/test/ScanServerIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/ScanServerIT.java
@@ -34,6 +34,7 @@ import java.util.Objects;
import java.util.Properties;
import java.util.SortedSet;
import java.util.TreeSet;
+import java.util.UUID;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@@ -270,7 +271,7 @@ public class ScanServerIT extends SharedMiniClusterBase {
// Set operationIds on all the table's tablets so that they won't be
loaded.
FateInstanceType type = FateInstanceType.fromTableId(tid);
- FateId fateId = FateId.from(type, 1234L);
+ FateId fateId = FateId.from(type, UUID.randomUUID());
TabletOperationId opid =
TabletOperationId.from(TabletOperationType.SPLITTING, fateId);
Ample ample = getCluster().getServerContext().getAmple();
ServerAmpleImpl sai = (ServerAmpleImpl) ample;
diff --git
a/test/src/main/java/org/apache/accumulo/test/fate/accumulo/AccumuloFateIT.java
b/test/src/main/java/org/apache/accumulo/test/fate/accumulo/AccumuloFateIT.java
index eb3743326e..e4a944c41f 100644
---
a/test/src/main/java/org/apache/accumulo/test/fate/accumulo/AccumuloFateIT.java
+++
b/test/src/main/java/org/apache/accumulo/test/fate/accumulo/AccumuloFateIT.java
@@ -78,6 +78,6 @@ public class AccumuloFateIT extends FateIT {
}
private static Range getRow(FateId fateId) {
- return new Range("tx_" + fateId.getHexTid());
+ return new Range("tx_" + fateId.getTxUUIDStr());
}
}
diff --git
a/test/src/main/java/org/apache/accumulo/test/fate/accumulo/AccumuloStoreIT.java
b/test/src/main/java/org/apache/accumulo/test/fate/accumulo/AccumuloStoreIT.java
index cb38235242..0430abfc01 100644
---
a/test/src/main/java/org/apache/accumulo/test/fate/accumulo/AccumuloStoreIT.java
+++
b/test/src/main/java/org/apache/accumulo/test/fate/accumulo/AccumuloStoreIT.java
@@ -24,9 +24,10 @@ import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.Iterator;
+import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
-import java.util.TreeSet;
+import java.util.UUID;
import java.util.stream.Collectors;
import org.apache.accumulo.core.client.Accumulo;
@@ -93,7 +94,7 @@ public class AccumuloStoreIT extends SharedMiniClusterBase {
if (fateIdIterator.hasNext()) {
return fateIdIterator.next();
} else {
- return FateId.from(fateInstanceType, -1L);
+ return FateId.from(fateInstanceType, UUID.randomUUID());
}
}
@@ -155,10 +156,16 @@ public class AccumuloStoreIT extends
SharedMiniClusterBase {
(ClientContext) Accumulo.newClient().from(getClientProps()).build()) {
createFateTable(client, table);
- List<Long> txids = List.of(1L, 1L, 1L, 2L, 3L, 3L, 3L, 3L, 4L, 4L, 5L,
5L, 5L, 5L, 5L, 5L);
+ UUID[] uuids = new UUID[5];
+ for (int i = 0; i < uuids.length; i++) {
+ uuids[i] = UUID.randomUUID();
+ }
+ List<UUID> txids =
+ List.of(uuids[0], uuids[0], uuids[0], uuids[1], uuids[2], uuids[2],
uuids[2], uuids[2],
+ uuids[3], uuids[3], uuids[4], uuids[4], uuids[4], uuids[4],
uuids[4], uuids[4]);
List<FateId> fateIds = txids.stream().map(txid ->
FateId.from(fateInstanceType, txid))
.collect(Collectors.toList());
- Set<FateId> expectedFateIds = new TreeSet<>(fateIds);
+ Set<FateId> expectedFateIds = new LinkedHashSet<>(fateIds);
TestAccumuloStore store = new TestAccumuloStore(client, table, fateIds);
// call create and expect we get the unique txids
@@ -187,7 +194,7 @@ public class AccumuloStoreIT extends SharedMiniClusterBase {
client = (ClientContext)
Accumulo.newClient().from(getClientProps()).build();
tableName = getUniqueNames(1)[0];
createFateTable(client, tableName);
- fateId = FateId.from(fateInstanceType, 1L);
+ fateId = FateId.from(fateInstanceType, UUID.randomUUID());
store = new TestAccumuloStore(client, tableName, List.of(fateId));
store.create();
txStore = store.reserve(fateId);
@@ -250,7 +257,7 @@ public class AccumuloStoreIT extends SharedMiniClusterBase {
private void injectStatus(ClientContext client, String table, FateId fateId,
TStatus status)
throws TableNotFoundException {
try (BatchWriter writer = client.createBatchWriter(table)) {
- Mutation mutation = new Mutation(new Text("tx_" + fateId.getHexTid()));
+ Mutation mutation = new Mutation(new Text("tx_" +
fateId.getTxUUIDStr()));
FateSchema.TxColumnFamily.STATUS_COLUMN.put(mutation, new
Value(status.name()));
writer.addMutation(mutation);
} catch (MutationsRejectedException e) {
diff --git
a/test/src/main/java/org/apache/accumulo/test/fate/accumulo/FateMutatorImplIT.java
b/test/src/main/java/org/apache/accumulo/test/fate/accumulo/FateMutatorImplIT.java
index d5481bbab8..a9410df845 100644
---
a/test/src/main/java/org/apache/accumulo/test/fate/accumulo/FateMutatorImplIT.java
+++
b/test/src/main/java/org/apache/accumulo/test/fate/accumulo/FateMutatorImplIT.java
@@ -20,11 +20,11 @@ package org.apache.accumulo.test.fate.accumulo;
import static
org.apache.accumulo.core.fate.accumulo.FateMutator.Status.ACCEPTED;
import static
org.apache.accumulo.core.fate.accumulo.FateMutator.Status.REJECTED;
-import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import java.time.Duration;
+import java.util.UUID;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
@@ -72,8 +72,8 @@ public class FateMutatorImplIT extends SharedMiniClusterBase {
ClientContext context = (ClientContext) client;
- final long tid = RANDOM.get().nextLong() & 0x7fffffffffffffffL;
- FateId fateId =
FateId.from(FateInstanceType.fromNamespaceOrTableName(table), tid);
+ FateId fateId =
+ FateId.from(FateInstanceType.fromNamespaceOrTableName(table),
UUID.randomUUID());
// add some repos in order
FateMutatorImpl<FateIT.TestEnv> fateMutator = new
FateMutatorImpl<>(context, table, fateId);
@@ -103,8 +103,8 @@ public class FateMutatorImplIT extends
SharedMiniClusterBase {
ClientContext context = (ClientContext) client;
- final long tid = RANDOM.get().nextLong() & 0x7fffffffffffffffL;
- FateId fateId =
FateId.from(FateInstanceType.fromNamespaceOrTableName(table), tid);
+ FateId fateId =
+ FateId.from(FateInstanceType.fromNamespaceOrTableName(table),
UUID.randomUUID());
// use require status passing all statuses. without the status column
present this should fail
assertThrows(IllegalStateException.class,
diff --git
a/test/src/main/java/org/apache/accumulo/test/fate/accumulo/FateStoreIT.java
b/test/src/main/java/org/apache/accumulo/test/fate/accumulo/FateStoreIT.java
index 63e8d64703..582e18fb59 100644
--- a/test/src/main/java/org/apache/accumulo/test/fate/accumulo/FateStoreIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/fate/accumulo/FateStoreIT.java
@@ -18,6 +18,7 @@
*/
package org.apache.accumulo.test.fate.accumulo;
+import static java.nio.charset.StandardCharsets.UTF_8;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
@@ -365,10 +366,11 @@ public abstract class FateStoreIT extends
SharedMiniClusterBase implements FateT
@Test
public void testCreateWithKeyCollision() throws Exception {
- // Replace the default hasing algorithm with one that always returns the
same tid so
+ // Replace the default hashing algorithm with one that always returns the
same tid so
// we can check duplicate detection with different keys
executeTest(this::testCreateWithKeyCollision,
AbstractFateStore.DEFAULT_MAX_DEFERRED,
- (instanceType, fateKey) -> FateId.from(instanceType, 1000));
+ (instanceType, fateKey) -> FateId.from(instanceType,
+ UUID.nameUUIDFromBytes("testing uuid".getBytes(UTF_8))));
}
protected void testCreateWithKeyCollision(FateStore<TestEnv> store,
ServerContext sctx) {
@@ -382,7 +384,9 @@ public abstract class FateStoreIT extends
SharedMiniClusterBase implements FateT
FateTxStore<TestEnv> txStore =
store.createAndReserve(fateKey1).orElseThrow();
try {
var e = assertThrows(IllegalStateException.class, () -> create(store,
fateKey2));
- assertEquals("Collision detected for tid 1000", e.getMessage());
+ assertEquals(
+ "Collision detected for tid " + UUID.nameUUIDFromBytes("testing
uuid".getBytes(UTF_8)),
+ e.getMessage());
assertEquals(fateKey1, txStore.getKey().orElseThrow());
} finally {
txStore.delete();
@@ -410,7 +414,7 @@ public abstract class FateStoreIT extends
SharedMiniClusterBase implements FateT
// and use the existing transaction, which we should.
deleteKey(fateId, sctx);
var e = assertThrows(IllegalStateException.class, () ->
store.createAndReserve(fateKey));
- assertEquals("Tx Key is missing from tid " + fateId.getTid(),
e.getMessage());
+ assertEquals("Tx Key is missing from tid " + fateId.getTxUUIDStr(),
e.getMessage());
// We should still be able to reserve and continue when not using a key
// just like a normal transaction
diff --git
a/test/src/main/java/org/apache/accumulo/test/fate/zookeeper/ZooStoreFateIT.java
b/test/src/main/java/org/apache/accumulo/test/fate/zookeeper/ZooStoreFateIT.java
index c679e0d54f..1b99cdb8a5 100644
---
a/test/src/main/java/org/apache/accumulo/test/fate/zookeeper/ZooStoreFateIT.java
+++
b/test/src/main/java/org/apache/accumulo/test/fate/zookeeper/ZooStoreFateIT.java
@@ -99,7 +99,7 @@ public class ZooStoreFateIT extends FateStoreIT {
// Get the existing status for the node and build a new node with an
empty key
// but uses the existing tid
- String txPath = ZK_ROOT + Constants.ZFATE + "/tx_" + fateId.getHexTid();
+ String txPath = ZK_ROOT + Constants.ZFATE + "/tx_" +
fateId.getTxUUIDStr();
Object currentNode = serializedCons.newInstance(new Object[]
{zk.getData(txPath)});
TStatus currentStatus = (TStatus) nodeStatus.get(currentNode);
// replace the node with no key and just a tid and existing status
diff --git
a/test/src/main/java/org/apache/accumulo/test/fate/zookeeper/ZookeeperFateIT.java
b/test/src/main/java/org/apache/accumulo/test/fate/zookeeper/ZookeeperFateIT.java
index c8688a8951..f66f226781 100644
---
a/test/src/main/java/org/apache/accumulo/test/fate/zookeeper/ZookeeperFateIT.java
+++
b/test/src/main/java/org/apache/accumulo/test/fate/zookeeper/ZookeeperFateIT.java
@@ -95,7 +95,7 @@ public class ZookeeperFateIT extends FateIT {
private static TStatus getTxStatus(ZooReaderWriter zrw, FateId fateId)
throws KeeperException, InterruptedException {
zrw.sync(ZK_ROOT);
- String txdir = String.format("%s%s/tx_%s", ZK_ROOT, Constants.ZFATE,
fateId.getHexTid());
+ String txdir = String.format("%s%s/tx_%s", ZK_ROOT, Constants.ZFATE,
fateId.getTxUUIDStr());
try (DataInputBuffer buffer = new DataInputBuffer()) {
var serialized = zrw.getData(txdir);
diff --git
a/test/src/main/java/org/apache/accumulo/test/functional/AmpleConditionalWriterIT.java
b/test/src/main/java/org/apache/accumulo/test/functional/AmpleConditionalWriterIT.java
index fa35d5fc21..48255dc82a 100644
---
a/test/src/main/java/org/apache/accumulo/test/functional/AmpleConditionalWriterIT.java
+++
b/test/src/main/java/org/apache/accumulo/test/functional/AmpleConditionalWriterIT.java
@@ -319,7 +319,7 @@ public class AmpleConditionalWriterIT extends
AccumuloClusterHarness {
ctmi = new ConditionalTabletsMutatorImpl(context);
var tm6 = TabletMetadata.builder(e1).build(LOADED);
FateInstanceType type = FateInstanceType.fromTableId(tid);
- FateId fateId = FateId.from(type, 9L);
+ FateId fateId = FateId.from(type, UUID.randomUUID());
ctmi.mutateTablet(e1).requireAbsentOperation().requireSame(tm6, LOADED)
.putFile(stf5, new DataFileValue(0,
0)).putBulkFile(stf5.getTabletFile(), fateId)
.putFile(stf5, new DataFileValue(0, 0)).submit(tm -> false);
@@ -464,9 +464,10 @@ public class AmpleConditionalWriterIT extends
AccumuloClusterHarness {
ctmi = new ConditionalTabletsMutatorImpl(context);
FateInstanceType type = FateInstanceType.fromTableId(tid);
- FateId fateId2L = FateId.from(type, 2L);
+ FateId fateId1 = FateId.from(type, UUID.randomUUID());
+ FateId fateId2 = FateId.from(type, UUID.randomUUID());
ctmi.mutateTablet(e1).requireAbsentOperation().requireSame(tm1, FILES,
SELECTED)
- .putSelectedFiles(new SelectedFiles(Set.of(stf1, stf2, stf3), true,
fateId2L))
+ .putSelectedFiles(new SelectedFiles(Set.of(stf1, stf2, stf3), true,
fateId1))
.submit(tm -> false);
results = ctmi.process();
assertEquals(Status.REJECTED, results.get(e1).getStatus());
@@ -478,7 +479,7 @@ public class AmpleConditionalWriterIT extends
AccumuloClusterHarness {
.build(SELECTED);
ctmi = new ConditionalTabletsMutatorImpl(context);
ctmi.mutateTablet(e1).requireAbsentOperation().requireSame(tm2, FILES,
SELECTED)
- .putSelectedFiles(new SelectedFiles(Set.of(stf1, stf2, stf3), true,
fateId2L))
+ .putSelectedFiles(new SelectedFiles(Set.of(stf1, stf2, stf3), true,
fateId1))
.submit(tm -> false);
results = ctmi.process();
assertEquals(Status.ACCEPTED, results.get(e1).getStatus());
@@ -490,12 +491,11 @@ public class AmpleConditionalWriterIT extends
AccumuloClusterHarness {
// a list of selected files objects that are not the same as the current
tablet and expected to
// fail
var expectedToFail = new ArrayList<SelectedFiles>();
- FateId fateId3L = FateId.from(type, 3L);
- expectedToFail.add(new SelectedFiles(Set.of(stf1, stf2), true, fateId2L));
- expectedToFail.add(new SelectedFiles(Set.of(stf1, stf2, stf3, stf4), true,
fateId2L));
- expectedToFail.add(new SelectedFiles(Set.of(stf1, stf2, stf3), false,
fateId2L));
- expectedToFail.add(new SelectedFiles(Set.of(stf1, stf2, stf3), true,
fateId3L));
+ expectedToFail.add(new SelectedFiles(Set.of(stf1, stf2), true, fateId1));
+ expectedToFail.add(new SelectedFiles(Set.of(stf1, stf2, stf3, stf4), true,
fateId1));
+ expectedToFail.add(new SelectedFiles(Set.of(stf1, stf2, stf3), false,
fateId1));
+ expectedToFail.add(new SelectedFiles(Set.of(stf1, stf2, stf3), true,
fateId2));
for (var selectedFiles : expectedToFail) {
var tm3 = TabletMetadata.builder(e1).putFile(stf1, dfv).putFile(stf2,
dfv).putFile(stf3, dfv)
@@ -512,7 +512,7 @@ public class AmpleConditionalWriterIT extends
AccumuloClusterHarness {
}
var tm5 = TabletMetadata.builder(e1).putFile(stf1, dfv).putFile(stf2,
dfv).putFile(stf3, dfv)
- .putSelectedFiles(new SelectedFiles(Set.of(stf1, stf2, stf3), true,
fateId2L)).build();
+ .putSelectedFiles(new SelectedFiles(Set.of(stf1, stf2, stf3), true,
fateId1)).build();
ctmi = new ConditionalTabletsMutatorImpl(context);
ctmi.mutateTablet(e1).requireAbsentOperation().requireSame(tm5, FILES,
SELECTED)
.deleteSelectedFiles().submit(tm -> false);
@@ -540,7 +540,7 @@ public class AmpleConditionalWriterIT extends
AccumuloClusterHarness {
final Set<StoredTabletFile> storedTabletFiles = Set.of(stf1, stf2, stf3);
final boolean initiallySelectedAll = true;
final FateInstanceType type = FateInstanceType.fromTableId(tid);
- final FateId fateId = FateId.from(type, 2L);
+ final FateId fateId = FateId.from(type, UUID.randomUUID());
final SelectedFiles selectedFiles =
new SelectedFiles(storedTabletFiles, initiallySelectedAll, fateId);
@@ -632,7 +632,7 @@ public class AmpleConditionalWriterIT extends
AccumuloClusterHarness {
*
* <pre>
* {
- * "fateId": "FATE:META:123456",
+ * "fateId": "FATE:META:12345678-9abc-def1-2345-6789abcdef12",
* "selAll": true,
* "files": ["/path/to/file1.rf", "/path/to/file2.rf"]
* }
@@ -702,8 +702,8 @@ public class AmpleConditionalWriterIT extends
AccumuloClusterHarness {
var context = cluster.getServerContext();
FateInstanceType type = FateInstanceType.fromTableId(tid);
- FateId fateId1 = FateId.from(type, "1234");
- FateId fateId2 = FateId.from(type, "5678");
+ FateId fateId1 = FateId.from(type, UUID.randomUUID());
+ FateId fateId2 = FateId.from(type, UUID.randomUUID());
var opid1 = TabletOperationId.from(TabletOperationType.SPLITTING,
fateId1);
var opid2 = TabletOperationId.from(TabletOperationType.MERGING, fateId2);
@@ -756,69 +756,69 @@ public class AmpleConditionalWriterIT extends
AccumuloClusterHarness {
var ctmi = new ConditionalTabletsMutatorImpl(context);
FateInstanceType type = FateInstanceType.fromTableId(tid);
- FateId fateId45L = FateId.from(type, 45L);
- FateId fateId55L = FateId.from(type, 55L);
- FateId fateId56L = FateId.from(type, 56L);
- FateId fateId65L = FateId.from(type, 65L);
- FateId fateId75L = FateId.from(type, 75L);
+ FateId fateId1 = FateId.from(type, UUID.randomUUID());
+ FateId fateId2 = FateId.from(type, UUID.randomUUID());
+ FateId fateId3 = FateId.from(type, UUID.randomUUID());
+ FateId fateId4 = FateId.from(type, UUID.randomUUID());
+ FateId fateId5 = FateId.from(type, UUID.randomUUID());
var tabletMeta1 = TabletMetadata.builder(e1).build(COMPACTED);
ctmi.mutateTablet(e1).requireAbsentOperation().requireSame(tabletMeta1,
COMPACTED)
- .putCompacted(fateId55L)
- .submit(tabletMetadata ->
tabletMetadata.getCompacted().contains(fateId55L));
- var tabletMeta2 =
TabletMetadata.builder(e2).putCompacted(fateId45L).build(COMPACTED);
+ .putCompacted(fateId2)
+ .submit(tabletMetadata ->
tabletMetadata.getCompacted().contains(fateId2));
+ var tabletMeta2 =
TabletMetadata.builder(e2).putCompacted(fateId1).build(COMPACTED);
ctmi.mutateTablet(e2).requireAbsentOperation().requireSame(tabletMeta2,
COMPACTED)
- .putCompacted(fateId56L)
- .submit(tabletMetadata ->
tabletMetadata.getCompacted().contains(fateId56L));
+ .putCompacted(fateId3)
+ .submit(tabletMetadata ->
tabletMetadata.getCompacted().contains(fateId3));
var results = ctmi.process();
assertEquals(Status.ACCEPTED, results.get(e1).getStatus());
assertEquals(Status.REJECTED, results.get(e2).getStatus());
tabletMeta1 = context.getAmple().readTablet(e1);
- assertEquals(Set.of(fateId55L), tabletMeta1.getCompacted());
+ assertEquals(Set.of(fateId2), tabletMeta1.getCompacted());
assertEquals(Set.of(), context.getAmple().readTablet(e2).getCompacted());
ctmi = new ConditionalTabletsMutatorImpl(context);
ctmi.mutateTablet(e1).requireAbsentOperation().requireSame(tabletMeta1,
COMPACTED)
-
.putCompacted(fateId65L).putCompacted(fateId75L).submit(tabletMetadata ->
false);
+ .putCompacted(fateId4).putCompacted(fateId5).submit(tabletMetadata
-> false);
results = ctmi.process();
assertEquals(Status.ACCEPTED, results.get(e1).getStatus());
tabletMeta1 = context.getAmple().readTablet(e1);
- assertEquals(Set.of(fateId55L, fateId65L, fateId75L),
tabletMeta1.getCompacted());
+ assertEquals(Set.of(fateId2, fateId4, fateId5),
tabletMeta1.getCompacted());
// test require same with a superset
ctmi = new ConditionalTabletsMutatorImpl(context);
- tabletMeta1 =
TabletMetadata.builder(e2).putCompacted(fateId55L).putCompacted(fateId65L)
- .putCompacted(fateId75L).putCompacted(fateId45L).build(COMPACTED);
+ tabletMeta1 =
TabletMetadata.builder(e2).putCompacted(fateId2).putCompacted(fateId4)
+ .putCompacted(fateId5).putCompacted(fateId1).build(COMPACTED);
ctmi.mutateTablet(e1).requireAbsentOperation().requireSame(tabletMeta1,
COMPACTED)
-
.deleteCompacted(fateId55L).deleteCompacted(fateId65L).deleteCompacted(fateId75L)
+
.deleteCompacted(fateId2).deleteCompacted(fateId4).deleteCompacted(fateId5)
.submit(tabletMetadata -> false);
results = ctmi.process();
assertEquals(Status.REJECTED, results.get(e1).getStatus());
- assertEquals(Set.of(fateId55L, fateId65L, fateId75L),
+ assertEquals(Set.of(fateId2, fateId4, fateId5),
context.getAmple().readTablet(e1).getCompacted());
// test require same with a subset
ctmi = new ConditionalTabletsMutatorImpl(context);
- tabletMeta1 =
TabletMetadata.builder(e2).putCompacted(fateId55L).putCompacted(fateId65L)
- .build(COMPACTED);
+ tabletMeta1 =
+
TabletMetadata.builder(e2).putCompacted(fateId2).putCompacted(fateId4).build(COMPACTED);
ctmi.mutateTablet(e1).requireAbsentOperation().requireSame(tabletMeta1,
COMPACTED)
-
.deleteCompacted(fateId55L).deleteCompacted(fateId65L).deleteCompacted(fateId75L)
+
.deleteCompacted(fateId2).deleteCompacted(fateId4).deleteCompacted(fateId5)
.submit(tabletMetadata -> false);
results = ctmi.process();
assertEquals(Status.REJECTED, results.get(e1).getStatus());
- assertEquals(Set.of(fateId55L, fateId65L, fateId75L),
+ assertEquals(Set.of(fateId2, fateId4, fateId5),
context.getAmple().readTablet(e1).getCompacted());
// now use the exact set the tablet has
ctmi = new ConditionalTabletsMutatorImpl(context);
- tabletMeta1 =
TabletMetadata.builder(e2).putCompacted(fateId55L).putCompacted(fateId65L)
- .putCompacted(fateId75L).build(COMPACTED);
+ tabletMeta1 =
TabletMetadata.builder(e2).putCompacted(fateId2).putCompacted(fateId4)
+ .putCompacted(fateId5).build(COMPACTED);
ctmi.mutateTablet(e1).requireAbsentOperation().requireSame(tabletMeta1,
COMPACTED)
-
.deleteCompacted(fateId55L).deleteCompacted(fateId65L).deleteCompacted(fateId75L)
+
.deleteCompacted(fateId2).deleteCompacted(fateId4).deleteCompacted(fateId5)
.submit(tabletMetadata -> false);
results = ctmi.process();
assertEquals(Status.ACCEPTED, results.get(e1).getStatus());
@@ -837,8 +837,8 @@ public class AmpleConditionalWriterIT extends
AccumuloClusterHarness {
assertNull(rootMeta.getOperationId());
FateInstanceType type =
FateInstanceType.fromTableId(RootTable.EXTENT.tableId());
- FateId fateId = FateId.from(type, 7);
- TabletOperationId opid =
TabletOperationId.from(TabletOperationType.MERGING, fateId);
+ TabletOperationId opid =
+ TabletOperationId.from(TabletOperationType.MERGING, FateId.from(type,
UUID.randomUUID()));
var ctmi = new ConditionalTabletsMutatorImpl(context);
ctmi.mutateTablet(RootTable.EXTENT).requireAbsentOperation().requireAbsentLocation()
@@ -904,80 +904,77 @@ public class AmpleConditionalWriterIT extends
AccumuloClusterHarness {
var ctmi = new ConditionalTabletsMutatorImpl(context);
FateInstanceType type = FateInstanceType.fromTableId(tid);
- FateId fateId45L = FateId.from(type, 45L);
- FateId fateId55L = FateId.from(type, 55L);
- FateId fateId56L = FateId.from(type, 56L);
- FateId fateId65L = FateId.from(type, 65L);
- FateId fateId75L = FateId.from(type, 75L);
+ FateId fateId1 = FateId.from(type, UUID.randomUUID());
+ FateId fateId2 = FateId.from(type, UUID.randomUUID());
+ FateId fateId3 = FateId.from(type, UUID.randomUUID());
+ FateId fateId4 = FateId.from(type, UUID.randomUUID());
+ FateId fateId5 = FateId.from(type, UUID.randomUUID());
var tabletMeta1 =
TabletMetadata.builder(e1).build(USER_COMPACTION_REQUESTED);
ctmi.mutateTablet(e1).requireAbsentOperation()
- .requireSame(tabletMeta1,
USER_COMPACTION_REQUESTED).putUserCompactionRequested(fateId55L)
- .submit(
- tabletMetadata ->
tabletMetadata.getUserCompactionsRequested().contains(fateId55L));
- var tabletMeta2 =
TabletMetadata.builder(e2).putUserCompactionRequested(fateId45L)
+ .requireSame(tabletMeta1,
USER_COMPACTION_REQUESTED).putUserCompactionRequested(fateId2)
+ .submit(tabletMetadata ->
tabletMetadata.getUserCompactionsRequested().contains(fateId2));
+ var tabletMeta2 =
TabletMetadata.builder(e2).putUserCompactionRequested(fateId1)
.build(USER_COMPACTION_REQUESTED);
ctmi.mutateTablet(e2).requireAbsentOperation()
- .requireSame(tabletMeta2,
USER_COMPACTION_REQUESTED).putUserCompactionRequested(fateId56L)
- .submit(
- tabletMetadata ->
tabletMetadata.getUserCompactionsRequested().contains(fateId56L));
+ .requireSame(tabletMeta2,
USER_COMPACTION_REQUESTED).putUserCompactionRequested(fateId3)
+ .submit(tabletMetadata ->
tabletMetadata.getUserCompactionsRequested().contains(fateId3));
var results = ctmi.process();
assertEquals(Status.ACCEPTED, results.get(e1).getStatus());
assertEquals(Status.REJECTED, results.get(e2).getStatus());
tabletMeta1 = context.getAmple().readTablet(e1);
- assertEquals(Set.of(fateId55L),
tabletMeta1.getUserCompactionsRequested());
+ assertEquals(Set.of(fateId2), tabletMeta1.getUserCompactionsRequested());
assertEquals(Set.of(),
context.getAmple().readTablet(e2).getUserCompactionsRequested());
ctmi = new ConditionalTabletsMutatorImpl(context);
ctmi.mutateTablet(e1).requireAbsentOperation()
- .requireSame(tabletMeta1,
USER_COMPACTION_REQUESTED).putUserCompactionRequested(fateId65L)
- .putUserCompactionRequested(fateId75L).submit(tabletMetadata ->
false);
+ .requireSame(tabletMeta1,
USER_COMPACTION_REQUESTED).putUserCompactionRequested(fateId4)
+ .putUserCompactionRequested(fateId5).submit(tabletMetadata -> false);
results = ctmi.process();
assertEquals(Status.ACCEPTED, results.get(e1).getStatus());
tabletMeta1 = context.getAmple().readTablet(e1);
- assertEquals(Set.of(fateId55L, fateId65L, fateId75L),
- tabletMeta1.getUserCompactionsRequested());
+ assertEquals(Set.of(fateId2, fateId4, fateId5),
tabletMeta1.getUserCompactionsRequested());
// test require same with a superset
ctmi = new ConditionalTabletsMutatorImpl(context);
- tabletMeta1 =
TabletMetadata.builder(e2).putUserCompactionRequested(fateId55L)
-
.putUserCompactionRequested(fateId65L).putUserCompactionRequested(fateId75L)
-
.putUserCompactionRequested(fateId45L).build(USER_COMPACTION_REQUESTED);
+ tabletMeta1 =
TabletMetadata.builder(e2).putUserCompactionRequested(fateId2)
+
.putUserCompactionRequested(fateId4).putUserCompactionRequested(fateId5)
+
.putUserCompactionRequested(fateId1).build(USER_COMPACTION_REQUESTED);
ctmi.mutateTablet(e1).requireAbsentOperation()
.requireSame(tabletMeta1, USER_COMPACTION_REQUESTED)
-
.deleteUserCompactionRequested(fateId55L).deleteUserCompactionRequested(fateId65L)
- .deleteUserCompactionRequested(fateId75L).submit(tabletMetadata ->
false);
+
.deleteUserCompactionRequested(fateId2).deleteUserCompactionRequested(fateId4)
+ .deleteUserCompactionRequested(fateId5).submit(tabletMetadata ->
false);
results = ctmi.process();
assertEquals(Status.REJECTED, results.get(e1).getStatus());
- assertEquals(Set.of(fateId55L, fateId65L, fateId75L),
+ assertEquals(Set.of(fateId2, fateId4, fateId5),
context.getAmple().readTablet(e1).getUserCompactionsRequested());
// test require same with a subset
ctmi = new ConditionalTabletsMutatorImpl(context);
- tabletMeta1 =
TabletMetadata.builder(e2).putUserCompactionRequested(fateId55L)
-
.putUserCompactionRequested(fateId65L).build(USER_COMPACTION_REQUESTED);
+ tabletMeta1 =
TabletMetadata.builder(e2).putUserCompactionRequested(fateId2)
+
.putUserCompactionRequested(fateId4).build(USER_COMPACTION_REQUESTED);
ctmi.mutateTablet(e1).requireAbsentOperation()
.requireSame(tabletMeta1, USER_COMPACTION_REQUESTED)
-
.deleteUserCompactionRequested(fateId55L).deleteUserCompactionRequested(fateId65L)
- .deleteUserCompactionRequested(fateId75L).submit(tabletMetadata ->
false);
+
.deleteUserCompactionRequested(fateId2).deleteUserCompactionRequested(fateId4)
+ .deleteUserCompactionRequested(fateId5).submit(tabletMetadata ->
false);
results = ctmi.process();
assertEquals(Status.REJECTED, results.get(e1).getStatus());
- assertEquals(Set.of(fateId55L, fateId65L, fateId75L),
+ assertEquals(Set.of(fateId2, fateId4, fateId5),
context.getAmple().readTablet(e1).getUserCompactionsRequested());
// now use the exact set the tablet has
ctmi = new ConditionalTabletsMutatorImpl(context);
- tabletMeta1 =
TabletMetadata.builder(e2).putUserCompactionRequested(fateId55L)
-
.putUserCompactionRequested(fateId65L).putUserCompactionRequested(fateId75L)
+ tabletMeta1 =
TabletMetadata.builder(e2).putUserCompactionRequested(fateId2)
+
.putUserCompactionRequested(fateId4).putUserCompactionRequested(fateId5)
.build(USER_COMPACTION_REQUESTED);
ctmi.mutateTablet(e1).requireAbsentOperation()
.requireSame(tabletMeta1, USER_COMPACTION_REQUESTED)
-
.deleteUserCompactionRequested(fateId55L).deleteUserCompactionRequested(fateId65L)
- .deleteUserCompactionRequested(fateId75L).submit(tabletMetadata ->
false);
+
.deleteUserCompactionRequested(fateId2).deleteUserCompactionRequested(fateId4)
+ .deleteUserCompactionRequested(fateId5).submit(tabletMetadata ->
false);
results = ctmi.process();
assertEquals(Status.ACCEPTED, results.get(e1).getStatus());
assertEquals(Set.of(),
context.getAmple().readTablet(e1).getUserCompactionsRequested());
@@ -1044,9 +1041,9 @@ public class AmpleConditionalWriterIT extends
AccumuloClusterHarness {
final Set<KeyExtent> tabletsWithWalCompactFlush = Set.of(e1, e2, e3);
for (KeyExtent ke : tabletsWithWalCompactFlush) {
FateInstanceType type = FateInstanceType.fromTableId(ke.tableId());
- FateId fateId34L = FateId.from(type, 34L);
+ FateId fateId = FateId.from(type, UUID.randomUUID());
ctmi = new ConditionalTabletsMutatorImpl(context);
- ctmi.mutateTablet(ke).requireAbsentOperation().putCompacted(fateId34L)
+ ctmi.mutateTablet(ke).requireAbsentOperation().putCompacted(fateId)
.putFlushId(TestTabletMetadataFilter.VALID_FLUSH_ID).putWal(wal)
.submit(tabletMetadata -> false);
var results = ctmi.process();
@@ -1086,15 +1083,15 @@ public class AmpleConditionalWriterIT extends
AccumuloClusterHarness {
ConditionalTabletsMutatorImpl ctmi = new
ConditionalTabletsMutatorImpl(context);
Set<TabletMetadataFilter> filter = Set.of(new
TestTabletMetadataFilter());
FateInstanceType type = FateInstanceType.fromTableId(tid);
- FateId fateId34L = FateId.from(type, 34L);
- FateId fateId987L = FateId.from(type, 987L);
+ FateId fateId1 = FateId.from(type, UUID.randomUUID());
+ FateId fateId2 = FateId.from(type, UUID.randomUUID());
// make sure we read all tablets on table initially with no filters
testFilterApplied(context, Set.of(), Set.of(e1, e2, e3, e4),
"Initially, all tablets should be present");
// Set compacted on e2 but with no flush ID
- ctmi.mutateTablet(e2).requireAbsentOperation().putCompacted(fateId34L)
+ ctmi.mutateTablet(e2).requireAbsentOperation().putCompacted(fateId1)
.submit(tabletMetadata -> false);
var results = ctmi.process();
assertEquals(Status.ACCEPTED, results.get(e2).getStatus());
@@ -1121,7 +1118,7 @@ public class AmpleConditionalWriterIT extends
AccumuloClusterHarness {
// Set compacted and correct flush ID on e3
ctmi = new ConditionalTabletsMutatorImpl(context);
- ctmi.mutateTablet(e3).requireAbsentOperation().putCompacted(fateId987L)
+ ctmi.mutateTablet(e3).requireAbsentOperation().putCompacted(fateId2)
.putFlushId(TestTabletMetadataFilter.VALID_FLUSH_ID).submit(tabletMetadata ->
false);
results = ctmi.process();
assertEquals(Status.ACCEPTED, results.get(e3).getStatus());
@@ -1296,8 +1293,8 @@ public class AmpleConditionalWriterIT extends
AccumuloClusterHarness {
// run a test where a subset of tablets are modified, all modifications
should be accepted
FateInstanceType type = FateInstanceType.fromTableId(tableId);
- FateId fateId1 = FateId.from(type, 50);
- var opid1 = TabletOperationId.from(TabletOperationType.MERGING, fateId1);
+ var opid1 =
+ TabletOperationId.from(TabletOperationType.MERGING,
FateId.from(type, UUID.randomUUID()));
int expected = 0;
try (var tablets = ample.readTablets().forTable(tableId).fetch(OPID,
PREV_ROW).build();
@@ -1318,8 +1315,8 @@ public class AmpleConditionalWriterIT extends
AccumuloClusterHarness {
// run test where some will be accepted and some will be rejected and
ensure the counts come
// out as expected.
- FateId fateId2 = FateId.from(type, 51);
- var opid2 = TabletOperationId.from(TabletOperationType.MERGING, fateId2);
+ var opid2 =
+ TabletOperationId.from(TabletOperationType.MERGING,
FateId.from(type, UUID.randomUUID()));
accepted.set(0);
total.set(0);
diff --git
a/test/src/main/java/org/apache/accumulo/test/functional/ManagerAssignmentIT.java
b/test/src/main/java/org/apache/accumulo/test/functional/ManagerAssignmentIT.java
index 2e61925e65..9494a7e9dc 100644
---
a/test/src/main/java/org/apache/accumulo/test/functional/ManagerAssignmentIT.java
+++
b/test/src/main/java/org/apache/accumulo/test/functional/ManagerAssignmentIT.java
@@ -32,6 +32,7 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
+import java.util.UUID;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@@ -386,7 +387,7 @@ public class ManagerAssignmentIT extends
SharedMiniClusterBase {
var tableId = TableId.of(prepTableForScanTest(c, tableName));
FateInstanceType type = FateInstanceType.fromTableId(tableId);
- FateId fateId = FateId.from(type, 42L);
+ FateId fateId = FateId.from(type, UUID.randomUUID());
assertEquals(0, countTabletsWithLocation(c, tableId));
diff --git
a/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java
b/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java
index 1c5efc4835..765e044232 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java
@@ -692,7 +692,7 @@ public class MergeIT extends AccumuloClusterHarness {
var tablet = tabletsMutator.mutateTablet(extent);
ExternalCompactionId ecid =
ExternalCompactionId.generate(UUID.randomUUID());
FateInstanceType type = FateInstanceType.fromTableId(tableId);
- FateId fateId44L = FateId.from(type, 44L);
+ FateId fateId = FateId.from(type, UUID.randomUUID());
ReferencedTabletFile tmpFile =
ReferencedTabletFile.of(new
Path("file:///accumulo/tables/t-0/b-0/c1.rf"));
@@ -700,7 +700,7 @@ public class MergeIT extends AccumuloClusterHarness {
Set<StoredTabletFile> jobFiles =
Set.of(StoredTabletFile.of(new
Path("file:///accumulo/tables/t-0/b-0/b2.rf")));
CompactionMetadata ecMeta = new CompactionMetadata(jobFiles,
tmpFile, "localhost:4444",
- CompactionKind.SYSTEM, (short) 2, ceid, false, fateId44L);
+ CompactionKind.SYSTEM, (short) 2, ceid, false, fateId);
tablet.putExternalCompaction(ecid, ecMeta);
tablet.mutate();
}
diff --git
a/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
b/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
index 64b5bc46a2..fdf663e0f3 100644
---
a/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
+++
b/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
@@ -169,7 +169,8 @@ public class SplitRecoveryIT extends ConfigurableMacBase {
dataFiles.put(new ReferencedTabletFile(new Path(tdir + "/" +
RFile.EXTENSION + "_000_000")),
new DataFileValue(1000017 + i, 10000 + i));
- FateId fateId =
FateId.from(FateInstanceType.fromTableId(extent.tableId()), 0);
+ FateId fateId =
+ FateId.from(FateInstanceType.fromTableId(extent.tableId()),
UUID.randomUUID());
SortedMap<StoredTabletFile,DataFileValue> storedFiles =
new TreeMap<>(MetadataTableUtil.updateTabletDataFile(fateId, extent,
dataFiles,
new MetadataTime(0, TimeType.LOGICAL), context, zl));
diff --git
a/test/src/main/java/org/apache/accumulo/test/functional/TabletManagementIteratorIT.java
b/test/src/main/java/org/apache/accumulo/test/functional/TabletManagementIteratorIT.java
index 34723f3fae..22ae8c6535 100644
---
a/test/src/main/java/org/apache/accumulo/test/functional/TabletManagementIteratorIT.java
+++
b/test/src/main/java/org/apache/accumulo/test/functional/TabletManagementIteratorIT.java
@@ -346,9 +346,8 @@ public class TabletManagementIteratorIT extends
AccumuloClusterHarness {
// Sets an operation type on all tablets up to the end row
private void setOperationId(AccumuloClient client, String table, String
tableNameToModify,
Text end, TabletOperationType opType) throws TableNotFoundException {
- FateInstanceType type = FateInstanceType.fromNamespaceOrTableName(table);
- FateId fateId = FateId.from(type, 42L);
- var opid = TabletOperationId.from(opType, fateId);
+ FateInstanceType instanceType =
FateInstanceType.fromNamespaceOrTableName(table);
+ var opid = TabletOperationId.from(opType, FateId.from(instanceType,
UUID.randomUUID()));
TableId tableIdToModify =
TableId.of(client.tableOperations().tableIdMap().get(tableNameToModify));
try (TabletsMetadata tabletsMetadata =