This is an automated email from the ASF dual-hosted git repository.

cshannon pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/main by this push:
     new b4578f62a4 Clean up some usage of StoredTabletFile (#3837)
b4578f62a4 is described below

commit b4578f62a4bb2675547aaffe36bd4f11919e623c
Author: Dom G <domgargu...@apache.org>
AuthorDate: Thu Oct 12 07:44:27 2023 -0400

    Clean up some usage of StoredTabletFile (#3837)
    
    Update creation of StoredTabletFile objects to use the new helper methods,
    simplifying creation when initializing with infinite ranges.
---
 .../core/metadata/CompactableFileImpl.java         |   3 +-
 .../metadata/schema/ReferencedTabletFileTest.java  |  10 +-
 .../core/metadata/schema/TabletMetadataTest.java   |   9 +-
 .../accumulo/server/metadata/ServerAmpleImpl.java  |   4 +-
 .../constraints/MetadataConstraintsTest.java       | 228 +++++++++------------
 .../manager/tableOps/bulkVer2/LoadFiles.java       |   3 +-
 .../tablet/CompactableImplFileManagerTest.java     |   4 +-
 .../java/org/apache/accumulo/test/CloneIT.java     |   5 +-
 .../test/functional/GarbageCollectorIT.java        |  18 +-
 9 files changed, 126 insertions(+), 158 deletions(-)

diff --git 
a/core/src/main/java/org/apache/accumulo/core/metadata/CompactableFileImpl.java 
b/core/src/main/java/org/apache/accumulo/core/metadata/CompactableFileImpl.java
index 31fdc55d16..9944245f97 100644
--- 
a/core/src/main/java/org/apache/accumulo/core/metadata/CompactableFileImpl.java
+++ 
b/core/src/main/java/org/apache/accumulo/core/metadata/CompactableFileImpl.java
@@ -22,7 +22,6 @@ import java.net.URI;
 import java.util.Objects;
 
 import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
-import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.metadata.schema.DataFileValue;
 
 public class CompactableFileImpl implements CompactableFile {
@@ -31,7 +30,7 @@ public class CompactableFileImpl implements CompactableFile {
   private final DataFileValue dataFileValue;
 
   public CompactableFileImpl(URI uri, long size, long entries) {
-    this.storedTabletFile = StoredTabletFile.of(uri, new Range());
+    this.storedTabletFile = StoredTabletFile.of(uri);
     this.dataFileValue = new DataFileValue(size, entries);
   }
 
diff --git 
a/core/src/test/java/org/apache/accumulo/core/metadata/schema/ReferencedTabletFileTest.java
 
b/core/src/test/java/org/apache/accumulo/core/metadata/schema/ReferencedTabletFileTest.java
index 47deaeb0c9..93e7bc0525 100644
--- 
a/core/src/test/java/org/apache/accumulo/core/metadata/schema/ReferencedTabletFileTest.java
+++ 
b/core/src/test/java/org/apache/accumulo/core/metadata/schema/ReferencedTabletFileTest.java
@@ -22,8 +22,6 @@ import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
-import java.net.URI;
-
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.PartialKey;
 import org.apache.accumulo.core.data.Range;
@@ -113,10 +111,10 @@ public class ReferencedTabletFileTest {
     String metadataEntry = uglyVolume + "/tables/" + id + "/" + dir + "/" + 
filename;
     ReferencedTabletFile uglyFile =
         test(metadataEntry, "hdfs://nn.somewhere.com:86753/accumulo", id, dir, 
filename);
-    ReferencedTabletFile niceFile = StoredTabletFile.of(
-        URI.create(
-            "hdfs://nn.somewhere.com:86753/accumulo/tables/" + id + "/" + dir 
+ "/" + filename),
-        new Range()).getTabletFile();
+    ReferencedTabletFile niceFile = StoredTabletFile
+        .of(new Path(
+            "hdfs://nn.somewhere.com:86753/accumulo/tables/" + id + "/" + dir 
+ "/" + filename))
+        .getTabletFile();
     assertEquals(niceFile, uglyFile);
     assertEquals(niceFile.hashCode(), uglyFile.hashCode());
   }
diff --git 
a/core/src/test/java/org/apache/accumulo/core/metadata/schema/TabletMetadataTest.java
 
b/core/src/test/java/org/apache/accumulo/core/metadata/schema/TabletMetadataTest.java
index 3b856726be..244271534f 100644
--- 
a/core/src/test/java/org/apache/accumulo/core/metadata/schema/TabletMetadataTest.java
+++ 
b/core/src/test/java/org/apache/accumulo/core/metadata/schema/TabletMetadataTest.java
@@ -62,6 +62,7 @@ import 
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.Ta
 import org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType;
 import org.apache.accumulo.core.metadata.schema.TabletMetadata.LocationType;
 import org.apache.accumulo.core.tabletserver.log.LogEntry;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.junit.jupiter.api.Test;
 
@@ -88,8 +89,8 @@ public class TabletMetadataTest {
     mutation.at().family(ClonedColumnFamily.NAME).qualifier("").put("OK");
 
     DataFileValue dfv1 = new DataFileValue(555, 23);
-    StoredTabletFile tf1 = new 
StoredTabletFile(serialize("hdfs://nn1/acc/tables/1/t-0001/df1.rf"));
-    StoredTabletFile tf2 = new 
StoredTabletFile(serialize("hdfs://nn1/acc/tables/1/t-0001/df2.rf"));
+    StoredTabletFile tf1 = StoredTabletFile.of(new 
Path("hdfs://nn1/acc/tables/1/t-0001/df1.rf"));
+    StoredTabletFile tf2 = StoredTabletFile.of(new 
Path("hdfs://nn1/acc/tables/1/t-0001/df2.rf"));
     
mutation.at().family(DataFileColumnFamily.NAME).qualifier(tf1.getMetadata()).put(dfv1.encode());
     DataFileValue dfv2 = new DataFileValue(234, 13);
     
mutation.at().family(DataFileColumnFamily.NAME).qualifier(tf2.getMetadata()).put(dfv2.encode());
@@ -105,8 +106,8 @@ public class TabletMetadataTest {
     
mutation.at().family(le2.getColumnFamily()).qualifier(le2.getColumnQualifier())
         .timestamp(le2.timestamp).put(le2.getValue());
 
-    StoredTabletFile sf1 = new 
StoredTabletFile(serialize("hdfs://nn1/acc/tables/1/t-0001/sf1.rf"));
-    StoredTabletFile sf2 = new 
StoredTabletFile(serialize("hdfs://nn1/acc/tables/1/t-0001/sf2.rf"));
+    StoredTabletFile sf1 = StoredTabletFile.of(new 
Path("hdfs://nn1/acc/tables/1/t-0001/sf1.rf"));
+    StoredTabletFile sf2 = StoredTabletFile.of(new 
Path("hdfs://nn1/acc/tables/1/t-0001/sf2.rf"));
     
mutation.at().family(ScanFileColumnFamily.NAME).qualifier(sf1.getMetadata()).put("");
     
mutation.at().family(ScanFileColumnFamily.NAME).qualifier(sf2.getMetadata()).put("");
 
diff --git 
a/server/base/src/main/java/org/apache/accumulo/server/metadata/ServerAmpleImpl.java
 
b/server/base/src/main/java/org/apache/accumulo/server/metadata/ServerAmpleImpl.java
index de4a1f8585..ea57481987 100644
--- 
a/server/base/src/main/java/org/apache/accumulo/server/metadata/ServerAmpleImpl.java
+++ 
b/server/base/src/main/java/org/apache/accumulo/server/metadata/ServerAmpleImpl.java
@@ -141,8 +141,8 @@ public class ServerAmpleImpl extends AmpleImpl implements 
Ample {
 
     if (DataLevel.of(tableId) == DataLevel.ROOT) {
       // Directories are unexpected for the root tablet, so convert to stored 
tablet file
-      mutateRootGcCandidates(rgcc -> rgcc.add(candidates.stream().map(
-          reference -> 
StoredTabletFile.of(URI.create(reference.getMetadataPath()), new Range()))));
+      mutateRootGcCandidates(rgcc -> rgcc.add(candidates.stream()
+          .map(reference -> 
StoredTabletFile.of(URI.create(reference.getMetadataPath())))));
       return;
     }
 
diff --git 
a/server/base/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
 
b/server/base/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
index f891a0b132..38f6d27ad2 100644
--- 
a/server/base/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
+++ 
b/server/base/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
@@ -23,7 +23,6 @@ import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertNull;
 
 import java.lang.reflect.Method;
-import java.net.URI;
 import java.util.Base64;
 import java.util.List;
 
@@ -42,6 +41,7 @@ import 
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.Se
 import 
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.TabletColumnFamily;
 import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.zookeeper.TransactionWatcher.Arbitrator;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.easymock.EasyMock;
 import org.junit.jupiter.api.Test;
@@ -160,130 +160,111 @@ public class MetadataConstraintsTest {
 
     // inactive txid
     m = new Mutation(new Text("0;foo"));
-    m.put(BulkFileColumnFamily.NAME,
-        StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new 
Range())
-            .getMetadataText(),
+    m.put(
+        BulkFileColumnFamily.NAME, StoredTabletFile
+            .of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
         new Value("12345"));
-    m.put(DataFileColumnFamily.NAME,
-        StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new 
Range())
-            .getMetadataText(),
+    m.put(
+        DataFileColumnFamily.NAME, StoredTabletFile
+            .of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
         new DataFileValue(1, 1).encodeAsValue());
     assertViolation(mc, m, (short) 8);
 
     // txid that throws exception
     m = new Mutation(new Text("0;foo"));
-    m.put(BulkFileColumnFamily.NAME,
-        StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new 
Range())
-            .getMetadataText(),
+    m.put(
+        BulkFileColumnFamily.NAME, StoredTabletFile
+            .of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
         new Value("9"));
-    m.put(DataFileColumnFamily.NAME,
-        StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new 
Range())
-            .getMetadataText(),
+    m.put(
+        DataFileColumnFamily.NAME, StoredTabletFile
+            .of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
         new DataFileValue(1, 1).encodeAsValue());
     assertViolation(mc, m, (short) 8);
 
     // active txid w/ file
     m = new Mutation(new Text("0;foo"));
-    m.put(BulkFileColumnFamily.NAME,
-        StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new 
Range())
-            .getMetadataText(),
+    m.put(
+        BulkFileColumnFamily.NAME, StoredTabletFile
+            .of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
         new Value("5"));
-    m.put(DataFileColumnFamily.NAME,
-        StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new 
Range())
-            .getMetadataText(),
+    m.put(
+        DataFileColumnFamily.NAME, StoredTabletFile
+            .of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
         new DataFileValue(1, 1).encodeAsValue());
     violations = mc.check(createEnv(), m);
     assertNull(violations);
 
     // active txid w/o file
     m = new Mutation(new Text("0;foo"));
-    m.put(BulkFileColumnFamily.NAME,
-        StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new 
Range())
-            .getMetadataText(),
+    m.put(
+        BulkFileColumnFamily.NAME, StoredTabletFile
+            .of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
         new Value("5"));
     assertViolation(mc, m, (short) 8);
 
     // two active txids w/ files
     m = new Mutation(new Text("0;foo"));
-    m.put(BulkFileColumnFamily.NAME,
-        StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new 
Range())
-            .getMetadataText(),
+    m.put(
+        BulkFileColumnFamily.NAME, StoredTabletFile
+            .of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
         new Value("5"));
-    m.put(DataFileColumnFamily.NAME,
-        StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new 
Range())
-            .getMetadataText(),
+    m.put(
+        DataFileColumnFamily.NAME, StoredTabletFile
+            .of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
         new DataFileValue(1, 1).encodeAsValue());
-    m.put(BulkFileColumnFamily.NAME,
-        StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile2"), new 
Range())
-            .getMetadataText(),
+    m.put(
+        BulkFileColumnFamily.NAME, StoredTabletFile
+            .of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile2")).getMetadataText(),
         new Value("7"));
-    m.put(DataFileColumnFamily.NAME,
-        StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile2"), new 
Range())
-            .getMetadataText(),
+    m.put(
+        DataFileColumnFamily.NAME, StoredTabletFile
+            .of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile2")).getMetadataText(),
         new DataFileValue(1, 1).encodeAsValue());
     assertViolation(mc, m, (short) 8);
 
     // two files w/ one active txid
     m = new Mutation(new Text("0;foo"));
-    m.put(BulkFileColumnFamily.NAME,
-        StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new 
Range())
-            .getMetadataText(),
+    m.put(
+        BulkFileColumnFamily.NAME, StoredTabletFile
+            .of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
         new Value("5"));
-    m.put(DataFileColumnFamily.NAME,
-        StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new 
Range())
-            .getMetadataText(),
+    m.put(
+        DataFileColumnFamily.NAME, StoredTabletFile
+            .of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
         new DataFileValue(1, 1).encodeAsValue());
-    m.put(BulkFileColumnFamily.NAME,
-        StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile2"), new 
Range())
-            .getMetadataText(),
+    m.put(
+        BulkFileColumnFamily.NAME, StoredTabletFile
+            .of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile2")).getMetadataText(),
         new Value("5"));
-    m.put(DataFileColumnFamily.NAME,
-        StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile2"), new 
Range())
-            .getMetadataText(),
+    m.put(
+        DataFileColumnFamily.NAME, StoredTabletFile
+            .of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile2")).getMetadataText(),
         new DataFileValue(1, 1).encodeAsValue());
     violations = mc.check(createEnv(), m);
     assertNull(violations);
 
     // two loaded w/ one active txid and one file
     m = new Mutation(new Text("0;foo"));
-    m.put(BulkFileColumnFamily.NAME,
-        StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new 
Range())
-            .getMetadataText(),
+    m.put(
+        BulkFileColumnFamily.NAME, StoredTabletFile
+            .of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
         new Value("5"));
-    m.put(DataFileColumnFamily.NAME,
-        StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new 
Range())
-            .getMetadataText(),
+    m.put(
+        DataFileColumnFamily.NAME, StoredTabletFile
+            .of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
         new DataFileValue(1, 1).encodeAsValue());
-    m.put(BulkFileColumnFamily.NAME,
-        StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile2"), new 
Range())
-            .getMetadataText(),
+    m.put(
+        BulkFileColumnFamily.NAME, StoredTabletFile
+            .of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile2")).getMetadataText(),
         new Value("5"));
     assertViolation(mc, m, (short) 8);
 
     // active txid, mutation that looks like split
     m = new Mutation(new Text("0;foo"));
-    m.put(BulkFileColumnFamily.NAME,
-        StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new 
Range())
-            .getMetadataText(),
+    m.put(
+        BulkFileColumnFamily.NAME, StoredTabletFile
+            .of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
         new Value("5"));
     ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value("/t1"));
     violations = mc.check(createEnv(), m);
@@ -291,10 +272,9 @@ public class MetadataConstraintsTest {
 
     // inactive txid, mutation that looks like split
     m = new Mutation(new Text("0;foo"));
-    m.put(BulkFileColumnFamily.NAME,
-        StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new 
Range())
-            .getMetadataText(),
+    m.put(
+        BulkFileColumnFamily.NAME, StoredTabletFile
+            .of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
         new Value("12345"));
     ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value("/t1"));
     violations = mc.check(createEnv(), m);
@@ -302,10 +282,9 @@ public class MetadataConstraintsTest {
 
     // active txid, mutation that looks like a load
     m = new Mutation(new Text("0;foo"));
-    m.put(BulkFileColumnFamily.NAME,
-        StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new 
Range())
-            .getMetadataText(),
+    m.put(
+        BulkFileColumnFamily.NAME, StoredTabletFile
+            .of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
         new Value("5"));
     m.put(CurrentLocationColumnFamily.NAME, new Text("789"), new 
Value("127.0.0.1:9997"));
     violations = mc.check(createEnv(), m);
@@ -313,10 +292,9 @@ public class MetadataConstraintsTest {
 
     // inactive txid, mutation that looks like a load
     m = new Mutation(new Text("0;foo"));
-    m.put(BulkFileColumnFamily.NAME,
-        StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new 
Range())
-            .getMetadataText(),
+    m.put(
+        BulkFileColumnFamily.NAME, StoredTabletFile
+            .of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
         new Value("12345"));
     m.put(CurrentLocationColumnFamily.NAME, new Text("789"), new 
Value("127.0.0.1:9997"));
     violations = mc.check(createEnv(), m);
@@ -324,26 +302,24 @@ public class MetadataConstraintsTest {
 
     // deleting a load flag
     m = new Mutation(new Text("0;foo"));
-    m.putDelete(BulkFileColumnFamily.NAME,
-        StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new 
Range())
-            .getMetadataText());
+    m.putDelete(BulkFileColumnFamily.NAME, StoredTabletFile
+        .of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText());
     violations = mc.check(createEnv(), m);
     assertNull(violations);
 
     // Missing beginning of path
     m = new Mutation(new Text("0;foo"));
-    m.put(BulkFileColumnFamily.NAME, new Text(StoredTabletFile
-        .of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), 
new Range())
-        
.getMetadata().replace("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile", 
"/someFile")),
+    m.put(BulkFileColumnFamily.NAME,
+        new Text(StoredTabletFile.of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"))
+            .getMetadata()
+            .replace("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile", 
"/someFile")),
         new Value("5"));
     assertViolation(mc, m, (short) 9);
 
     // Missing tables directory in path
     m = new Mutation(new Text("0;foo"));
     m.put(BulkFileColumnFamily.NAME,
-        new Text(StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new 
Range())
+        new Text(StoredTabletFile.of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"))
             
.getMetadata().replace("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile",
                 "hdfs://1.2.3.4/accumulo/2a/t-0003/someFile")),
         new Value("5"));
@@ -351,10 +327,9 @@ public class MetadataConstraintsTest {
 
     // No DataFileColumnFamily included
     m = new Mutation(new Text("0;foo"));
-    m.put(BulkFileColumnFamily.NAME,
-        StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new 
Range())
-            .getMetadataText(),
+    m.put(
+        BulkFileColumnFamily.NAME, StoredTabletFile
+            .of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
         new Value("5"));
     assertViolation(mc, m, (short) 8);
 
@@ -386,8 +361,7 @@ public class MetadataConstraintsTest {
     // {"path":"","startRow":"","endRow":""}
     m = new Mutation(new Text("0;foo"));
     m.put(BulkFileColumnFamily.NAME,
-        new Text(StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new 
Range())
+        new Text(StoredTabletFile.of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"))
             .getMetadata().replaceFirst("\"path\":\".*\",\"startRow", 
"\"path\":\"\",\"startRow")),
         new Value("5"));
     assertViolation(mc, m, (short) 9);
@@ -411,10 +385,11 @@ public class MetadataConstraintsTest {
     // Bad Json - endRow will be replaced with encoded row without the 
exclusive byte 0x00 which is
     // required for an endRow so will fail validation
     m = new Mutation(new Text("0;foo"));
-    m.put(BulkFileColumnFamily.NAME, new Text(StoredTabletFile
-        .of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), 
new Range("a", "b"))
-        .getMetadata()
-        .replaceFirst("\"endRow\":\".*\"", "\"endRow\":\"" + 
encodeRowForMetadata("bad") + "\"")),
+    m.put(BulkFileColumnFamily.NAME,
+        new Text(StoredTabletFile
+            .of(new Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), 
new Range("a", "b"))
+            .getMetadata().replaceFirst("\"endRow\":\".*\"",
+                "\"endRow\":\"" + encodeRowForMetadata("bad") + "\"")),
         new Value("5"));
     assertViolation(mc, m, (short) 9);
 
@@ -437,9 +412,10 @@ public class MetadataConstraintsTest {
 
     // Missing beginning of path
     m = new Mutation(new Text("0;foo"));
-    m.put(columnFamily, new Text(StoredTabletFile
-        .of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), 
new Range())
-        
.getMetadata().replace("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile", 
"/someFile")),
+    m.put(columnFamily,
+        new Text(StoredTabletFile.of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"))
+            .getMetadata()
+            .replace("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile", 
"/someFile")),
         value);
     assertViolation(mc, m, (short) 9);
 
@@ -461,8 +437,7 @@ public class MetadataConstraintsTest {
     // {"path":"","startRow":"","endRow":""}
     m = new Mutation(new Text("0;foo"));
     m.put(columnFamily,
-        new Text(StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new 
Range())
+        new Text(StoredTabletFile.of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"))
             .getMetadata().replaceFirst("\"path\":\".*\",\"startRow", 
"\"path\":\"\",\"startRow")),
         value);
     assertViolation(mc, m, (short) 9);
@@ -496,18 +471,18 @@ public class MetadataConstraintsTest {
     // Bad Json - endRow will be replaced with encoded row without the 
exclusive byte 0x00 which is
     // required for an endRow so this will fail validation
     m = new Mutation(new Text("0;foo"));
-    m.put(columnFamily, new Text(StoredTabletFile
-        .of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), 
new Range("a", "b"))
-        .getMetadata()
-        .replaceFirst("\"endRow\":\".*\"", "\"endRow\":\"" + 
encodeRowForMetadata("b") + "\"")),
+    m.put(columnFamily,
+        new Text(StoredTabletFile
+            .of(new Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), 
new Range("a", "b"))
+            .getMetadata()
+            .replaceFirst("\"endRow\":\".*\"", "\"endRow\":\"" + 
encodeRowForMetadata("b") + "\"")),
         value);
     assertViolation(mc, m, (short) 9);
 
     // Missing tables directory in path
     m = new Mutation(new Text("0;foo"));
     m.put(columnFamily,
-        new Text(StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new 
Range())
+        new Text(StoredTabletFile.of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"))
             
.getMetadata().replace("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile",
                 "hdfs://1.2.3.4/accumulo/2a/t-0003/someFile")),
         new DataFileValue(1, 1).encodeAsValue());
@@ -515,19 +490,20 @@ public class MetadataConstraintsTest {
 
     // Should pass validation (inf range)
     m = new Mutation(new Text("0;foo"));
-    m.put(columnFamily,
-        StoredTabletFile
-            
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new 
Range())
-            .getMetadataText(),
+    m.put(
+        columnFamily, StoredTabletFile
+            .of(new 
Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile")).getMetadataText(),
         new DataFileValue(1, 1).encodeAsValue());
     violations = mc.check(createEnv(), m);
     assertNull(violations);
 
     // Should pass validation with range set
     m = new Mutation(new Text("0;foo"));
-    m.put(columnFamily, StoredTabletFile
-        .of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), 
new Range("a", "b"))
-        .getMetadataText(), new DataFileValue(1, 1).encodeAsValue());
+    m.put(columnFamily,
+        StoredTabletFile
+            .of(new Path("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), 
new Range("a", "b"))
+            .getMetadataText(),
+        new DataFileValue(1, 1).encodeAsValue());
     violations = mc.check(createEnv(), m);
     assertNull(violations);
 
diff --git 
a/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/bulkVer2/LoadFiles.java
 
b/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/bulkVer2/LoadFiles.java
index 6d1cbb027e..a5234819dc 100644
--- 
a/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/bulkVer2/LoadFiles.java
+++ 
b/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/bulkVer2/LoadFiles.java
@@ -41,7 +41,6 @@ import org.apache.accumulo.core.clientImpl.bulk.BulkSerialize;
 import org.apache.accumulo.core.clientImpl.bulk.LoadMappingIterator;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.TableId;
 import org.apache.accumulo.core.dataImpl.KeyExtent;
 import org.apache.accumulo.core.dataImpl.thrift.TKeyExtent;
@@ -291,7 +290,7 @@ class LoadFiles extends ManagerRepo {
 
         for (final Bulk.FileInfo fileInfo : files) {
           StoredTabletFile fullPath =
-              StoredTabletFile.of(new Path(bulkDir, 
fileInfo.getFileName()).toUri(), new Range());
+              StoredTabletFile.of(new Path(bulkDir, fileInfo.getFileName()));
           byte[] val =
               new DataFileValue(fileInfo.getEstFileSize(), 
fileInfo.getEstNumEntries()).encode();
           mutation.put(fam, fullPath.getMetadata().getBytes(UTF_8), val);
diff --git 
a/server/tserver/src/test/java/org/apache/accumulo/tserver/tablet/CompactableImplFileManagerTest.java
 
b/server/tserver/src/test/java/org/apache/accumulo/tserver/tablet/CompactableImplFileManagerTest.java
index 9f6f115335..6f43f7ba51 100644
--- 
a/server/tserver/src/test/java/org/apache/accumulo/tserver/tablet/CompactableImplFileManagerTest.java
+++ 
b/server/tserver/src/test/java/org/apache/accumulo/tserver/tablet/CompactableImplFileManagerTest.java
@@ -43,6 +43,7 @@ import 
org.apache.accumulo.core.spi.compaction.CompactionExecutorId;
 import org.apache.accumulo.core.spi.compaction.CompactionJob;
 import org.apache.accumulo.core.spi.compaction.CompactionKind;
 import org.apache.accumulo.tserver.tablet.CompactableImpl.FileSelectionStatus;
+import org.apache.hadoop.fs.Path;
 import org.junit.jupiter.api.Test;
 
 import com.google.common.collect.Sets;
@@ -385,8 +386,7 @@ public class CompactableImplFileManagerTest {
   }
 
   static StoredTabletFile newFile(String f) {
-    return new StoredTabletFile(
-        StoredTabletFile.serialize("hdfs://nn1/accumulo/tables/1/t-0001/" + 
f));
+    return StoredTabletFile.of(new Path("hdfs://nn1/accumulo/tables/1/t-0001/" 
+ f));
   }
 
   static Set<StoredTabletFile> newFiles(String... strings) {
diff --git a/test/src/main/java/org/apache/accumulo/test/CloneIT.java 
b/test/src/main/java/org/apache/accumulo/test/CloneIT.java
index de1747818a..a55734bb36 100644
--- a/test/src/main/java/org/apache/accumulo/test/CloneIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/CloneIT.java
@@ -22,7 +22,6 @@ import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
-import java.net.URI;
 import java.util.HashSet;
 import java.util.Map.Entry;
 
@@ -32,7 +31,6 @@ import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.TableId;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.dataImpl.KeyExtent;
@@ -45,6 +43,7 @@ import 
org.apache.accumulo.core.metadata.schema.TabletDeletedException;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.harness.AccumuloClusterHarness;
 import org.apache.accumulo.server.util.MetadataTableUtil;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.junit.jupiter.api.Test;
 
@@ -410,6 +409,6 @@ public class CloneIT extends AccumuloClusterHarness {
   }
 
   private static String getMetadata(String file) {
-    return StoredTabletFile.of(URI.create(file), new Range()).getMetadata();
+    return StoredTabletFile.of(new Path(file)).getMetadata();
   }
 }
diff --git 
a/test/src/main/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
 
b/test/src/main/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
index 6f3e3c3d87..c7cb622927 100644
--- 
a/test/src/main/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
+++ 
b/test/src/main/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
@@ -32,7 +32,6 @@ import java.time.Duration;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Iterator;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map.Entry;
 import java.util.Objects;
@@ -333,9 +332,8 @@ public class GarbageCollectorIT extends ConfigurableMacBase 
{
         List.of(new 
GcCandidate("hdfs://foo.com:6000/user/foo/tables/+r/t-0/F00.rf", 0L),
             new 
GcCandidate("hdfs://foo.com:6000/user/foo/tables/+r/t-0/F001.rf", 1L));
 
-    List<StoredTabletFile> stfs = new LinkedList<>();
-    candidates.stream().forEach(
-        temp -> stfs.add(new 
StoredTabletFile(StoredTabletFile.serialize(temp.getPath()))));
+    List<StoredTabletFile> stfs = candidates.stream()
+        .map(temp -> StoredTabletFile.of(new 
Path(temp.getPath()))).collect(Collectors.toList());
 
     log.debug("Adding root table GcCandidates");
     ample.putGcCandidates(tableId, stfs);
@@ -347,7 +345,7 @@ public class GarbageCollectorIT extends ConfigurableMacBase 
{
     while (cIter.hasNext()) {
       // Duplicate these entries back into zookeeper
       ample.putGcCandidates(tableId,
-          List.of(new 
StoredTabletFile(StoredTabletFile.serialize(cIter.next().getPath()))));
+          List.of(StoredTabletFile.of(new Path(cIter.next().getPath()))));
       counter++;
     }
     // Ensure Zookeeper collapsed the entries and did not support duplicates.
@@ -473,11 +471,9 @@ public class GarbageCollectorIT extends 
ConfigurableMacBase {
 
     // Create multiple candidate entries
     List<StoredTabletFile> stfs = Stream
-        .of(new StoredTabletFile(
-            
StoredTabletFile.serialize("hdfs://foo.com:6000/user/foo/tables/a/t-0/F00.rf")),
-            new StoredTabletFile(
-                
StoredTabletFile.serialize("hdfs://foo.com:6000/user/foo/tables/b/t-0/F00.rf")))
-        .collect(Collectors.toList());
+        .of("hdfs://foo.com:6000/user/foo/tables/a/t-0/F00.rf",
+            "hdfs://foo.com:6000/user/foo/tables/b/t-0/F00.rf")
+        .map(Path::new).map(StoredTabletFile::of).collect(Collectors.toList());
 
     log.debug("Adding candidates to table {}", tableId);
     ample.putGcCandidates(tableId, stfs);
@@ -494,7 +490,7 @@ public class GarbageCollectorIT extends ConfigurableMacBase 
{
     GcCandidate deleteCandidate = candidates.get(0);
     assertNotNull(deleteCandidate);
     ample.putGcCandidates(tableId,
-        List.of(new 
StoredTabletFile(StoredTabletFile.serialize(deleteCandidate.getPath()))));
+        List.of(StoredTabletFile.of(new Path(deleteCandidate.getPath()))));
 
     log.debug("Deleting Candidate {}", deleteCandidate);
     ample.deleteGcCandidates(datalevel, List.of(deleteCandidate), 
Ample.GcCandidateType.INUSE);

Reply via email to