This is an automated email from the ASF dual-hosted git repository.

ctubbsii pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/master by this push:
     new 6abe601  Fix new warnings and broken IT
6abe601 is described below

commit 6abe601a8b6c20ed004b9f30373d9614d6a0f245
Author: Christopher Tubbs <ctubb...@apache.org>
AuthorDate: Mon Apr 6 16:00:10 2020 -0400

    Fix new warnings and broken IT
    
    * Fix unused variables leftover from #1453
    * Fix warnings and NamespacesIT from #1053/#1060
      * Fix ambiguous use of array as varargs param
      * Pass around importDirs as List instead of array
      * Export table before attempting import (NamespacesIT)
        in order to create the metadata
---
 .../core/clientImpl/TableOperationsImpl.java         | 20 +++++++++-----------
 .../server/master/state/MetaDataStateStore.java      |  7 -------
 .../accumulo/server/util/MasterMetadataUtil.java     | 15 ---------------
 .../master/tableOps/tableImport/ImportTable.java     |  5 +++--
 .../accumulo/tserver/tablet/DatafileManager.java     |  4 +---
 .../java/org/apache/accumulo/test/NamespacesIT.java  |  2 ++
 6 files changed, 15 insertions(+), 38 deletions(-)

diff --git a/core/src/main/java/org/apache/accumulo/core/clientImpl/TableOperationsImpl.java b/core/src/main/java/org/apache/accumulo/core/clientImpl/TableOperationsImpl.java
index 6428359..8382c5e 100644
--- a/core/src/main/java/org/apache/accumulo/core/clientImpl/TableOperationsImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/TableOperationsImpl.java
@@ -447,8 +447,7 @@ public class TableOperationsImpl extends TableOperationsHelper {
 
         if (splits.size() <= 2) {
           addSplits(env.tableName, new TreeSet<>(splits), env.tableId);
-          for (int i = 0; i < splits.size(); i++)
-            env.latch.countDown();
+          splits.forEach(s -> env.latch.countDown());
           return;
         }
 
@@ -1492,7 +1491,7 @@ public class TableOperationsImpl extends TableOperationsHelper {
    *           if zero or more than one copy of the exportMetadata.zip file are found in the
    *           directories provided.
    */
-  public static Path findExportFile(ClientContext context, String[] importDirs)
+  public static Path findExportFile(ClientContext context, List<String> importDirs)
       throws AccumuloException {
     LinkedHashSet<Path> exportFiles = new LinkedHashSet<>();
     for (String importDir : importDirs) {
@@ -1550,15 +1549,14 @@ public class TableOperationsImpl extends TableOperationsHelper {
     checkArgument(tableName.length() <= MAX_TABLE_NAME_LEN,
         "Table name is longer than " + MAX_TABLE_NAME_LEN + " characters");
 
-    String[] importDirs = importDir.split(",");
-    try {
-      for (int i = 0; i < importDirs.length; i++) {
-        importDirs[i] = checkPath(importDirs[i], "Table", "").toString();
+    List<String> importDirs = new ArrayList<>();
+    for (String dir : importDir.split(",")) {
+      try {
+        importDirs.add(checkPath(dir, "Table", "").toString());
+      } catch (IOException e) {
+        throw new AccumuloException(e);
       }
-    } catch (IOException e) {
-      throw new AccumuloException(e);
     }
-    String normedImportDir = StringUtils.join(importDirs, ",");
 
     try {
       Path exportFilePath = findExportFile(context, importDirs);
@@ -1580,7 +1578,7 @@ public class TableOperationsImpl extends TableOperationsHelper {
     }
 
     List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableName.getBytes(UTF_8)),
-        ByteBuffer.wrap(normedImportDir.getBytes(UTF_8)));
+        ByteBuffer.wrap(StringUtils.join(importDirs, ",").getBytes(UTF_8)));
 
     Map<String,String> opts = Collections.emptyMap();
 
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/state/MetaDataStateStore.java b/server/base/src/main/java/org/apache/accumulo/server/master/state/MetaDataStateStore.java
index ce2fc7f..a92b69d 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/state/MetaDataStateStore.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/state/MetaDataStateStore.java
@@ -30,20 +30,13 @@ import org.apache.accumulo.core.metadata.schema.MetadataSchema;
 import org.apache.accumulo.core.metadata.schema.TabletMetadata.LocationType;
 import org.apache.accumulo.core.tabletserver.log.LogEntry;
 import org.apache.hadoop.fs.Path;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 class MetaDataStateStore implements TabletStateStore {
 
-  private static final int THREADS = 4;
-  private static final int LATENCY = 1000;
-  private static final int MAX_MEMORY = 200 * 1024 * 1024;
-
   protected final ClientContext context;
   protected final CurrentState state;
   private final String targetTableName;
   private final Ample ample;
-  private static final Logger log = LoggerFactory.getLogger(MetaDataStateStore.class);
 
   protected MetaDataStateStore(ClientContext context, CurrentState state, String targetTableName) {
     this.context = context;
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java
index 56112df..293252e 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java
@@ -18,8 +18,6 @@
  */
 package org.apache.accumulo.server.util;
 
-import static org.apache.accumulo.fate.util.UtilWaitThread.sleepUninterruptibly;
-
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
@@ -29,7 +27,6 @@ import java.util.Map.Entry;
 import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeMap;
-import java.util.concurrent.TimeUnit;
 
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.Scanner;
@@ -55,7 +52,6 @@ import org.apache.accumulo.fate.zookeeper.ZooLock;
 import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.hadoop.io.Text;
-import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -168,17 +164,6 @@ public class MasterMetadataUtil {
     }
   }
 
-  private static TServerInstance getTServerInstance(String address, ZooLock zooLock) {
-    while (true) {
-      try {
-        return new TServerInstance(address, zooLock.getSessionId());
-      } catch (KeeperException | InterruptedException e) {
-        log.error("{}", e.getMessage(), e);
-      }
-      sleepUninterruptibly(1, TimeUnit.SECONDS);
-    }
-  }
-
   public static void replaceDatafiles(ServerContext context, KeyExtent extent,
       Set<StoredTabletFile> datafilesToDelete, Set<StoredTabletFile> scanFiles, TabletFile path,
       Long compactionId, DataFileValue size, ZooLock zooLock) {
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/tableImport/ImportTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/tableImport/ImportTable.java
index e13ec80..a432578 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/tableImport/ImportTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/tableImport/ImportTable.java
@@ -26,6 +26,7 @@ import java.io.InputStreamReader;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.stream.Collectors;
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipInputStream;
 
@@ -97,8 +98,8 @@ public class ImportTable extends MasterRepo {
   @SuppressFBWarnings(value = "OS_OPEN_STREAM",
       justification = "closing intermediate readers would close the ZipInputStream")
   public void checkVersions(Master env) throws AcceptableThriftTableOperationException {
-    String[] exportDirs =
-        tableInfo.directories.stream().map(dm -> dm.exportDir).toArray(String[]::new);
+    List<String> exportDirs =
+        tableInfo.directories.stream().map(dm -> dm.exportDir).collect(Collectors.toList());
 
     log.debug("Searching for export file in {}", exportDirs);
 
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
index 6f5d71a..856079a 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
@@ -45,7 +45,6 @@ import org.apache.accumulo.core.util.MapCounter;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.replication.StatusUtil;
 import org.apache.accumulo.server.util.MasterMetadataUtil;
 import org.apache.accumulo.server.util.MetadataTableUtil;
@@ -509,7 +508,6 @@ class DatafileManager {
           newDatafile.getPath());
     }
 
-    TServerInstance lastLocation = null;
     // calling insert to get the new file before inserting into the metadata
     StoredTabletFile newFile = newDatafile.insert();
     synchronized (tablet) {
@@ -537,7 +535,7 @@ class DatafileManager {
 
       tablet.computeNumEntries();
 
-      lastLocation = tablet.resetLastLocation();
+      tablet.resetLastLocation();
 
       tablet.setLastCompactionID(compactionId);
       t2 = System.currentTimeMillis();
diff --git a/test/src/main/java/org/apache/accumulo/test/NamespacesIT.java b/test/src/main/java/org/apache/accumulo/test/NamespacesIT.java
index f38bd53..6f00ba0 100644
--- a/test/src/main/java/org/apache/accumulo/test/NamespacesIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/NamespacesIT.java
@@ -978,6 +978,8 @@ public class NamespacesIT extends SharedMiniClusterBase {
     ops.create("a");
     assertAccumuloExceptionNoNamespace(
        () -> ops.clone("a", tableName, true, Collections.emptyMap(), Collections.emptySet()));
+    ops.offline("a", true);
+    ops.exportTable("a", System.getProperty("user.dir") + "/target");
     assertAccumuloExceptionNoNamespace(
        () -> ops.importTable(tableName, System.getProperty("user.dir") + "/target"));
 
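For context on the "ambiguous use of array as varargs param" item in the commit message, the sketch below (illustrative only, not taken from the Accumulo sources; the debug() method is a hypothetical stand-in for an SLF4J-style Logger.debug(String, Object...) call) shows why passing a String[] to an Object... parameter draws a compiler warning, and why carrying the directories as a List avoids it.

    import java.util.Arrays;
    import java.util.List;

    // Illustrative sketch of the "array as varargs" warning; names and values are hypothetical.
    public class VarargsAmbiguitySketch {

      // Stand-in for a logging call such as SLF4J's Logger.debug(String, Object...).
      static void debug(String format, Object... args) {
        System.out.println(format + " " + Arrays.toString(args));
      }

      public static void main(String[] args) {
        String[] exportDirs = {"/import/dir1", "/import/dir2"};

        // Ambiguous: a String[] passed to Object... could either be spread into the
        // varargs array or wrapped as a single element, so the compiler warns.
        // debug("Searching for export file in {}", exportDirs);

        // Explicit casts silence the warning by picking one interpretation:
        debug("Searching for export file in {}", (Object) exportDirs);   // one argument
        debug("Searching for export file in {}", (Object[]) exportDirs); // spread as varargs

        // The approach taken in this commit: pass a List, which is always a single
        // argument, so the call is unambiguous and warning-free.
        List<String> importDirs = Arrays.asList(exportDirs);
        debug("Searching for export file in {}", importDirs);
      }
    }

Switching the helper signatures to List<String> (as in findExportFile and checkVersions above) removes the warning at every call site without needing casts.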
