ACCUMULO-564 changes for 0.23 compile compatibility

git-svn-id: https://svn.apache.org/repos/asf/accumulo/trunk@1332674 13f79535-47bb-0310-9956-ffa450edef68
(cherry picked from commit a5765d111e67b5f1cd645909285f729b902c9b6a)
Reason: Hadoop 2 compatibility
Author: Billie Rinaldi <bil...@apache.org>
Ref: ACCUMULO-1792

This is a partial backport from 1.5 that excludes the wiki-search example
portions but includes changes to Accumulo.java and DefaultServlet.java.

A few minor 1.4.3-specific hacks to get compilation to work against Hadoop
2.0.2-alpha: CoordinateRecoveryTask.java and LogSort.java.

Author: Jonathan M Hsieh <j...@cloudera.com>
Signed-off-by: Eric Newton <eric.new...@gmail.com>

Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/31e4dd15
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/31e4dd15
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/31e4dd15

Branch: refs/heads/1.4.5-SNAPSHOT
Commit: 31e4dd15f5cb8bc85372dd66849e3452bae3ee7f
Parents: 2c83ca3
Author: Jonathan M Hsieh <j...@cloudera.com>
Authored: Thu May 30 17:41:58 2013 -0700
Committer: Eric Newton <eric.new...@gmail.com>
Committed: Mon Nov 25 16:06:42 2013 -0500

----------------------------------------------------------------------
 .../src/main/java/org/apache/accumulo/server/Accumulo.java    | 1 -
 .../apache/accumulo/server/master/CoordinateRecoveryTask.java | 3 +++
 .../main/java/org/apache/accumulo/server/master/LogSort.java  | 2 +-
 .../accumulo/server/monitor/servlets/DefaultServlet.java      | 6 +++---
 4 files changed, 7 insertions(+), 5 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/31e4dd15/src/server/src/main/java/org/apache/accumulo/server/Accumulo.java
----------------------------------------------------------------------
diff --git a/src/server/src/main/java/org/apache/accumulo/server/Accumulo.java b/src/server/src/main/java/org/apache/accumulo/server/Accumulo.java
index 32462b7..253962b 100644
--- a/src/server/src/main/java/org/apache/accumulo/server/Accumulo.java
+++ b/src/server/src/main/java/org/apache/accumulo/server/Accumulo.java
@@ -43,7 +43,6 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
 import org.apache.log4j.Logger;
 import org.apache.log4j.helpers.LogLog;
 import org.apache.log4j.xml.DOMConfigurator;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/31e4dd15/src/server/src/main/java/org/apache/accumulo/server/master/CoordinateRecoveryTask.java
----------------------------------------------------------------------
diff --git a/src/server/src/main/java/org/apache/accumulo/server/master/CoordinateRecoveryTask.java b/src/server/src/main/java/org/apache/accumulo/server/master/CoordinateRecoveryTask.java
index 79065a2..64ed42e 100644
--- a/src/server/src/main/java/org/apache/accumulo/server/master/CoordinateRecoveryTask.java
+++ b/src/server/src/main/java/org/apache/accumulo/server/master/CoordinateRecoveryTask.java
@@ -257,6 +257,9 @@ public class CoordinateRecoveryTask implements Runnable {
       return new RecoveryStatus(logFile.server, logFile.file, (sortJob == null ? 0. : sortJob.mapProgress()), (sortJob == null ? 0. : sortJob.reduceProgress()),
           (int) (System.currentTimeMillis() - copyStartTime), (sortJob != null) ? 1. : (copySize == 0 ? 0 : copiedSoFar() / (double) copySize));
+    } catch (InterruptedException ie) {
+      // Hadoop 2.0.2-alpha's Job.mapProgress throws InterruptedException; 1.x and 2.0.4 do not.
+      return new RecoveryStatus(logFile.server, logFile.file, 1.0, 1.0, (int) (System.currentTimeMillis() - copyStartTime), 1.0);
     } catch (NullPointerException npe) {
       return new RecoveryStatus(logFile.server, logFile.file, 1.0, 1.0, (int) (System.currentTimeMillis() - copyStartTime), 1.0);
     }
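Aside for reviewers: the new catch block above mirrors the existing
NullPointerException fallback, reporting the recovery as fully progressed
rather than letting a version-specific checked exception escape. A minimal
sketch of that shape, with the hypothetical ProgressProbe/fetchProgress names
standing in for the real Job.mapProgress/reduceProgress calls:

    import java.io.IOException;

    class ProgressProbe {
      // Stand-in for a Hadoop progress call; some releases declare
      // InterruptedException here and others do not.
      double fetchProgress() throws IOException, InterruptedException {
        return 0.5;
      }

      // Same fallback shape as the patch: on interrupt or I/O failure,
      // report the job as finished instead of propagating.
      double safeProgress() {
        try {
          return fetchProgress();
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt(); // restore the flag (not in the patch, but harmless)
          return 1.0;
        } catch (IOException ioe) {
          return 1.0;
        }
      }
    }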
http://git-wip-us.apache.org/repos/asf/accumulo/blob/31e4dd15/src/server/src/main/java/org/apache/accumulo/server/master/LogSort.java
----------------------------------------------------------------------
diff --git a/src/server/src/main/java/org/apache/accumulo/server/master/LogSort.java b/src/server/src/main/java/org/apache/accumulo/server/master/LogSort.java
index 1e7f29b..006d06e 100644
--- a/src/server/src/main/java/org/apache/accumulo/server/master/LogSort.java
+++ b/src/server/src/main/java/org/apache/accumulo/server/master/LogSort.java
@@ -81,8 +81,8 @@ public class LogSort extends Configured implements Tool {
 
     @Override
     public void abortTask(TaskAttemptContext context) {
-      super.abortTask(context);
       try {
+        super.abortTask(context);
         outputFileSystem.delete(outputPath, true);
       } catch (IOException ex) {
         throw new RuntimeException(ex);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/31e4dd15/src/server/src/main/java/org/apache/accumulo/server/monitor/servlets/DefaultServlet.java
----------------------------------------------------------------------
diff --git a/src/server/src/main/java/org/apache/accumulo/server/monitor/servlets/DefaultServlet.java b/src/server/src/main/java/org/apache/accumulo/server/monitor/servlets/DefaultServlet.java
index 0eb2e45..465b253 100644
--- a/src/server/src/main/java/org/apache/accumulo/server/monitor/servlets/DefaultServlet.java
+++ b/src/server/src/main/java/org/apache/accumulo/server/monitor/servlets/DefaultServlet.java
@@ -53,7 +53,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.mapred.ClusterStatus;
 import org.apache.hadoop.mapred.JobClient;
@@ -307,8 +307,8 @@ public class DefaultServlet extends BasicServlet {
       tableRow(sb, (highlight = !highlight), "Unreplicated Capacity", bytes(fs.getRawCapacity()));
       tableRow(sb, (highlight = !highlight), "% Used", NumberType.commas(fs.getRawUsed() * 100. / fs.getRawCapacity(), 0, 90, 0, 100) + "%");
       tableRow(sb, (highlight = !highlight), "Corrupt Blocks", NumberType.commas(fs.getCorruptBlocksCount(), 0, 0));
-      DatanodeInfo[] liveNodes = fs.getClient().datanodeReport(DatanodeReportType.LIVE);
-      DatanodeInfo[] deadNodes = fs.getClient().datanodeReport(DatanodeReportType.DEAD);
+      DatanodeInfo[] liveNodes = fs.getClient().datanodeReport(FSConstants.DatanodeReportType.LIVE);
+      DatanodeInfo[] deadNodes = fs.getClient().datanodeReport(FSConstants.DatanodeReportType.DEAD);
       tableRow(sb, (highlight = !highlight), "<a href='" + liveUrl + "'>Live Data Nodes</a>", NumberType.commas(liveNodes.length));
       tableRow(sb, (highlight = !highlight), "<a href='" + deadUrl + "'>Dead Data Nodes</a>", NumberType.commas(deadNodes.length));
       long count = 0;
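Two notes on the remaining hunks. In LogSort, moving super.abortTask(context)
inside the existing try block keeps the source compiling where abortTask is
declared to throw IOException (as in Hadoop 2's mapreduce OutputCommitter)
while leaving Hadoop 1.x behavior unchanged. In DefaultServlet, importing the
outer FSConstants instead of the nested DatanodeReportType works because a
single-type import must use a type's canonical name, whereas a qualified
expression may reach a nested type inherited through a subclass; in Hadoop 2,
FSConstants appears to survive only as a deprecated shim extending
HdfsConstants. A tiny self-contained sketch of that language rule, with
hypothetical Base/Compat classes standing in for HdfsConstants/FSConstants:

    // Base and Compat are hypothetical stand-ins for HdfsConstants and the
    // deprecated FSConstants; ReportType stands in for DatanodeReportType.
    class Base {
      enum ReportType { LIVE, DEAD }
    }

    class Compat extends Base {
      // Inherits the nested enum ReportType from Base.
    }

    class Demo {
      // "import some.pkg.Compat.ReportType;" would not compile, because an
      // import must name the canonical type (Base.ReportType). The qualified
      // reference below is legal under both old and new class layouts:
      Base.ReportType live = Compat.ReportType.LIVE;
    }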