# IGNITE-226: WIP (renames ggfs-prefixed identifiers to igfs throughout the Hadoop module).
Project: http://git-wip-us.apache.org/repos/asf/incubator-ignite/repo Commit: http://git-wip-us.apache.org/repos/asf/incubator-ignite/commit/fd1f854b Tree: http://git-wip-us.apache.org/repos/asf/incubator-ignite/tree/fd1f854b Diff: http://git-wip-us.apache.org/repos/asf/incubator-ignite/diff/fd1f854b Branch: refs/heads/ignite-143 Commit: fd1f854bb81c6388fc5ab761c9346237ebc92051 Parents: 75eb190 Author: vozerov-gridgain <voze...@gridgain.com> Authored: Fri Feb 13 17:35:55 2015 +0300 Committer: vozerov-gridgain <voze...@gridgain.com> Committed: Fri Feb 13 17:35:55 2015 +0300 ---------------------------------------------------------------------- .../igfs/hadoop/v1/IgfsHadoopFileSystem.java | 18 ++--- .../igfs/hadoop/v2/IgfsHadoopFileSystem.java | 14 ++-- .../igfs/hadoop/IgfsHadoopEndpoint.java | 16 ++-- .../hadoop/IgfsHadoopFileSystemWrapper.java | 4 +- .../internal/igfs/hadoop/IgfsHadoopInProc.java | 48 ++++++------ .../internal/igfs/hadoop/IgfsHadoopOutProc.java | 22 +++--- .../internal/igfs/hadoop/IgfsHadoopWrapper.java | 16 ++-- .../hadoop/GridHadoopClassLoader.java | 8 +- .../GridHadoopDefaultMapReducePlanner.java | 8 +- .../GridHadoopClientProtocolSelfTest.java | 30 ++++---- .../apache/ignite/igfs/IgfsEventsTestSuite.java | 80 ++++++++++---------- .../IgfsHadoop20FileSystemAbstractSelfTest.java | 46 +++++------ .../igfs/IgfsHadoopDualAbstractSelfTest.java | 60 +++++++-------- .../IgfsHadoopFileSystemAbstractSelfTest.java | 38 +++++----- .../IgfsHadoopFileSystemClientSelfTest.java | 30 ++++---- .../IgfsHadoopFileSystemHandshakeSelfTest.java | 38 +++++----- .../IgfsHadoopFileSystemIpcCacheSelfTest.java | 16 ++-- ...IgfsHadoopFileSystemLoggerStateSelfTest.java | 42 +++++----- ...fsHadoopFileSystemSecondaryModeSelfTest.java | 42 +++++----- .../igfs/IgfsNearOnlyMultiNodeSelfTest.java | 14 ++-- .../hadoop/GridHadoopAbstractSelfTest.java | 36 ++++----- .../hadoop/GridHadoopAbstractWordCountTest.java | 10 +-- .../hadoop/GridHadoopCommandLineTest.java | 18 ++--- ...idHadoopDefaultMapReducePlannerSelfTest.java | 36 ++++----- .../hadoop/GridHadoopFileSystemsTest.java | 6 +- .../hadoop/GridHadoopGroupingTest.java | 2 +- .../hadoop/GridHadoopJobTrackerSelfTest.java | 6 +- .../GridHadoopMapReduceEmbeddedSelfTest.java | 8 +- .../hadoop/GridHadoopMapReduceTest.java | 12 +-- .../hadoop/GridHadoopSortingTest.java | 12 +-- .../hadoop/GridHadoopTaskExecutionSelfTest.java | 12 +-- .../hadoop/GridHadoopTasksAllVersionsTest.java | 24 +++--- ...GridHadoopExternalTaskExecutionSelfTest.java | 6 +- .../igfs/IgfsPerformanceBenchmark.java | 18 ++--- 34 files changed, 398 insertions(+), 398 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/fd1f854b/modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/v1/IgfsHadoopFileSystem.java ---------------------------------------------------------------------- diff --git a/modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/v1/IgfsHadoopFileSystem.java b/modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/v1/IgfsHadoopFileSystem.java index 42f5c5f..b877b86 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/v1/IgfsHadoopFileSystem.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/v1/IgfsHadoopFileSystem.java @@ -141,7 +141,7 @@ public class IgfsHadoopFileSystem extends FileSystem { private boolean seqReadsBeforePrefetchOverride; /** IGFS group block size. 
*/ - private long ggfsGrpBlockSize; + private long igfsGrpBlockSize; /** Flag that controls whether file writes should be colocated. */ private boolean colocateFileWrites; @@ -247,7 +247,7 @@ public class IgfsHadoopFileSystem extends FileSystem { // Handshake. IgfsHandshakeResponse handshake = rmtClient.handshake(logDir); - ggfsGrpBlockSize = handshake.blockSize(); + igfsGrpBlockSize = handshake.blockSize(); IgfsPaths paths = handshake.secondaryPaths(); @@ -527,13 +527,13 @@ public class IgfsHadoopFileSystem extends FileSystem { LOG.debug("Opening input stream [thread=" + Thread.currentThread().getName() + ", path=" + path + ", bufSize=" + bufSize + ']'); - IgfsHadoopInputStream ggfsIn = new IgfsHadoopInputStream(stream, stream.length(), + IgfsHadoopInputStream igfsIn = new IgfsHadoopInputStream(stream, stream.length(), bufSize, LOG, clientLog, logId); if (LOG.isDebugEnabled()) LOG.debug("Opened input stream [path=" + path + ", delegate=" + stream + ']'); - return new FSDataInputStream(ggfsIn); + return new FSDataInputStream(igfsIn); } } finally { @@ -598,12 +598,12 @@ public class IgfsHadoopFileSystem extends FileSystem { if (LOG.isDebugEnabled()) LOG.debug("Opened output stream in create [path=" + path + ", delegate=" + stream + ']'); - IgfsHadoopOutputStream ggfsOut = new IgfsHadoopOutputStream(stream, LOG, clientLog, + IgfsHadoopOutputStream igfsOut = new IgfsHadoopOutputStream(stream, LOG, clientLog, logId); bufSize = Math.max(64 * 1024, bufSize); - out = new BufferedOutputStream(ggfsOut, bufSize); + out = new BufferedOutputStream(igfsOut, bufSize); FSDataOutputStream res = new FSDataOutputStream(out, null, 0); @@ -672,12 +672,12 @@ public class IgfsHadoopFileSystem extends FileSystem { if (LOG.isDebugEnabled()) LOG.debug("Opened output stream in append [path=" + path + ", delegate=" + stream + ']'); - IgfsHadoopOutputStream ggfsOut = new IgfsHadoopOutputStream(stream, LOG, clientLog, + IgfsHadoopOutputStream igfsOut = new IgfsHadoopOutputStream(stream, LOG, clientLog, logId); bufSize = Math.max(64 * 1024, bufSize); - BufferedOutputStream out = new BufferedOutputStream(ggfsOut, bufSize); + BufferedOutputStream out = new BufferedOutputStream(igfsOut, bufSize); return new FSDataOutputStream(out, null, 0); } @@ -1050,7 +1050,7 @@ public class IgfsHadoopFileSystem extends FileSystem { /** {@inheritDoc} */ @SuppressWarnings("deprecation") @Override public long getDefaultBlockSize() { - return ggfsGrpBlockSize; + return igfsGrpBlockSize; } /** http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/fd1f854b/modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/v2/IgfsHadoopFileSystem.java ---------------------------------------------------------------------- diff --git a/modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/v2/IgfsHadoopFileSystem.java b/modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/v2/IgfsHadoopFileSystem.java index 856505b..c3642d4 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/v2/IgfsHadoopFileSystem.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/v2/IgfsHadoopFileSystem.java @@ -466,13 +466,13 @@ public class IgfsHadoopFileSystem extends AbstractFileSystem implements Closeabl LOG.debug("Opening input stream [thread=" + Thread.currentThread().getName() + ", path=" + path + ", bufSize=" + bufSize + ']'); - IgfsHadoopInputStream ggfsIn = new IgfsHadoopInputStream(stream, stream.length(), + IgfsHadoopInputStream igfsIn = new IgfsHadoopInputStream(stream, stream.length(), bufSize, LOG, clientLog, 
logId); if (LOG.isDebugEnabled()) LOG.debug("Opened input stream [path=" + path + ", delegate=" + stream + ']'); - return new FSDataInputStream(ggfsIn); + return new FSDataInputStream(igfsIn); } } finally { @@ -565,12 +565,12 @@ public class IgfsHadoopFileSystem extends AbstractFileSystem implements Closeabl assert stream != null; - IgfsHadoopOutputStream ggfsOut = new IgfsHadoopOutputStream(stream, LOG, + IgfsHadoopOutputStream igfsOut = new IgfsHadoopOutputStream(stream, LOG, clientLog, logId); bufSize = Math.max(64 * 1024, bufSize); - out = new BufferedOutputStream(ggfsOut, bufSize); + out = new BufferedOutputStream(igfsOut, bufSize); FSDataOutputStream res = new FSDataOutputStream(out, null, 0); @@ -783,18 +783,18 @@ public class IgfsHadoopFileSystem extends AbstractFileSystem implements Closeabl @Override public BlockLocation[] getFileBlockLocations(Path path, long start, long len) throws IOException { A.notNull(path, "path"); - IgfsPath ggfsPath = convert(path); + IgfsPath igfsPath = convert(path); enterBusy(); try { - if (modeRslvr.resolveMode(ggfsPath) == PROXY) + if (modeRslvr.resolveMode(igfsPath) == PROXY) return secondaryFs.getFileBlockLocations(path, start, len); else { long now = System.currentTimeMillis(); List<IgfsBlockLocation> affinity = new ArrayList<>( - rmtClient.affinity(ggfsPath, start, len)); + rmtClient.affinity(igfsPath, start, len)); BlockLocation[] arr = new BlockLocation[affinity.size()]; http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/fd1f854b/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopEndpoint.java ---------------------------------------------------------------------- diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopEndpoint.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopEndpoint.java index 2855c10..35638ea 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopEndpoint.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopEndpoint.java @@ -36,7 +36,7 @@ public class IgfsHadoopEndpoint { public static final String LOCALHOST = "127.0.0.1"; /** IGFS name. */ - private final String ggfsName; + private final String igfsName; /** Grid name. */ private final String gridName; @@ -63,8 +63,8 @@ public class IgfsHadoopEndpoint { StringBuilder sb = new StringBuilder(); - if (endpoint.ggfs() != null) - sb.append(endpoint.ggfs()); + if (endpoint.igfs() != null) + sb.append(endpoint.igfs()); if (endpoint.grid() != null) sb.append(":").append(endpoint.grid()); @@ -92,7 +92,7 @@ public class IgfsHadoopEndpoint { IgniteBiTuple<String, Integer> hostPort; if (tokens.length == 1) { - ggfsName = null; + igfsName = null; gridName = null; hostPort = hostPort(connStr, connStr); @@ -102,12 +102,12 @@ public class IgfsHadoopEndpoint { if (authStr.isEmpty()) { gridName = null; - ggfsName = null; + igfsName = null; } else { String[] authTokens = authStr.split(":", -1); - ggfsName = F.isEmpty(authTokens[0]) ? null : authTokens[0]; + igfsName = F.isEmpty(authTokens[0]) ? null : authTokens[0]; if (authTokens.length == 1) gridName = null; @@ -171,8 +171,8 @@ public class IgfsHadoopEndpoint { /** * @return IGFS name. 
*/ - @Nullable public String ggfs() { - return ggfsName; + @Nullable public String igfs() { + return igfsName; } /** http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/fd1f854b/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopFileSystemWrapper.java ---------------------------------------------------------------------- diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopFileSystemWrapper.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopFileSystemWrapper.java index eb41de0..9935466 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopFileSystemWrapper.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopFileSystemWrapper.java @@ -101,13 +101,13 @@ public class IgfsHadoopFileSystemWrapper implements Igfs, AutoCloseable { boolean wrongVer = X.hasCause(e, RemoteException.class) || (e.getMessage() != null && e.getMessage().contains("Failed on local")); - IgfsException ggfsErr = !wrongVer ? cast(detailMsg, e) : + IgfsException igfsErr = !wrongVer ? cast(detailMsg, e) : new IgfsInvalidHdfsVersionException("HDFS version you are connecting to differs from local " + "version.", e); - return ggfsErr; + return igfsErr; } /** http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/fd1f854b/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopInProc.java ---------------------------------------------------------------------- diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopInProc.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopInProc.java index 5c5a61e..8245125 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopInProc.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopInProc.java @@ -33,7 +33,7 @@ import java.util.concurrent.*; */ public class IgfsHadoopInProc implements IgfsHadoopEx { /** Target IGFS. */ - private final IgfsEx ggfs; + private final IgfsEx igfs; /** Buffer size. */ private final int bufSize; @@ -48,22 +48,22 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { /** * Constructor. * - * @param ggfs Target IGFS. + * @param igfs Target IGFS. * @param log Log. 
*/ - public IgfsHadoopInProc(IgfsEx ggfs, Log log) { - this.ggfs = ggfs; + public IgfsHadoopInProc(IgfsEx igfs, Log log) { + this.igfs = igfs; this.log = log; - bufSize = ggfs.configuration().getBlockSize() * 2; + bufSize = igfs.configuration().getBlockSize() * 2; } /** {@inheritDoc} */ @Override public IgfsHandshakeResponse handshake(String logDir) { - ggfs.clientLogDirectory(logDir); + igfs.clientLogDirectory(logDir); - return new IgfsHandshakeResponse(ggfs.name(), ggfs.proxyPaths(), ggfs.groupBlockSize(), - ggfs.globalSampling()); + return new IgfsHandshakeResponse(igfs.name(), igfs.proxyPaths(), igfs.groupBlockSize(), + igfs.globalSampling()); } /** {@inheritDoc} */ @@ -83,7 +83,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { /** {@inheritDoc} */ @Override public IgfsFile info(IgfsPath path) throws IgniteCheckedException { try { - return ggfs.info(path); + return igfs.info(path); } catch (IgniteException e) { throw new IgniteCheckedException(e); @@ -96,7 +96,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { /** {@inheritDoc} */ @Override public IgfsFile update(IgfsPath path, Map<String, String> props) throws IgniteCheckedException { try { - return ggfs.update(path, props); + return igfs.update(path, props); } catch (IgniteException e) { throw new IgniteCheckedException(e); @@ -109,7 +109,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { /** {@inheritDoc} */ @Override public Boolean setTimes(IgfsPath path, long accessTime, long modificationTime) throws IgniteCheckedException { try { - ggfs.setTimes(path, accessTime, modificationTime); + igfs.setTimes(path, accessTime, modificationTime); return true; } @@ -125,7 +125,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { /** {@inheritDoc} */ @Override public Boolean rename(IgfsPath src, IgfsPath dest) throws IgniteCheckedException { try { - ggfs.rename(src, dest); + igfs.rename(src, dest); return true; } @@ -140,7 +140,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { /** {@inheritDoc} */ @Override public Boolean delete(IgfsPath path, boolean recursive) throws IgniteCheckedException { try { - return ggfs.delete(path, recursive); + return igfs.delete(path, recursive); } catch (IgniteException e) { throw new IgniteCheckedException(e); @@ -153,7 +153,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { /** {@inheritDoc} */ @Override public IgfsStatus fsStatus() throws IgniteCheckedException { try { - return ggfs.globalSpace(); + return igfs.globalSpace(); } catch (IllegalStateException e) { throw new IgfsHadoopCommunicationException("Failed to get file system status because Grid is " + @@ -164,7 +164,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { /** {@inheritDoc} */ @Override public Collection<IgfsPath> listPaths(IgfsPath path) throws IgniteCheckedException { try { - return ggfs.listPaths(path); + return igfs.listPaths(path); } catch (IgniteException e) { throw new IgniteCheckedException(e); @@ -177,7 +177,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { /** {@inheritDoc} */ @Override public Collection<IgfsFile> listFiles(IgfsPath path) throws IgniteCheckedException { try { - return ggfs.listFiles(path); + return igfs.listFiles(path); } catch (IgniteException e) { throw new IgniteCheckedException(e); @@ -190,7 +190,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { /** {@inheritDoc} */ @Override public Boolean mkdirs(IgfsPath path, Map<String, String> props) throws IgniteCheckedException { try { - ggfs.mkdirs(path, props); + igfs.mkdirs(path, 
props); return true; } @@ -206,7 +206,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { /** {@inheritDoc} */ @Override public IgfsPathSummary contentSummary(IgfsPath path) throws IgniteCheckedException { try { - return ggfs.summary(path); + return igfs.summary(path); } catch (IgniteException e) { throw new IgniteCheckedException(e); @@ -221,7 +221,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { @Override public Collection<IgfsBlockLocation> affinity(IgfsPath path, long start, long len) throws IgniteCheckedException { try { - return ggfs.affinity(path, start, len); + return igfs.affinity(path, start, len); } catch (IgniteException e) { throw new IgniteCheckedException(e); @@ -234,7 +234,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { /** {@inheritDoc} */ @Override public IgfsHadoopStreamDelegate open(IgfsPath path) throws IgniteCheckedException { try { - IgfsInputStreamAdapter stream = ggfs.open(path, bufSize); + IgfsInputStreamAdapter stream = igfs.open(path, bufSize); return new IgfsHadoopStreamDelegate(this, stream, stream.fileInfo().length()); } @@ -250,7 +250,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { @Override public IgfsHadoopStreamDelegate open(IgfsPath path, int seqReadsBeforePrefetch) throws IgniteCheckedException { try { - IgfsInputStreamAdapter stream = ggfs.open(path, bufSize, seqReadsBeforePrefetch); + IgfsInputStreamAdapter stream = igfs.open(path, bufSize, seqReadsBeforePrefetch); return new IgfsHadoopStreamDelegate(this, stream, stream.fileInfo().length()); } @@ -266,8 +266,8 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { @Override public IgfsHadoopStreamDelegate create(IgfsPath path, boolean overwrite, boolean colocate, int replication, long blockSize, @Nullable Map<String, String> props) throws IgniteCheckedException { try { - IgfsOutputStream stream = ggfs.create(path, bufSize, overwrite, - colocate ? ggfs.nextAffinityKey() : null, replication, blockSize, props); + IgfsOutputStream stream = igfs.create(path, bufSize, overwrite, + colocate ? igfs.nextAffinityKey() : null, replication, blockSize, props); return new IgfsHadoopStreamDelegate(this, stream); } @@ -283,7 +283,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { @Override public IgfsHadoopStreamDelegate append(IgfsPath path, boolean create, @Nullable Map<String, String> props) throws IgniteCheckedException { try { - IgfsOutputStream stream = ggfs.append(path, bufSize, create, props); + IgfsOutputStream stream = igfs.append(path, bufSize, create, props); return new IgfsHadoopStreamDelegate(this, stream); } http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/fd1f854b/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopOutProc.java ---------------------------------------------------------------------- diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopOutProc.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopOutProc.java index b66bdd1..6b5242f 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopOutProc.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopOutProc.java @@ -76,7 +76,7 @@ public class IgfsHadoopOutProc implements IgfsHadoopEx, IgfsHadoopIpcIoListener private final String grid; /** IGFS name. */ - private final String ggfs; + private final String igfs; /** Client log. 
*/ private final Log log; @@ -93,12 +93,12 @@ public class IgfsHadoopOutProc implements IgfsHadoopEx, IgfsHadoopIpcIoListener * @param host Host. * @param port Port. * @param grid Grid name. - * @param ggfs IGFS name. + * @param igfs IGFS name. * @param log Client logger. * @throws IOException If failed. */ - public IgfsHadoopOutProc(String host, int port, String grid, String ggfs, Log log) throws IOException { - this(host, port, grid, ggfs, false, log); + public IgfsHadoopOutProc(String host, int port, String grid, String igfs, Log log) throws IOException { + this(host, port, grid, igfs, false, log); } /** @@ -106,12 +106,12 @@ public class IgfsHadoopOutProc implements IgfsHadoopEx, IgfsHadoopIpcIoListener * * @param port Port. * @param grid Grid name. - * @param ggfs IGFS name. + * @param igfs IGFS name. * @param log Client logger. * @throws IOException If failed. */ - public IgfsHadoopOutProc(int port, String grid, String ggfs, Log log) throws IOException { - this(null, port, grid, ggfs, true, log); + public IgfsHadoopOutProc(int port, String grid, String igfs, Log log) throws IOException { + this(null, port, grid, igfs, true, log); } /** @@ -120,12 +120,12 @@ public class IgfsHadoopOutProc implements IgfsHadoopEx, IgfsHadoopIpcIoListener * @param host Host. * @param port Port. * @param grid Grid name. - * @param ggfs IGFS name. + * @param igfs IGFS name. * @param shmem Shared memory flag. * @param log Client logger. * @throws IOException If failed. */ - private IgfsHadoopOutProc(String host, int port, String grid, String ggfs, boolean shmem, Log log) + private IgfsHadoopOutProc(String host, int port, String grid, String igfs, boolean shmem, Log log) throws IOException { assert host != null && !shmem || host == null && shmem : "Invalid arguments [host=" + host + ", port=" + port + ", shmem=" + shmem + ']'; @@ -133,7 +133,7 @@ public class IgfsHadoopOutProc implements IgfsHadoopEx, IgfsHadoopIpcIoListener String endpoint = host != null ? host + ":" + port : "shmem:" + port; this.grid = grid; - this.ggfs = ggfs; + this.igfs = igfs; this.log = log; io = IgfsHadoopIpcIo.get(log, endpoint); @@ -146,7 +146,7 @@ public class IgfsHadoopOutProc implements IgfsHadoopEx, IgfsHadoopIpcIoListener final IgfsHandshakeRequest req = new IgfsHandshakeRequest(); req.gridName(grid); - req.igfsName(ggfs); + req.igfsName(igfs); req.logDirectory(logDir); return io.send(req).chain(HANDSHAKE_RES).get(); http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/fd1f854b/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopWrapper.java ---------------------------------------------------------------------- diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopWrapper.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopWrapper.java index 8ba369a..3ac2d5a 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopWrapper.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopWrapper.java @@ -333,13 +333,13 @@ public class IgfsHadoopWrapper implements IgfsHadoop { // 2. Guess that we are in the same VM. 
if (!parameter(conf, PARAM_IGFS_ENDPOINT_NO_EMBED, authority, false)) { - IgfsEx ggfs = null; + IgfsEx igfs = null; if (endpoint.grid() == null) { try { Ignite ignite = G.ignite(); - ggfs = (IgfsEx)ignite.fileSystem(endpoint.ggfs()); + igfs = (IgfsEx)ignite.fileSystem(endpoint.igfs()); } catch (Exception e) { err = e; @@ -348,7 +348,7 @@ public class IgfsHadoopWrapper implements IgfsHadoop { else { for (Ignite ignite : G.allGrids()) { try { - ggfs = (IgfsEx)ignite.fileSystem(endpoint.ggfs()); + igfs = (IgfsEx)ignite.fileSystem(endpoint.igfs()); break; } @@ -358,11 +358,11 @@ public class IgfsHadoopWrapper implements IgfsHadoop { } } - if (ggfs != null) { + if (igfs != null) { IgfsHadoopEx hadoop = null; try { - hadoop = new IgfsHadoopInProc(ggfs, log); + hadoop = new IgfsHadoopInProc(igfs, log); curDelegate = new Delegate(hadoop, hadoop.handshake(logDir)); } @@ -384,7 +384,7 @@ public class IgfsHadoopWrapper implements IgfsHadoop { IgfsHadoopEx hadoop = null; try { - hadoop = new IgfsHadoopOutProc(endpoint.port(), endpoint.grid(), endpoint.ggfs(), log); + hadoop = new IgfsHadoopOutProc(endpoint.port(), endpoint.grid(), endpoint.igfs(), log); curDelegate = new Delegate(hadoop, hadoop.handshake(logDir)); } @@ -408,7 +408,7 @@ public class IgfsHadoopWrapper implements IgfsHadoop { IgfsHadoopEx hadoop = null; try { - hadoop = new IgfsHadoopOutProc(LOCALHOST, endpoint.port(), endpoint.grid(), endpoint.ggfs(), + hadoop = new IgfsHadoopOutProc(LOCALHOST, endpoint.port(), endpoint.grid(), endpoint.igfs(), log); curDelegate = new Delegate(hadoop, hadoop.handshake(logDir)); @@ -430,7 +430,7 @@ public class IgfsHadoopWrapper implements IgfsHadoop { IgfsHadoopEx hadoop = null; try { - hadoop = new IgfsHadoopOutProc(endpoint.host(), endpoint.port(), endpoint.grid(), endpoint.ggfs(), log); + hadoop = new IgfsHadoopOutProc(endpoint.host(), endpoint.port(), endpoint.grid(), endpoint.igfs(), log); curDelegate = new Delegate(hadoop, hadoop.handshake(logDir)); } http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/fd1f854b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopClassLoader.java ---------------------------------------------------------------------- diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopClassLoader.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopClassLoader.java index 1f347c8..11f8358 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopClassLoader.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopClassLoader.java @@ -73,11 +73,11 @@ public class GridHadoopClassLoader extends URLClassLoader { * @param cls Class name. * @return {@code true} if we need to check this class. */ - private static boolean isGgfsOrGgHadoop(String cls) { + private static boolean isIgfsOrGgHadoop(String cls) { String gg = "org.apache.ignite"; int len = gg.length(); - return cls.startsWith(gg) && (cls.indexOf("ignitefs.", len) != -1 || cls.indexOf(".fs.", len) != -1 || cls.indexOf("hadoop.", len) != -1); + return cls.startsWith(gg) && (cls.indexOf("igfs.", len) != -1 || cls.indexOf(".fs.", len) != -1 || cls.indexOf("hadoop.", len) != -1); } /** @@ -100,7 +100,7 @@ public class GridHadoopClassLoader extends URLClassLoader { return loadClassExplicitly(name, resolve); } - if (isGgfsOrGgHadoop(name)) { // For GG Hadoop and IGFS classes we have to check if they depend on Hadoop. 
+ if (isIgfsOrGgHadoop(name)) { // For GG Hadoop and IGFS classes we have to check if they depend on Hadoop. Boolean hasDeps = cache.get(name); if (hasDeps == null) { @@ -224,7 +224,7 @@ public class GridHadoopClassLoader extends URLClassLoader { if (in == null) // The class is external itself, it must be loaded from this class loader. return true; - if (!isGgfsOrGgHadoop(clsName)) // Other classes should not have external dependencies. + if (!isIgfsOrGgHadoop(clsName)) // Other classes should not have external dependencies. return false; final ClassReader rdr; http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/fd1f854b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/planner/GridHadoopDefaultMapReducePlanner.java ---------------------------------------------------------------------- diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/planner/GridHadoopDefaultMapReducePlanner.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/planner/GridHadoopDefaultMapReducePlanner.java index 44eb404..8e85cc1 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/planner/GridHadoopDefaultMapReducePlanner.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/planner/GridHadoopDefaultMapReducePlanner.java @@ -155,16 +155,16 @@ public class GridHadoopDefaultMapReducePlanner implements GridHadoopMapReducePla if (IGFS_SCHEME.equalsIgnoreCase(split0.file().getScheme())) { IgfsHadoopEndpoint endpoint = new IgfsHadoopEndpoint(split0.file().getAuthority()); - IgfsEx ggfs = null; + IgfsEx igfs = null; if (F.eq(ignite.name(), endpoint.grid())) - ggfs = (IgfsEx)((IgniteEx)ignite).igfsx(endpoint.ggfs()); + igfs = (IgfsEx)((IgniteEx)ignite).igfsx(endpoint.igfs()); - if (ggfs != null && !ggfs.isProxy(split0.file())) { + if (igfs != null && !igfs.isProxy(split0.file())) { Collection<IgfsBlockLocation> blocks; try { - blocks = ggfs.affinity(new IgfsPath(split0.file()), split0.start(), split0.length()); + blocks = igfs.affinity(new IgfsPath(split0.file()), split0.start(), split0.length()); } catch (IgniteException e) { throw new IgniteCheckedException(e); http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/fd1f854b/modules/hadoop/src/test/java/org/apache/ignite/client/hadoop/GridHadoopClientProtocolSelfTest.java ---------------------------------------------------------------------- diff --git a/modules/hadoop/src/test/java/org/apache/ignite/client/hadoop/GridHadoopClientProtocolSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/client/hadoop/GridHadoopClientProtocolSelfTest.java index a96eb82..0d4b0fc 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/client/hadoop/GridHadoopClientProtocolSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/client/hadoop/GridHadoopClientProtocolSelfTest.java @@ -67,7 +67,7 @@ public class GridHadoopClientProtocolSelfTest extends GridHadoopAbstractSelfTest } /** {@inheritDoc} */ - @Override protected boolean ggfsEnabled() { + @Override protected boolean igfsEnabled() { return true; } @@ -111,7 +111,7 @@ public class GridHadoopClientProtocolSelfTest extends GridHadoopAbstractSelfTest /** {@inheritDoc} */ @Override protected void afterTest() throws Exception { - grid(0).fileSystem(GridHadoopAbstractSelfTest.ggfsName).format(); + grid(0).fileSystem(GridHadoopAbstractSelfTest.igfsName).format(); setupLockFile.delete(); mapLockFile.delete(); @@ -150,11 +150,11 @@ public class 
GridHadoopClientProtocolSelfTest extends GridHadoopAbstractSelfTest * @throws Exception If failed. */ public void testJobCounters() throws Exception { - IgniteFs ggfs = grid(0).fileSystem(GridHadoopAbstractSelfTest.ggfsName); + IgniteFs igfs = grid(0).fileSystem(GridHadoopAbstractSelfTest.igfsName); - ggfs.mkdirs(new IgfsPath(PATH_INPUT)); + igfs.mkdirs(new IgfsPath(PATH_INPUT)); - try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(ggfs.create( + try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(igfs.create( new IgfsPath(PATH_INPUT + "/test.file"), true)))) { bw.write( @@ -268,11 +268,11 @@ public class GridHadoopClientProtocolSelfTest extends GridHadoopAbstractSelfTest * @throws Exception If failed. */ public void checkJobSubmit(boolean noCombiners, boolean noReducers) throws Exception { - IgniteFs ggfs = grid(0).fileSystem(GridHadoopAbstractSelfTest.ggfsName); + IgniteFs igfs = grid(0).fileSystem(GridHadoopAbstractSelfTest.igfsName); - ggfs.mkdirs(new IgfsPath(PATH_INPUT)); + igfs.mkdirs(new IgfsPath(PATH_INPUT)); - try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(ggfs.create( + try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(igfs.create( new IgfsPath(PATH_INPUT + "/test.file"), true)))) { bw.write("word"); @@ -389,30 +389,30 @@ public class GridHadoopClientProtocolSelfTest extends GridHadoopAbstractSelfTest assert jobStatus.getMapProgress() == 1.0f; assert jobStatus.getReduceProgress() == 1.0f; - dumpGgfs(ggfs, new IgfsPath(PATH_OUTPUT)); + dumpIgfs(igfs, new IgfsPath(PATH_OUTPUT)); } /** * Dump IGFS content. * - * @param ggfs IGFS. + * @param igfs IGFS. * @param path Path. * @throws Exception If failed. */ @SuppressWarnings("ConstantConditions") - private static void dumpGgfs(IgniteFs ggfs, IgfsPath path) throws Exception { - IgfsFile file = ggfs.info(path); + private static void dumpIgfs(IgniteFs igfs, IgfsPath path) throws Exception { + IgfsFile file = igfs.info(path); assert file != null; System.out.println(file.path()); if (file.isDirectory()) { - for (IgfsPath child : ggfs.listPaths(path)) - dumpGgfs(ggfs, child); + for (IgfsPath child : igfs.listPaths(path)) + dumpIgfs(igfs, child); } else { - try (BufferedReader br = new BufferedReader(new InputStreamReader(ggfs.open(path)))) { + try (BufferedReader br = new BufferedReader(new InputStreamReader(igfs.open(path)))) { String line = br.readLine(); while (line != null) { http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/fd1f854b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsEventsTestSuite.java ---------------------------------------------------------------------- diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsEventsTestSuite.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsEventsTestSuite.java index a685091..05a7b1d 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsEventsTestSuite.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsEventsTestSuite.java @@ -77,14 +77,14 @@ public class IgfsEventsTestSuite extends TestSuite { public static class ShmemPrivate extends IgfsEventsAbstractSelfTest { /** {@inheritDoc} */ @Override protected IgfsConfiguration getIgfsConfiguration() throws IgniteCheckedException { - IgfsConfiguration ggfsCfg = super.getIgfsConfiguration(); + IgfsConfiguration igfsCfg = super.getIgfsConfiguration(); - ggfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{ + igfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{ put("type", 
"shmem"); put("port", String.valueOf(IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT + 1)); }}); - return ggfsCfg; + return igfsCfg; } } @@ -94,14 +94,14 @@ public class IgfsEventsTestSuite extends TestSuite { public static class LoopbackPrivate extends IgfsEventsAbstractSelfTest { /** {@inheritDoc} */ @Override protected IgfsConfiguration getIgfsConfiguration() throws IgniteCheckedException { - IgfsConfiguration ggfsCfg = super.getIgfsConfiguration(); + IgfsConfiguration igfsCfg = super.getIgfsConfiguration(); - ggfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{ + igfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{ put("type", "tcp"); put("port", String.valueOf(IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT + 1)); }}); - return ggfsCfg; + return igfsCfg; } } @@ -110,38 +110,38 @@ public class IgfsEventsTestSuite extends TestSuite { */ public abstract static class PrimarySecondaryTest extends IgfsEventsAbstractSelfTest { /** Secondary file system. */ - private static IgniteFs ggfsSec; + private static IgniteFs igfsSec; /** {@inheritDoc} */ @Override protected IgfsConfiguration getIgfsConfiguration() throws IgniteCheckedException { - IgfsConfiguration ggfsCfg = super.getIgfsConfiguration(); + IgfsConfiguration igfsCfg = super.getIgfsConfiguration(); - ggfsCfg.setSecondaryFileSystem(new IgfsHadoopFileSystemWrapper( + igfsCfg.setSecondaryFileSystem(new IgfsHadoopFileSystemWrapper( "igfs://igfs-secondary:grid-secondary@127.0.0.1:11500/", "modules/core/src/test/config/hadoop/core-site-secondary.xml")); - return ggfsCfg; + return igfsCfg; } /** * @return IGFS configuration for secondary file system. */ - protected IgfsConfiguration getSecondaryGgfsConfiguration() throws IgniteCheckedException { - IgfsConfiguration ggfsCfg = super.getIgfsConfiguration(); + protected IgfsConfiguration getSecondaryIgfsConfiguration() throws IgniteCheckedException { + IgfsConfiguration igfsCfg = super.getIgfsConfiguration(); - ggfsCfg.setName("igfs-secondary"); - ggfsCfg.setDefaultMode(PRIMARY); - ggfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>(){{ + igfsCfg.setName("igfs-secondary"); + igfsCfg.setDefaultMode(PRIMARY); + igfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>(){{ put("type", "tcp"); put("port", "11500"); }}); - return ggfsCfg; + return igfsCfg; } /** {@inheritDoc} */ @Override protected void beforeTestsStarted() throws Exception { - ggfsSec = startSecondary(); + igfsSec = startSecondary(); super.beforeTestsStarted(); } @@ -158,7 +158,7 @@ public class IgfsEventsTestSuite extends TestSuite { super.afterTest(); // Clean up secondary file system. - ggfsSec.format(); + igfsSec.format(); } /** @@ -168,7 +168,7 @@ public class IgfsEventsTestSuite extends TestSuite { * @throws Exception If failed. 
*/ @Nullable private IgniteFs startSecondary() throws Exception { - IgniteConfiguration cfg = getConfiguration("grid-secondary", getSecondaryGgfsConfiguration()); + IgniteConfiguration cfg = getConfiguration("grid-secondary", getSecondaryIgfsConfiguration()); cfg.setLocalHost("127.0.0.1"); cfg.setPeerClassLoadingEnabled(false); @@ -185,11 +185,11 @@ public class IgfsEventsTestSuite extends TestSuite { public static class ShmemDualSync extends PrimarySecondaryTest { /** {@inheritDoc} */ @Override protected IgfsConfiguration getIgfsConfiguration() throws IgniteCheckedException { - IgfsConfiguration ggfsCfg = super.getIgfsConfiguration(); + IgfsConfiguration igfsCfg = super.getIgfsConfiguration(); - ggfsCfg.setDefaultMode(DUAL_SYNC); + igfsCfg.setDefaultMode(DUAL_SYNC); - return ggfsCfg; + return igfsCfg; } } @@ -199,11 +199,11 @@ public class IgfsEventsTestSuite extends TestSuite { public static class ShmemDualAsync extends PrimarySecondaryTest { /** {@inheritDoc} */ @Override protected IgfsConfiguration getIgfsConfiguration() throws IgniteCheckedException { - IgfsConfiguration ggfsCfg = super.getIgfsConfiguration(); + IgfsConfiguration igfsCfg = super.getIgfsConfiguration(); - ggfsCfg.setDefaultMode(DUAL_ASYNC); + igfsCfg.setDefaultMode(DUAL_ASYNC); - return ggfsCfg; + return igfsCfg; } } @@ -213,27 +213,27 @@ public class IgfsEventsTestSuite extends TestSuite { public abstract static class LoopbackPrimarySecondaryTest extends PrimarySecondaryTest { /** {@inheritDoc} */ @Override protected IgfsConfiguration getIgfsConfiguration() throws IgniteCheckedException { - IgfsConfiguration ggfsCfg = super.getIgfsConfiguration(); + IgfsConfiguration igfsCfg = super.getIgfsConfiguration(); - ggfsCfg.setSecondaryFileSystem(new IgfsHadoopFileSystemWrapper( + igfsCfg.setSecondaryFileSystem(new IgfsHadoopFileSystemWrapper( "igfs://igfs-secondary:grid-secondary@127.0.0.1:11500/", "modules/core/src/test/config/hadoop/core-site-loopback-secondary.xml")); - return ggfsCfg; + return igfsCfg; } /** {@inheritDoc} */ - @Override protected IgfsConfiguration getSecondaryGgfsConfiguration() throws IgniteCheckedException { - IgfsConfiguration ggfsCfg = super.getSecondaryGgfsConfiguration(); + @Override protected IgfsConfiguration getSecondaryIgfsConfiguration() throws IgniteCheckedException { + IgfsConfiguration igfsCfg = super.getSecondaryIgfsConfiguration(); - ggfsCfg.setName("igfs-secondary"); - ggfsCfg.setDefaultMode(PRIMARY); - ggfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{ + igfsCfg.setName("igfs-secondary"); + igfsCfg.setDefaultMode(PRIMARY); + igfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{ put("type", "tcp"); put("port", "11500"); }}); - return ggfsCfg; + return igfsCfg; } } @@ -243,11 +243,11 @@ public class IgfsEventsTestSuite extends TestSuite { public static class LoopbackDualSync extends LoopbackPrimarySecondaryTest { /** {@inheritDoc} */ @Override protected IgfsConfiguration getIgfsConfiguration() throws IgniteCheckedException { - IgfsConfiguration ggfsCfg = super.getIgfsConfiguration(); + IgfsConfiguration igfsCfg = super.getIgfsConfiguration(); - ggfsCfg.setDefaultMode(DUAL_SYNC); + igfsCfg.setDefaultMode(DUAL_SYNC); - return ggfsCfg; + return igfsCfg; } } @@ -257,11 +257,11 @@ public class IgfsEventsTestSuite extends TestSuite { public static class LoopbackDualAsync extends LoopbackPrimarySecondaryTest { /** {@inheritDoc} */ @Override protected IgfsConfiguration getIgfsConfiguration() throws IgniteCheckedException { - IgfsConfiguration ggfsCfg = 
super.getIgfsConfiguration(); + IgfsConfiguration igfsCfg = super.getIgfsConfiguration(); - ggfsCfg.setDefaultMode(DUAL_ASYNC); + igfsCfg.setDefaultMode(DUAL_ASYNC); - return ggfsCfg; + return igfsCfg; } } } http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/fd1f854b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoop20FileSystemAbstractSelfTest.java ---------------------------------------------------------------------- diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoop20FileSystemAbstractSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoop20FileSystemAbstractSelfTest.java index fc1dab0..635c988 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoop20FileSystemAbstractSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoop20FileSystemAbstractSelfTest.java @@ -143,15 +143,15 @@ public abstract class IgfsHadoop20FileSystemAbstractSelfTest extends IgfsCommonA private void startNodes() throws Exception { if (mode != PRIMARY) { // Start secondary IGFS. - IgfsConfiguration ggfsCfg = new IgfsConfiguration(); + IgfsConfiguration igfsCfg = new IgfsConfiguration(); - ggfsCfg.setDataCacheName("partitioned"); - ggfsCfg.setMetaCacheName("replicated"); - ggfsCfg.setName("ggfs_secondary"); - ggfsCfg.setIpcEndpointConfiguration(secondaryIpcEndpointConfiguration()); - ggfsCfg.setManagementPort(-1); - ggfsCfg.setBlockSize(512 * 1024); - ggfsCfg.setPrefetchBlocks(1); + igfsCfg.setDataCacheName("partitioned"); + igfsCfg.setMetaCacheName("replicated"); + igfsCfg.setName("igfs_secondary"); + igfsCfg.setIpcEndpointConfiguration(secondaryIpcEndpointConfiguration()); + igfsCfg.setManagementPort(-1); + igfsCfg.setBlockSize(512 * 1024); + igfsCfg.setPrefetchBlocks(1); CacheConfiguration cacheCfg = defaultCacheConfiguration(); @@ -182,7 +182,7 @@ public abstract class IgfsHadoop20FileSystemAbstractSelfTest extends IgfsCommonA cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(metaCacheCfg, cacheCfg); - cfg.setIgfsConfiguration(ggfsCfg); + cfg.setIgfsConfiguration(igfsCfg); cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED); cfg.setLocalHost(U.getLocalHost().getHostAddress()); cfg.setCommunicationSpi(communicationSpi()); @@ -208,7 +208,7 @@ public abstract class IgfsHadoop20FileSystemAbstractSelfTest extends IgfsCommonA cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(cacheConfiguration(gridName)); - cfg.setIgfsConfiguration(ggfsConfiguration(gridName)); + cfg.setIgfsConfiguration(igfsConfiguration(gridName)); cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED); cfg.setLocalHost("127.0.0.1"); cfg.setCommunicationSpi(communicationSpi()); @@ -251,7 +251,7 @@ public abstract class IgfsHadoop20FileSystemAbstractSelfTest extends IgfsCommonA * @param gridName Grid name. * @return IGFS configuration. 
*/ - protected IgfsConfiguration ggfsConfiguration(String gridName) throws IgniteCheckedException { + protected IgfsConfiguration igfsConfiguration(String gridName) throws IgniteCheckedException { IgfsConfiguration cfg = new IgfsConfiguration(); cfg.setDataCacheName("partitioned"); @@ -327,9 +327,9 @@ public abstract class IgfsHadoop20FileSystemAbstractSelfTest extends IgfsCommonA long used = 0, max = 0; for (int i = 0; i < 4; i++) { - IgniteFs ggfs = grid(i).fileSystem("igfs"); + IgniteFs igfs = grid(i).fileSystem("igfs"); - IgfsMetrics metrics = ggfs.metrics(); + IgfsMetrics metrics = igfs.metrics(); used += metrics.localSpaceSize(); max += metrics.maxSpaceSize(); @@ -1132,10 +1132,10 @@ public abstract class IgfsHadoop20FileSystemAbstractSelfTest extends IgfsCommonA * @throws Exception If failed. */ public void testListStatus() throws Exception { - Path ggfsHome = new Path(primaryFsUri); + Path igfsHome = new Path(primaryFsUri); // Test listing of an empty directory. - Path dir = new Path(ggfsHome, "dir"); + Path dir = new Path(igfsHome, "dir"); fs.mkdir(dir, FsPermission.getDefault(), true); @@ -1249,9 +1249,9 @@ public abstract class IgfsHadoop20FileSystemAbstractSelfTest extends IgfsCommonA /** @throws Exception If failed. */ public void testGetFileBlockLocations() throws Exception { - Path ggfsHome = new Path(primaryFsUri); + Path igfsHome = new Path(primaryFsUri); - Path file = new Path(ggfsHome, "someFile"); + Path file = new Path(igfsHome, "someFile"); try (OutputStream out = new BufferedOutputStream(fs.create(file, EnumSet.noneOf(CreateFlag.class), Options.CreateOpts.perms(FsPermission.getDefault())))) { @@ -1289,9 +1289,9 @@ public abstract class IgfsHadoop20FileSystemAbstractSelfTest extends IgfsCommonA public void testZeroReplicationFactor() throws Exception { // This test doesn't make sense for any mode except of PRIMARY. if (mode == PRIMARY) { - Path ggfsHome = new Path(primaryFsUri); + Path igfsHome = new Path(primaryFsUri); - Path file = new Path(ggfsHome, "someFile"); + Path file = new Path(igfsHome, "someFile"); try (FSDataOutputStream out = fs.create(file, EnumSet.noneOf(CreateFlag.class), Options.CreateOpts.perms(FsPermission.getDefault()), Options.CreateOpts.repFac((short)1))) { @@ -1721,9 +1721,9 @@ public abstract class IgfsHadoop20FileSystemAbstractSelfTest extends IgfsCommonA * @throws Exception If error occurs. */ public void testClientReconnect() throws Exception { - final Path ggfsHome = new Path(primaryFsUri); + final Path igfsHome = new Path(primaryFsUri); - final Path filePath = new Path(ggfsHome, "someFile"); + final Path filePath = new Path(igfsHome, "someFile"); final FSDataOutputStream s = fs.create(filePath, EnumSet.noneOf(CreateFlag.class), Options.CreateOpts.perms(FsPermission.getDefault())); // Open stream before stopping IGFS. 
@@ -1815,9 +1815,9 @@ public abstract class IgfsHadoop20FileSystemAbstractSelfTest extends IgfsCommonA */ private void checkConsistency(int createBufSize, int writeCntsInCreate, int openAfterCreateBufSize, int appendBufSize, int writeCntsInAppend, int openAfterAppendBufSize) throws Exception { - final Path ggfsHome = new Path(primaryFsUri); + final Path igfsHome = new Path(primaryFsUri); - Path file = new Path(ggfsHome, "/someDir/someInnerDir/someFile"); + Path file = new Path(igfsHome, "/someDir/someInnerDir/someFile"); if (createBufSize == -1) createBufSize = fs.getServerDefaults().getFileBufferSize(); http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/fd1f854b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopDualAbstractSelfTest.java ---------------------------------------------------------------------- diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopDualAbstractSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopDualAbstractSelfTest.java index b21f022..22c144f 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopDualAbstractSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopDualAbstractSelfTest.java @@ -93,10 +93,10 @@ public abstract class IgfsHadoopDualAbstractSelfTest extends IgfsCommonAbstractT protected static byte[] chunk; /** Primary IGFS. */ - protected static IgfsImpl ggfs; + protected static IgfsImpl igfs; /** Secondary IGFS. */ - protected static IgfsImpl ggfsSecondary; + protected static IgfsImpl igfsSecondary; /** IGFS mode. */ protected final IgfsMode mode; @@ -115,26 +115,26 @@ public abstract class IgfsHadoopDualAbstractSelfTest extends IgfsCommonAbstractT * Start grid with IGFS. * * @param gridName Grid name. - * @param ggfsName IGFS name + * @param igfsName IGFS name * @param mode IGFS mode. * @param secondaryFs Secondary file system (optional). * @param restCfg Rest configuration string (optional). * @return Started grid instance. * @throws Exception If failed. 
*/ - protected Ignite startGridWithGgfs(String gridName, String ggfsName, IgfsMode mode, + protected Ignite startGridWithIgfs(String gridName, String igfsName, IgfsMode mode, @Nullable Igfs secondaryFs, @Nullable Map<String, String> restCfg) throws Exception { - IgfsConfiguration ggfsCfg = new IgfsConfiguration(); - - ggfsCfg.setDataCacheName("dataCache"); - ggfsCfg.setMetaCacheName("metaCache"); - ggfsCfg.setName(ggfsName); - ggfsCfg.setBlockSize(IGFS_BLOCK_SIZE); - ggfsCfg.setDefaultMode(mode); - ggfsCfg.setIpcEndpointConfiguration(restCfg); - ggfsCfg.setSecondaryFileSystem(secondaryFs); - ggfsCfg.setPrefetchBlocks(PREFETCH_BLOCKS); - ggfsCfg.setSequentialReadsBeforePrefetch(SEQ_READS_BEFORE_PREFETCH); + IgfsConfiguration igfsCfg = new IgfsConfiguration(); + + igfsCfg.setDataCacheName("dataCache"); + igfsCfg.setMetaCacheName("metaCache"); + igfsCfg.setName(igfsName); + igfsCfg.setBlockSize(IGFS_BLOCK_SIZE); + igfsCfg.setDefaultMode(mode); + igfsCfg.setIpcEndpointConfiguration(restCfg); + igfsCfg.setSecondaryFileSystem(secondaryFs); + igfsCfg.setPrefetchBlocks(PREFETCH_BLOCKS); + igfsCfg.setSequentialReadsBeforePrefetch(SEQ_READS_BEFORE_PREFETCH); CacheConfiguration dataCacheCfg = defaultCacheConfiguration(); @@ -166,7 +166,7 @@ public abstract class IgfsHadoopDualAbstractSelfTest extends IgfsCommonAbstractT cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(dataCacheCfg, metaCacheCfg); - cfg.setIgfsConfiguration(ggfsCfg); + cfg.setIgfsConfiguration(igfsCfg); cfg.setLocalHost("127.0.0.1"); cfg.setConnectorConfiguration(null); @@ -181,20 +181,20 @@ public abstract class IgfsHadoopDualAbstractSelfTest extends IgfsCommonAbstractT for (int i = 0; i < chunk.length; i++) chunk[i] = (byte)i; - Ignite igniteSecondary = startGridWithGgfs("grid-secondary", "igfs-secondary", PRIMARY, null, SECONDARY_REST_CFG); + Ignite igniteSecondary = startGridWithIgfs("grid-secondary", "igfs-secondary", PRIMARY, null, SECONDARY_REST_CFG); Igfs hadoopFs = new IgfsHadoopFileSystemWrapper(SECONDARY_URI, SECONDARY_CFG); - Ignite ignite = startGridWithGgfs("grid", "igfs", mode, hadoopFs, PRIMARY_REST_CFG); + Ignite ignite = startGridWithIgfs("grid", "igfs", mode, hadoopFs, PRIMARY_REST_CFG); - ggfsSecondary = (IgfsImpl) igniteSecondary.fileSystem("igfs-secondary"); - ggfs = (IgfsImpl) ignite.fileSystem("igfs"); + igfsSecondary = (IgfsImpl) igniteSecondary.fileSystem("igfs-secondary"); + igfs = (IgfsImpl) ignite.fileSystem("igfs"); } /** {@inheritDoc} */ @Override protected void afterTest() throws Exception { - clear(ggfs); - clear(ggfsSecondary); + clear(igfs); + clear(igfsSecondary); } /** {@inheritDoc} */ @@ -218,12 +218,12 @@ public abstract class IgfsHadoopDualAbstractSelfTest extends IgfsCommonAbstractT * @throws Exception IF failed. */ public void testOpenPrefetchOverride() throws Exception { - create(ggfsSecondary, paths(DIR, SUBDIR), paths(FILE)); + create(igfsSecondary, paths(DIR, SUBDIR), paths(FILE)); // Write enough data to the secondary file system. final int blockSize = IGFS_BLOCK_SIZE; - IgfsOutputStream out = ggfsSecondary.append(FILE, false); + IgfsOutputStream out = igfsSecondary.append(FILE, false); int totalWritten = 0; @@ -235,7 +235,7 @@ public abstract class IgfsHadoopDualAbstractSelfTest extends IgfsCommonAbstractT out.close(); - awaitFileClose(ggfsSecondary, FILE); + awaitFileClose(igfsSecondary, FILE); // Instantiate file system with overridden "seq reads before prefetch" property. 
Configuration cfg = new Configuration(); @@ -261,14 +261,14 @@ public abstract class IgfsHadoopDualAbstractSelfTest extends IgfsCommonAbstractT fsIn.readFully(0, readBuf, 0, readBuf.length); // Wait for a while for prefetch to finish (if any). - IgfsMetaManager meta = ggfs.context().meta(); + IgfsMetaManager meta = igfs.context().meta(); IgfsFileInfo info = meta.info(meta.fileId(FILE)); IgfsBlockKey key = new IgfsBlockKey(info.id(), info.affinityKey(), info.evictExclude(), 2); - GridCache<IgfsBlockKey, byte[]> dataCache = ggfs.context().kernalContext().cache().cache( - ggfs.configuration().getDataCacheName()); + GridCache<IgfsBlockKey, byte[]> dataCache = igfs.context().kernalContext().cache().cache( + igfs.configuration().getDataCacheName()); for (int i = 0; i < 10; i++) { if (dataCache.containsKey(key)) @@ -280,12 +280,12 @@ public abstract class IgfsHadoopDualAbstractSelfTest extends IgfsCommonAbstractT fsIn.close(); // Remove the file from the secondary file system. - ggfsSecondary.delete(FILE, false); + igfsSecondary.delete(FILE, false); // Try reading the third block. Should fail. GridTestUtils.assertThrows(log, new Callable<Object>() { @Override public Object call() throws Exception { - IgfsInputStream in0 = ggfs.open(FILE); + IgfsInputStream in0 = igfs.open(FILE); in0.seek(blockSize * 2); http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/fd1f854b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemAbstractSelfTest.java ---------------------------------------------------------------------- diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemAbstractSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemAbstractSelfTest.java index c124136..b3eef32 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemAbstractSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemAbstractSelfTest.java @@ -65,7 +65,7 @@ public abstract class IgfsHadoopFileSystemAbstractSelfTest extends IgfsCommonAbs private static final String PRIMARY_URI = "igfs://" + PRIMARY_AUTHORITY + "/"; /** Secondary file system authority. */ - private static final String SECONDARY_AUTHORITY = "ggfs_secondary:grid_secondary@127.0.0.1:11500"; + private static final String SECONDARY_AUTHORITY = "igfs_secondary:grid_secondary@127.0.0.1:11500"; /** Secondary file systme URI. */ private static final String SECONDARY_URI = "igfs://" + SECONDARY_AUTHORITY + "/"; @@ -172,14 +172,14 @@ public abstract class IgfsHadoopFileSystemAbstractSelfTest extends IgfsCommonAbs private void startNodes() throws Exception { if (mode != PRIMARY) { // Start secondary IGFS. 
- IgfsConfiguration ggfsCfg = new IgfsConfiguration(); + IgfsConfiguration igfsCfg = new IgfsConfiguration(); - ggfsCfg.setDataCacheName("partitioned"); - ggfsCfg.setMetaCacheName("replicated"); - ggfsCfg.setName("ggfs_secondary"); - ggfsCfg.setIpcEndpointConfiguration(SECONDARY_ENDPOINT_CFG); - ggfsCfg.setBlockSize(512 * 1024); - ggfsCfg.setPrefetchBlocks(1); + igfsCfg.setDataCacheName("partitioned"); + igfsCfg.setMetaCacheName("replicated"); + igfsCfg.setName("igfs_secondary"); + igfsCfg.setIpcEndpointConfiguration(SECONDARY_ENDPOINT_CFG); + igfsCfg.setBlockSize(512 * 1024); + igfsCfg.setPrefetchBlocks(1); CacheConfiguration cacheCfg = defaultCacheConfiguration(); @@ -210,7 +210,7 @@ public abstract class IgfsHadoopFileSystemAbstractSelfTest extends IgfsCommonAbs cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(metaCacheCfg, cacheCfg); - cfg.setIgfsConfiguration(ggfsCfg); + cfg.setIgfsConfiguration(igfsCfg); cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED); cfg.setCommunicationSpi(communicationSpi()); @@ -276,7 +276,7 @@ public abstract class IgfsHadoopFileSystemAbstractSelfTest extends IgfsCommonAbs cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(cacheConfiguration(gridName)); - cfg.setIgfsConfiguration(ggfsConfiguration(gridName)); + cfg.setIgfsConfiguration(igfsConfiguration(gridName)); cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED); cfg.setCommunicationSpi(communicationSpi()); @@ -318,7 +318,7 @@ public abstract class IgfsHadoopFileSystemAbstractSelfTest extends IgfsCommonAbs * @param gridName Grid name. * @return IGFS configuration. */ - protected IgfsConfiguration ggfsConfiguration(String gridName) throws IgniteCheckedException { + protected IgfsConfiguration igfsConfiguration(String gridName) throws IgniteCheckedException { IgfsConfiguration cfg = new IgfsConfiguration(); cfg.setDataCacheName("partitioned"); @@ -1306,10 +1306,10 @@ public abstract class IgfsHadoopFileSystemAbstractSelfTest extends IgfsCommonAbs * @throws Exception If failed. */ public void testListStatus() throws Exception { - Path ggfsHome = new Path(PRIMARY_URI); + Path igfsHome = new Path(PRIMARY_URI); // Test listing of an empty directory. - Path dir = new Path(ggfsHome, "dir"); + Path dir = new Path(igfsHome, "dir"); assert fs.mkdirs(dir); @@ -1483,9 +1483,9 @@ public abstract class IgfsHadoopFileSystemAbstractSelfTest extends IgfsCommonAbs /** @throws Exception If failed. */ public void testGetFileBlockLocations() throws Exception { - Path ggfsHome = new Path(PRIMARY_URI); + Path igfsHome = new Path(PRIMARY_URI); - Path file = new Path(ggfsHome, "someFile"); + Path file = new Path(igfsHome, "someFile"); try (OutputStream out = new BufferedOutputStream(fs.create(file, true, 1024 * 1024))) { byte[] data = new byte[128 * 1024]; @@ -1528,9 +1528,9 @@ public abstract class IgfsHadoopFileSystemAbstractSelfTest extends IgfsCommonAbs public void testZeroReplicationFactor() throws Exception { // This test doesn't make sense for any mode except of PRIMARY. 
if (mode == PRIMARY) { - Path ggfsHome = new Path(PRIMARY_URI); + Path igfsHome = new Path(PRIMARY_URI); - Path file = new Path(ggfsHome, "someFile"); + Path file = new Path(igfsHome, "someFile"); try (FSDataOutputStream out = fs.create(file, (short)0)) { out.write(new byte[1024 * 1024]); @@ -2059,9 +2059,9 @@ public abstract class IgfsHadoopFileSystemAbstractSelfTest extends IgfsCommonAbs */ private void checkConsistency(int createBufSize, int writeCntsInCreate, int openAfterCreateBufSize, int appendBufSize, int writeCntsInAppend, int openAfterAppendBufSize) throws Exception { - final Path ggfsHome = new Path(PRIMARY_URI); + final Path igfsHome = new Path(PRIMARY_URI); - Path file = new Path(ggfsHome, "/someDir/someInnerDir/someFile"); + Path file = new Path(igfsHome, "/someDir/someInnerDir/someFile"); FSDataOutputStream os = fs.create(file, true, createBufSize); http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/fd1f854b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemClientSelfTest.java ---------------------------------------------------------------------- diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemClientSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemClientSelfTest.java index b44c2d8..599fd1d 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemClientSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemClientSelfTest.java @@ -66,19 +66,19 @@ public class IgfsHadoopFileSystemClientSelfTest extends IgfsCommonAbstractTest { cfg.setDiscoverySpi(discoSpi); - IgfsConfiguration ggfsCfg = new IgfsConfiguration(); + IgfsConfiguration igfsCfg = new IgfsConfiguration(); - ggfsCfg.setDataCacheName("partitioned"); - ggfsCfg.setMetaCacheName("replicated"); - ggfsCfg.setName("igfs"); - ggfsCfg.setBlockSize(512 * 1024); - ggfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{ + igfsCfg.setDataCacheName("partitioned"); + igfsCfg.setMetaCacheName("replicated"); + igfsCfg.setName("igfs"); + igfsCfg.setBlockSize(512 * 1024); + igfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{ put("type", "tcp"); put("port", String.valueOf(DFLT_IPC_PORT)); }}); cfg.setCacheConfiguration(cacheConfiguration()); - cfg.setIgfsConfiguration(ggfsCfg); + cfg.setIgfsConfiguration(igfsCfg); return cfg; } @@ -133,18 +133,18 @@ public class IgfsHadoopFileSystemClientSelfTest extends IgfsCommonAbstractTest { IgfsHadoopStreamDelegate delegate = client.create(path, true, false, 1, 1024, null); - final IgfsHadoopOutputStream ggfsOut = new IgfsHadoopOutputStream(delegate, LOG, + final IgfsHadoopOutputStream igfsOut = new IgfsHadoopOutputStream(delegate, LOG, IgfsLogger.disabledLogger(), 0); // This call should return fine as exception is thrown for the first time. - ggfsOut.write(data); + igfsOut.write(data); U.sleep(500); // This call should throw an IO exception. 
GridTestUtils.assertThrows(null, new Callable<Object>() { @Override public Object call() throws Exception { - ggfsOut.write(data); + igfsOut.write(data); return null; } @@ -163,17 +163,17 @@ public class IgfsHadoopFileSystemClientSelfTest extends IgfsCommonAbstractTest { */ @SuppressWarnings("ConstantConditions") private void switchHandlerErrorFlag(boolean flag) throws Exception { - IgfsProcessorAdapter ggfsProc = ((IgniteKernal)grid(0)).context().igfs(); + IgfsProcessorAdapter igfsProc = ((IgniteKernal)grid(0)).context().igfs(); - Map<String, IgfsContext> ggfsMap = getField(ggfsProc, "ggfsCache"); + Map<String, IgfsContext> igfsMap = getField(igfsProc, "igfsCache"); - IgfsServerManager srvMgr = F.first(ggfsMap.values()).server(); + IgfsServerManager srvMgr = F.first(igfsMap.values()).server(); Collection<IgfsServer> srvrs = getField(srvMgr, "srvrs"); - IgfsServerHandler ggfsHnd = getField(F.first(srvrs), "hnd"); + IgfsServerHandler igfsHnd = getField(F.first(srvrs), "hnd"); - Field field = ggfsHnd.getClass().getDeclaredField("errWrite"); + Field field = igfsHnd.getClass().getDeclaredField("errWrite"); field.setAccessible(true); http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/fd1f854b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemHandshakeSelfTest.java ---------------------------------------------------------------------- diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemHandshakeSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemHandshakeSelfTest.java index a371dae..137db6d 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemHandshakeSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemHandshakeSelfTest.java @@ -120,7 +120,7 @@ public class IgfsHadoopFileSystemHandshakeSelfTest extends IgfsCommonAbstractTes * * @throws Exception If failed. */ - public void testHandshakeDefaultGgfs() throws Exception { + public void testHandshakeDefaultIgfs() throws Exception { startUp(false, true); checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@"); @@ -145,7 +145,7 @@ public class IgfsHadoopFileSystemHandshakeSelfTest extends IgfsCommonAbstractTes * * @throws Exception If failed. */ - public void testHandshakeDefaultGridDefaultGgfs() throws Exception { + public void testHandshakeDefaultGridDefaultIgfs() throws Exception { startUp(true, true); checkInvalid(IGFS_NAME + ":" + GRID_NAME + "@"); @@ -169,26 +169,26 @@ public class IgfsHadoopFileSystemHandshakeSelfTest extends IgfsCommonAbstractTes * Perform startup. * * @param dfltGridName Default Grid name. - * @param dfltGgfsName Default IGFS name. + * @param dfltIgfsName Default IGFS name. * @throws Exception If failed. */ - private void startUp(boolean dfltGridName, boolean dfltGgfsName) throws Exception { - Ignite ignite = G.start(gridConfiguration(dfltGridName, dfltGgfsName)); + private void startUp(boolean dfltGridName, boolean dfltIgfsName) throws Exception { + Ignite ignite = G.start(gridConfiguration(dfltGridName, dfltIgfsName)); - IgniteFs ggfs = ignite.fileSystem(dfltGgfsName ? null : IGFS_NAME); + IgniteFs igfs = ignite.fileSystem(dfltIgfsName ? null : IGFS_NAME); - ggfs.mkdirs(PATH); + igfs.mkdirs(PATH); } /** * Create Grid configuration. * * @param dfltGridName Default Grid name. - * @param dfltGgfsName Default IGFS name. + * @param dfltIgfsName Default IGFS name. * @return Grid configuration. * @throws Exception If failed. 
*/ - private IgniteConfiguration gridConfiguration(boolean dfltGridName, boolean dfltGgfsName) throws Exception { + private IgniteConfiguration gridConfiguration(boolean dfltGridName, boolean dfltIgfsName) throws Exception { IgniteConfiguration cfg = super.getConfiguration(dfltGridName ? null : GRID_NAME); cfg.setLocalHost("127.0.0.1"); @@ -227,22 +227,22 @@ public class IgfsHadoopFileSystemHandshakeSelfTest extends IgfsCommonAbstractTes cfg.setCacheConfiguration(metaCacheCfg, dataCacheCfg); - IgfsConfiguration ggfsCfg = new IgfsConfiguration(); + IgfsConfiguration igfsCfg = new IgfsConfiguration(); - ggfsCfg.setDataCacheName("partitioned"); - ggfsCfg.setMetaCacheName("replicated"); - ggfsCfg.setName(dfltGgfsName ? null : IGFS_NAME); - ggfsCfg.setPrefetchBlocks(1); - ggfsCfg.setDefaultMode(PRIMARY); - ggfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{ + igfsCfg.setDataCacheName("partitioned"); + igfsCfg.setMetaCacheName("replicated"); + igfsCfg.setName(dfltIgfsName ? null : IGFS_NAME); + igfsCfg.setPrefetchBlocks(1); + igfsCfg.setDefaultMode(PRIMARY); + igfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{ put("type", "tcp"); put("port", String.valueOf(DFLT_IPC_PORT)); }}); - ggfsCfg.setManagementPort(-1); - ggfsCfg.setBlockSize(512 * 1024); + igfsCfg.setManagementPort(-1); + igfsCfg.setBlockSize(512 * 1024); - cfg.setIgfsConfiguration(ggfsCfg); + cfg.setIgfsConfiguration(igfsCfg); return cfg; } http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/fd1f854b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemIpcCacheSelfTest.java ---------------------------------------------------------------------- diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemIpcCacheSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemIpcCacheSelfTest.java index 5bd47b4..a6357f8 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemIpcCacheSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemIpcCacheSelfTest.java @@ -65,21 +65,21 @@ public class IgfsHadoopFileSystemIpcCacheSelfTest extends IgfsCommonAbstractTest cfg.setDiscoverySpi(discoSpi); - IgfsConfiguration ggfsCfg = new IgfsConfiguration(); + IgfsConfiguration igfsCfg = new IgfsConfiguration(); - ggfsCfg.setDataCacheName("partitioned"); - ggfsCfg.setMetaCacheName("replicated"); - ggfsCfg.setName("igfs"); - ggfsCfg.setManagementPort(IgfsConfiguration.DFLT_MGMT_PORT + cnt); + igfsCfg.setDataCacheName("partitioned"); + igfsCfg.setMetaCacheName("replicated"); + igfsCfg.setName("igfs"); + igfsCfg.setManagementPort(IgfsConfiguration.DFLT_MGMT_PORT + cnt); - ggfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{ + igfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{ put("type", "shmem"); put("port", String.valueOf(IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT + cnt)); }}); - ggfsCfg.setBlockSize(512 * 1024); // Together with group blocks mapper will yield 64M per node groups. + igfsCfg.setBlockSize(512 * 1024); // Together with group blocks mapper will yield 64M per node groups. 
- cfg.setIgfsConfiguration(ggfsCfg); + cfg.setIgfsConfiguration(igfsCfg); cfg.setCacheConfiguration(cacheConfiguration()); http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/fd1f854b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoggerStateSelfTest.java ---------------------------------------------------------------------- diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoggerStateSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoggerStateSelfTest.java index e282019..cbe83f0 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoggerStateSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoggerStateSelfTest.java @@ -45,7 +45,7 @@ import static org.apache.ignite.igfs.hadoop.IgfsHadoopParameters.*; */ public class IgfsHadoopFileSystemLoggerStateSelfTest extends IgfsCommonAbstractTest { /** IGFS. */ - private IgfsEx ggfs; + private IgfsEx igfs; /** File system. */ private FileSystem fs; @@ -60,7 +60,7 @@ public class IgfsHadoopFileSystemLoggerStateSelfTest extends IgfsCommonAbstractT @Override protected void afterTest() throws Exception { U.closeQuiet(fs); - ggfs = null; + igfs = null; fs = null; G.stopAll(true); @@ -75,14 +75,14 @@ public class IgfsHadoopFileSystemLoggerStateSelfTest extends IgfsCommonAbstractT * @throws Exception If failed. */ private void startUp() throws Exception { - IgfsConfiguration ggfsCfg = new IgfsConfiguration(); - - ggfsCfg.setDataCacheName("partitioned"); - ggfsCfg.setMetaCacheName("replicated"); - ggfsCfg.setName("igfs"); - ggfsCfg.setBlockSize(512 * 1024); - ggfsCfg.setDefaultMode(PRIMARY); - ggfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{ + IgfsConfiguration igfsCfg = new IgfsConfiguration(); + + igfsCfg.setDataCacheName("partitioned"); + igfsCfg.setMetaCacheName("replicated"); + igfsCfg.setName("igfs"); + igfsCfg.setBlockSize(512 * 1024); + igfsCfg.setDefaultMode(PRIMARY); + igfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{ put("type", "tcp"); put("port", "10500"); }}); @@ -116,16 +116,16 @@ public class IgfsHadoopFileSystemLoggerStateSelfTest extends IgfsCommonAbstractT cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(metaCacheCfg, cacheCfg); - cfg.setIgfsConfiguration(ggfsCfg); + cfg.setIgfsConfiguration(igfsCfg); cfg.setLocalHost("127.0.0.1"); cfg.setConnectorConfiguration(null); Ignite g = G.start(cfg); - ggfs = (IgfsEx)g.fileSystem("igfs"); + igfs = (IgfsEx)g.fileSystem("igfs"); - ggfs.globalSampling(sampling); + igfs.globalSampling(sampling); fs = fileSystem(); } @@ -222,7 +222,7 @@ public class IgfsHadoopFileSystemLoggerStateSelfTest extends IgfsCommonAbstractT fs.close(); // "Not set" => true transition. - ggfs.globalSampling(true); + igfs.globalSampling(true); fs = fileSystem(); @@ -231,14 +231,14 @@ public class IgfsHadoopFileSystemLoggerStateSelfTest extends IgfsCommonAbstractT fs.close(); // True => "not set" transition. - ggfs.globalSampling(null); + igfs.globalSampling(null); fs = fileSystem(); assert !logEnabled(); // "Not-set" => false transition. - ggfs.globalSampling(false); + igfs.globalSampling(false); fs = fileSystem(); @@ -247,7 +247,7 @@ public class IgfsHadoopFileSystemLoggerStateSelfTest extends IgfsCommonAbstractT fs.close(); // False => "not=set" transition. 
- ggfs.globalSampling(null); + igfs.globalSampling(null); fs = fileSystem(); @@ -256,8 +256,8 @@ public class IgfsHadoopFileSystemLoggerStateSelfTest extends IgfsCommonAbstractT fs.close(); // True => false transition. - ggfs.globalSampling(true); - ggfs.globalSampling(false); + igfs.globalSampling(true); + igfs.globalSampling(false); fs = fileSystem(); @@ -266,7 +266,7 @@ public class IgfsHadoopFileSystemLoggerStateSelfTest extends IgfsCommonAbstractT fs.close(); // False => true transition. - ggfs.globalSampling(true); + igfs.globalSampling(true); fs = fileSystem(); @@ -283,7 +283,7 @@ public class IgfsHadoopFileSystemLoggerStateSelfTest extends IgfsCommonAbstractT startUp(); assertEquals(Paths.get(U.getIgniteHome()).normalize().toString(), - ggfs.clientLogDirectory()); + igfs.clientLogDirectory()); } /** http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/fd1f854b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemSecondaryModeSelfTest.java ---------------------------------------------------------------------- diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemSecondaryModeSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemSecondaryModeSelfTest.java index bdce59e..2e22d93 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemSecondaryModeSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemSecondaryModeSelfTest.java @@ -83,21 +83,21 @@ public class IgfsHadoopFileSystemSecondaryModeSelfTest extends IgfsCommonAbstrac private void startUp() throws Exception { startUpSecondary(); - IgfsConfiguration ggfsCfg = new IgfsConfiguration(); - - ggfsCfg.setDataCacheName("partitioned"); - ggfsCfg.setMetaCacheName("replicated"); - ggfsCfg.setName("igfs"); - ggfsCfg.setBlockSize(512 * 1024); - ggfsCfg.setDefaultMode(mode); - ggfsCfg.setPathModes(pathModes); - ggfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{ + IgfsConfiguration igfsCfg = new IgfsConfiguration(); + + igfsCfg.setDataCacheName("partitioned"); + igfsCfg.setMetaCacheName("replicated"); + igfsCfg.setName("igfs"); + igfsCfg.setBlockSize(512 * 1024); + igfsCfg.setDefaultMode(mode); + igfsCfg.setPathModes(pathModes); + igfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{ put("type", "tcp"); put("port", "10500"); }}); - ggfsCfg.setManagementPort(-1); - ggfsCfg.setSecondaryFileSystem(new IgfsHadoopFileSystemWrapper( + igfsCfg.setManagementPort(-1); + igfsCfg.setSecondaryFileSystem(new IgfsHadoopFileSystemWrapper( "igfs://igfs-secondary:igfs-grid-secondary@127.0.0.1:11500/", "modules/core/src/test/config/hadoop/core-site-loopback-secondary.xml")); @@ -130,7 +130,7 @@ public class IgfsHadoopFileSystemSecondaryModeSelfTest extends IgfsCommonAbstrac cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(metaCacheCfg, cacheCfg); - cfg.setIgfsConfiguration(ggfsCfg); + cfg.setIgfsConfiguration(igfsCfg); cfg.setLocalHost("127.0.0.1"); @@ -151,14 +151,14 @@ public class IgfsHadoopFileSystemSecondaryModeSelfTest extends IgfsCommonAbstrac * @throws Exception If failed. 
*/ private void startUpSecondary() throws Exception { - IgfsConfiguration ggfsCfg = new IgfsConfiguration(); - - ggfsCfg.setDataCacheName("partitioned"); - ggfsCfg.setMetaCacheName("replicated"); - ggfsCfg.setName("igfs-secondary"); - ggfsCfg.setBlockSize(512 * 1024); - ggfsCfg.setDefaultMode(PRIMARY); - ggfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{ + IgfsConfiguration igfsCfg = new IgfsConfiguration(); + + igfsCfg.setDataCacheName("partitioned"); + igfsCfg.setMetaCacheName("replicated"); + igfsCfg.setName("igfs-secondary"); + igfsCfg.setBlockSize(512 * 1024); + igfsCfg.setDefaultMode(PRIMARY); + igfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{ put("type", "tcp"); put("port", "11500"); }}); @@ -192,7 +192,7 @@ public class IgfsHadoopFileSystemSecondaryModeSelfTest extends IgfsCommonAbstrac cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(metaCacheCfg, cacheCfg); - cfg.setIgfsConfiguration(ggfsCfg); + cfg.setIgfsConfiguration(igfsCfg); cfg.setLocalHost("127.0.0.1"); http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/fd1f854b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsNearOnlyMultiNodeSelfTest.java ---------------------------------------------------------------------- diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsNearOnlyMultiNodeSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsNearOnlyMultiNodeSelfTest.java index c8cf2d7..d27d93d 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsNearOnlyMultiNodeSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsNearOnlyMultiNodeSelfTest.java @@ -75,20 +75,20 @@ public class IgfsNearOnlyMultiNodeSelfTest extends GridCommonAbstractTest { cfg.setDiscoverySpi(discoSpi); - IgfsConfiguration ggfsCfg = new IgfsConfiguration(); + IgfsConfiguration igfsCfg = new IgfsConfiguration(); - ggfsCfg.setDataCacheName("partitioned"); - ggfsCfg.setMetaCacheName("partitioned"); - ggfsCfg.setName("igfs"); + igfsCfg.setDataCacheName("partitioned"); + igfsCfg.setMetaCacheName("partitioned"); + igfsCfg.setName("igfs"); - ggfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{ + igfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{ put("type", "shmem"); put("port", String.valueOf(IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT + cnt)); }}); - ggfsCfg.setBlockSize(512 * 1024); // Together with group blocks mapper will yield 64M per node groups. + igfsCfg.setBlockSize(512 * 1024); // Together with group blocks mapper will yield 64M per node groups. - cfg.setIgfsConfiguration(ggfsCfg); + cfg.setIgfsConfiguration(igfsCfg); cfg.setCacheConfiguration(cacheConfiguration(gridName));
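Taken together, the test changes above all wire up IGFS the same way once the ggfsCfg/igfsCfg rename is applied. A minimal sketch of that configuration pattern is given below; the setter calls, cache names, block size, and the "type"/"port" endpoint map are taken verbatim from the test code in this diff, while the import locations, the class wrapper, and the Ignition.start() call are assumptions added only to make the sketch self-contained.

    import java.util.HashMap;

    import org.apache.ignite.Ignite;
    import org.apache.ignite.Ignition;
    import org.apache.ignite.configuration.IgniteConfiguration;
    // Assumed package locations for this branch; they are not shown in the diff.
    import org.apache.ignite.configuration.IgfsConfiguration;
    import org.apache.ignite.igfs.IgfsMode;

    public class IgfsConfigSketch {
        public static void main(String[] args) {
            // IGFS configuration as set up in the self-tests above.
            IgfsConfiguration igfsCfg = new IgfsConfiguration();

            igfsCfg.setDataCacheName("partitioned");
            igfsCfg.setMetaCacheName("replicated");
            igfsCfg.setName("igfs");
            igfsCfg.setBlockSize(512 * 1024);
            igfsCfg.setDefaultMode(IgfsMode.PRIMARY);

            // Loopback (TCP) IPC endpoint, as in the handshake and logger-state tests.
            igfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{
                put("type", "tcp");
                put("port", "10500");
            }});

            IgniteConfiguration cfg = new IgniteConfiguration();

            cfg.setIgfsConfiguration(igfsCfg);
            cfg.setLocalHost("127.0.0.1");

            // The tests also set data/meta cache configurations and a discovery SPI
            // here; those are omitted to keep the sketch short.

            Ignite ignite = Ignition.start(cfg); // assumed public start-up entry point
        }
    }

On the client side, the renamed tests then address this endpoint with a URI of the form igfs://<igfs name>:<grid name>@127.0.0.1:<port>/, as in the secondary-mode test above.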