http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/e1ffc10f/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/HadoopIpcIgfsIo.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/HadoopIpcIgfsIo.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/HadoopIpcIgfsIo.java
deleted file mode 100644
index 4de4d53..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/HadoopIpcIgfsIo.java
+++ /dev/null
@@ -1,599 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.igfs.hadoop;
-
-import org.apache.commons.logging.*;
-import org.apache.ignite.*;
-import org.apache.ignite.internal.*;
-import org.apache.ignite.internal.igfs.common.*;
-import org.apache.ignite.internal.util.*;
-import org.apache.ignite.internal.util.ipc.*;
-import org.apache.ignite.internal.util.ipc.shmem.*;
-import org.apache.ignite.internal.util.lang.*;
-import org.apache.ignite.internal.util.typedef.internal.*;
-import org.jdk8.backport.*;
-import org.jetbrains.annotations.*;
-
-import java.io.*;
-import java.util.*;
-import java.util.concurrent.*;
-import java.util.concurrent.atomic.*;
-import java.util.concurrent.locks.*;
-
-/**
- * IO layer implementation based on blocking IPC streams.
- */
-@SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
-public class HadoopIpcIgfsIo implements HadoopIgfsIo {
-    /** Logger. */
-    private Log log;
-
-    /** Request futures map. */
-    private ConcurrentMap<Long, HadoopIgfsFuture> reqMap =
-        new ConcurrentHashMap8<>();
-
-    /** Request ID counter. */
-    private AtomicLong reqIdCnt = new AtomicLong();
-
-    /** Endpoint. */
-    private IpcEndpoint endpoint;
-
-    /** Endpoint output stream. */
-    private IgfsDataOutputStream out;
-
-    /** Protocol. */
-    private final IgfsMarshaller marsh;
-
-    /** Client reader thread. */
-    private Thread reader;
-
-    /** Lock for graceful shutdown. */
-    private final ReadWriteLock busyLock = new ReentrantReadWriteLock();
-
-    /** Stopping flag. */
-    private volatile boolean stopping;
-
-    /** Server endpoint address. */
-    private final String endpointAddr;
-
-    /** Number of open file system sessions. */
-    private final AtomicInteger activeCnt = new AtomicInteger(1);
-
-    /** Event listeners. */
-    private final Collection<HadoopIgfsIpcIoListener> lsnrs =
-        new GridConcurrentHashSet<>();
-
-    /** Cached connections. */
-    private static final ConcurrentMap<String, HadoopIpcIgfsIo> ipcCache =
-        new ConcurrentHashMap8<>();
-
-    /** Striped lock that prevents multiple instance creation in {@link #get(Log, String)}. */
-    private static final GridStripedLock initLock = new GridStripedLock(32);
-
-    /**
-     * @param endpointAddr Endpoint.
-     * @param marsh Protocol.
-     * @param log Logger to use.
-     */
-    public HadoopIpcIgfsIo(String endpointAddr, IgfsMarshaller marsh, Log log) {
-        assert endpointAddr != null;
-        assert marsh != null;
-
-        this.endpointAddr = endpointAddr;
-        this.marsh = marsh;
-        this.log = log;
-    }
-
-    /**
-     * Returns a started and valid instance of this class
-     * for a given endpoint.
-     *
-     * @param log Logger to use for new instance.
-     * @param endpoint Endpoint string.
-     * @return New or existing cached instance, which is started and operational.
-     * @throws IOException If new instance was created but failed to start.
-     */
-    public static HadoopIpcIgfsIo get(Log log, String endpoint) throws IOException {
-        while (true) {
-            HadoopIpcIgfsIo clientIo = ipcCache.get(endpoint);
-
-            if (clientIo != null) {
-                if (clientIo.acquire())
-                    return clientIo;
-                else
-                    // If concurrent close.
-                    ipcCache.remove(endpoint, clientIo);
-            }
-            else {
-                Lock lock = initLock.getLock(endpoint);
-
-                lock.lock();
-
-                try {
-                    clientIo = ipcCache.get(endpoint);
-
-                    if (clientIo != null) { // Perform double check.
-                        if (clientIo.acquire())
-                            return clientIo;
-                        else
-                            // If concurrent close.
-                            ipcCache.remove(endpoint, clientIo);
-                    }
-
-                    // Otherwise try creating a new one.
-                    clientIo = new HadoopIpcIgfsIo(endpoint, new IgfsMarshaller(), log);
-
-                    try {
-                        clientIo.start();
-                    }
-                    catch (IgniteCheckedException e) {
-                        throw new IOException(e.getMessage(), e);
-                    }
-
-                    HadoopIpcIgfsIo old = ipcCache.putIfAbsent(endpoint, clientIo);
-
-                    // Put in exclusive lock.
-                    assert old == null;
-
-                    return clientIo;
-                }
-                finally {
-                    lock.unlock();
-                }
-            }
-        }
-    }
-
-    /**
-     * Increases usage count for this instance.
-     *
-     * @return {@code true} if usage count is greater than zero.
-     */
-    private boolean acquire() {
-        while (true) {
-            int cnt = activeCnt.get();
-
-            if (cnt == 0) {
-                if (log.isDebugEnabled())
-                    log.debug("IPC IO not acquired (count was 0): " + this);
-
-                return false;
-            }
-
-            // Need to make sure that no-one decremented count in between.
-            if (activeCnt.compareAndSet(cnt, cnt + 1)) {
-                if (log.isDebugEnabled())
-                    log.debug("IPC IO acquired: " + this);
-
-                return true;
-            }
-        }
-    }
-
-    /**
-     * Releases this instance, decrementing usage count.
-     * <p>
-     * If usage count becomes zero, the instance is stopped
-     * and removed from cache.
-     */
-    public void release() {
-        while (true) {
-            int cnt = activeCnt.get();
-
-            if (cnt == 0) {
-                if (log.isDebugEnabled())
-                    log.debug("IPC IO not released (count was 0): " + this);
-
-                return;
-            }
-
-            if (activeCnt.compareAndSet(cnt, cnt - 1)) {
-                if (cnt == 1) {
-                    ipcCache.remove(endpointAddr, this);
-
-                    if (log.isDebugEnabled())
-                        log.debug("IPC IO stopping as unused: " + this);
-
-                    stop();
-                }
-                else if (log.isDebugEnabled())
-                    log.debug("IPC IO released: " + this);
-
-                return;
-            }
-        }
-    }
-
-    /**
-     * Closes this IO instance, removing it from cache.
-     */
-    public void forceClose() {
-        if (ipcCache.remove(endpointAddr, this))
-            stop();
-    }
-
-    /**
-     * Starts the IO.
-     *
-     * @throws IgniteCheckedException If failed to connect the endpoint.
-     */
-    private void start() throws IgniteCheckedException {
-        boolean success = false;
-
-        try {
-            endpoint = IpcEndpointFactory.connectEndpoint(
-                    endpointAddr, new GridLoggerProxy(new HadoopIgfsJclLogger(log), null, null, ""));
-
-            out = new IgfsDataOutputStream(new BufferedOutputStream(endpoint.outputStream()));
-
-            reader = new ReaderThread();
-
-            // Required for Hadoop 2.x
-            reader.setDaemon(true);
-
-            reader.start();
-
-            success = true;
-        }
-        catch (IgniteCheckedException e) {
-            IpcOutOfSystemResourcesException resEx = e.getCause(IpcOutOfSystemResourcesException.class);
-
-            if (resEx != null)
-                throw new IgniteCheckedException(IpcSharedMemoryServerEndpoint.OUT_OF_RESOURCES_MSG, resEx);
-
-            throw e;
-        }
-        finally {
-            if (!success)
-                stop();
-        }
-    }
-
-    /**
-     * Shuts down the IO. No send requests will be accepted anymore, all pending futures will be failed.
-     * Close listeners will be invoked as if connection is closed by server.
-     */
-    private void stop() {
-        close0(null);
-
-        if (reader != null) {
-            try {
-                U.interrupt(reader);
-                U.join(reader);
-
-                reader = null;
-            }
-            catch (IgniteInterruptedCheckedException ignored) {
-                Thread.currentThread().interrupt();
-
-                log.warn("Got interrupted while waiting for reader thread to 
shut down (will return).");
-            }
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void addEventListener(HadoopIgfsIpcIoListener lsnr) {
-        if (!busyLock.readLock().tryLock()) {
-            lsnr.onClose();
-
-            return;
-        }
-
-        boolean invokeNow = false;
-
-        try {
-            invokeNow = stopping;
-
-            if (!invokeNow)
-                lsnrs.add(lsnr);
-        }
-        finally {
-            busyLock.readLock().unlock();
-
-            if (invokeNow)
-                lsnr.onClose();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void removeEventListener(HadoopIgfsIpcIoListener lsnr) {
-        lsnrs.remove(lsnr);
-    }
-
-    /** {@inheritDoc} */
-    @Override public GridPlainFuture<IgfsMessage> send(IgfsMessage msg) throws IgniteCheckedException {
-        return send(msg, null, 0, 0);
-    }
-
-    /** {@inheritDoc} */
-    @Override public <T> GridPlainFuture<T> send(IgfsMessage msg, @Nullable byte[] outBuf, int outOff,
-        int outLen) throws IgniteCheckedException {
-        assert outBuf == null || msg.command() == IgfsIpcCommand.READ_BLOCK;
-
-        if (!busyLock.readLock().tryLock())
-            throw new HadoopIgfsCommunicationException("Failed to send message (client is being concurrently " +
-                "closed).");
-
-        try {
-            if (stopping)
-                throw new HadoopIgfsCommunicationException("Failed to send message (client is being concurrently " +
-                    "closed).");
-
-            long reqId = reqIdCnt.getAndIncrement();
-
-            HadoopIgfsFuture<T> fut = new HadoopIgfsFuture<>();
-
-            fut.outputBuffer(outBuf);
-            fut.outputOffset(outOff);
-            fut.outputLength(outLen);
-            fut.read(msg.command() == IgfsIpcCommand.READ_BLOCK);
-
-            HadoopIgfsFuture oldFut = reqMap.putIfAbsent(reqId, fut);
-
-            assert oldFut == null;
-
-            if (log.isDebugEnabled())
-                log.debug("Sending IGFS message [reqId=" + reqId + ", msg=" + 
msg + ']');
-
-            byte[] hdr = IgfsMarshaller.createHeader(reqId, msg.command());
-
-            IgniteCheckedException err = null;
-
-            try {
-                synchronized (this) {
-                    marsh.marshall(msg, hdr, out);
-
-                    out.flush(); // Blocking operation + sometimes system call.
-                }
-            }
-            catch (IgniteCheckedException e) {
-                err = e;
-            }
-            catch (IOException e) {
-                err = new HadoopIgfsCommunicationException(e);
-            }
-
-            if (err != null) {
-                reqMap.remove(reqId, fut);
-
-                fut.onDone(err);
-            }
-
-            return fut;
-        }
-        finally {
-            busyLock.readLock().unlock();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void sendPlain(IgfsMessage msg) throws IgniteCheckedException {
-        if (!busyLock.readLock().tryLock())
-            throw new HadoopIgfsCommunicationException("Failed to send message (client is being " +
-                "concurrently closed).");
-
-        try {
-            if (stopping)
-                throw new HadoopIgfsCommunicationException("Failed to send message (client is being concurrently closed).");
-
-            assert msg.command() == IgfsIpcCommand.WRITE_BLOCK;
-
-            IgfsStreamControlRequest req = (IgfsStreamControlRequest)msg;
-
-            byte[] hdr = IgfsMarshaller.createHeader(-1, IgfsIpcCommand.WRITE_BLOCK);
-
-            U.longToBytes(req.streamId(), hdr, 12);
-            U.intToBytes(req.length(), hdr, 20);
-
-            synchronized (this) {
-                out.write(hdr);
-                out.write(req.data(), (int)req.position(), req.length());
-
-                out.flush();
-            }
-        }
-        catch (IOException e) {
-            throw new HadoopIgfsCommunicationException(e);
-        }
-        finally {
-            busyLock.readLock().unlock();
-        }
-    }
-
-    /**
-     * Closes client but does not wait.
-     *
-     * @param err Error.
-     */
-    private void close0(@Nullable Throwable err) {
-        busyLock.writeLock().lock();
-
-        try {
-            if (stopping)
-                return;
-
-            stopping = true;
-        }
-        finally {
-            busyLock.writeLock().unlock();
-        }
-
-        if (err == null)
-            err = new IgniteCheckedException("Failed to perform request (connection was concurrently closed before response " +
-                "is received).");
-
-        // Clean up resources.
-        U.closeQuiet(out);
-
-        if (endpoint != null)
-            endpoint.close();
-
-        // Unwind futures. We can safely iterate here because no more futures will be added.
-        Iterator<HadoopIgfsFuture> it = reqMap.values().iterator();
-
-        while (it.hasNext()) {
-            HadoopIgfsFuture fut = it.next();
-
-            fut.onDone(err);
-
-            it.remove();
-        }
-
-        for (HadoopIgfsIpcIoListener lsnr : lsnrs)
-            lsnr.onClose();
-    }
-
-    /**
-     * Do not extend {@code GridThread} to minimize class dependencies.
-     */
-    private class ReaderThread extends Thread {
-        /** {@inheritDoc} */
-        @SuppressWarnings("unchecked")
-        @Override public void run() {
-            // Error to fail pending futures.
-            Throwable err = null;
-
-            try {
-                InputStream in = endpoint.inputStream();
-
-                IgfsDataInputStream dis = new IgfsDataInputStream(in);
-
-                byte[] hdr = new byte[IgfsMarshaller.HEADER_SIZE];
-                byte[] msgHdr = new byte[IgfsControlResponse.RES_HEADER_SIZE];
-
-                while (!Thread.currentThread().isInterrupted()) {
-                    dis.readFully(hdr);
-
-                    long reqId = U.bytesToLong(hdr, 0);
-
-                    // We don't wait for write responses, therefore reqId is -1.
-                    if (reqId == -1) {
-                        // We received a response which normally should not be sent. It must contain an error.
-                        dis.readFully(msgHdr);
-
-                        assert msgHdr[4] != 0;
-
-                        String errMsg = dis.readUTF();
-
-                        // Error code.
-                        dis.readInt();
-
-                        long streamId = dis.readLong();
-
-                        for (HadoopIgfsIpcIoListener lsnr : lsnrs)
-                            lsnr.onError(streamId, errMsg);
-                    }
-                    else {
-                        HadoopIgfsFuture<Object> fut = reqMap.remove(reqId);
-
-                        if (fut == null) {
-                            String msg = "Failed to read response from server: 
response closure is unavailable for " +
-                                "requestId (will close connection):" + reqId;
-
-                            log.warn(msg);
-
-                            err = new IgniteCheckedException(msg);
-
-                            break;
-                        }
-                        else {
-                            try {
-                                IgfsIpcCommand cmd = IgfsIpcCommand.valueOf(U.bytesToInt(hdr, 8));
-
-                                if (log.isDebugEnabled())
-                                    log.debug("Received IGFS response [reqId=" 
+ reqId + ", cmd=" + cmd + ']');
-
-                                Object res = null;
-
-                                if (fut.read()) {
-                                    dis.readFully(msgHdr);
-
-                                    boolean hasErr = msgHdr[4] != 0;
-
-                                    if (hasErr) {
-                                        String errMsg = dis.readUTF();
-
-                                        // Error code.
-                                        Integer errCode = dis.readInt();
-
-                                        IgfsControlResponse.throwError(errCode, errMsg);
-                                    }
-
-                                    int blockLen = U.bytesToInt(msgHdr, 5);
-
-                                    int readLen = Math.min(blockLen, fut.outputLength());
-
-                                    if (readLen > 0) {
-                                        assert fut.outputBuffer() != null;
-
-                                        dis.readFully(fut.outputBuffer(), fut.outputOffset(), readLen);
-                                    }
-
-                                    if (readLen != blockLen) {
-                                        byte[] buf = new byte[blockLen - readLen];
-
-                                        dis.readFully(buf);
-
-                                        res = buf;
-                                    }
-                                }
-                                else
-                                    res = marsh.unmarshall(cmd, hdr, dis);
-
-                                fut.onDone(res);
-                            }
-                            catch (IgniteCheckedException e) {
-                                if (log.isDebugEnabled())
-                                    log.debug("Failed to apply response 
closure (will fail request future): " +
-                                        e.getMessage());
-
-                                fut.onDone(e);
-
-                                err = e;
-                            }
-                        }
-                    }
-                }
-            }
-            catch (EOFException ignored) {
-                err = new IgniteCheckedException("Failed to read response from 
server (connection was closed by remote peer).");
-            }
-            catch (IOException e) {
-                if (!stopping)
-                    log.error("Failed to read data (connection will be 
closed)", e);
-
-                err = new HadoopIgfsCommunicationException(e);
-            }
-            catch (IgniteCheckedException e) {
-                if (!stopping)
-                    log.error("Failed to obtain endpoint input stream 
(connection will be closed)", e);
-
-                err = e;
-            }
-            finally {
-                close0(err);
-            }
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return getClass().getSimpleName() + " [endpointAddr=" + endpointAddr + ", activeCnt=" + activeCnt +
-            ", stopping=" + stopping + ']';
-    }
-}
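
For context: the acquire()/release() pair above implements lock-free reference
counting over the shared per-endpoint connection cache. The sketch below shows
the same CAS-loop idiom in isolation; it compiles standalone, and all names
(RefCountedIo, the cache key handling) are illustrative, not the Ignite API.

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.concurrent.atomic.AtomicInteger;

    public class RefCountedIo {
        /** Usage count; the creator holds the initial reference, 0 means closed. */
        private final AtomicInteger activeCnt = new AtomicInteger(1);

        /** Shared cache of live instances (illustrative). */
        static final ConcurrentMap<String, RefCountedIo> cache = new ConcurrentHashMap<>();

        /** @return {@code false} if the instance was closed concurrently. */
        boolean acquire() {
            while (true) {
                int cnt = activeCnt.get();

                if (cnt == 0)
                    return false; // Lost the race with the last release().

                // CAS guards against a concurrent decrement between get() and set.
                if (activeCnt.compareAndSet(cnt, cnt + 1))
                    return true;
            }
        }

        /** Decrements the count; the last caller evicts the instance from the cache. */
        void release(String key) {
            while (true) {
                int cnt = activeCnt.get();

                if (cnt == 0)
                    return; // Already closed.

                if (activeCnt.compareAndSet(cnt, cnt - 1)) {
                    if (cnt == 1)
                        cache.remove(key, this); // Last user: evict (and stop I/O here).

                    return;
                }
            }
        }
    }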

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/e1ffc10f/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/HadoopOutProcIgfs.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/HadoopOutProcIgfs.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/HadoopOutProcIgfs.java
deleted file mode 100644
index 19205c0..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/HadoopOutProcIgfs.java
+++ /dev/null
@@ -1,466 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.igfs.hadoop;
-
-import org.apache.commons.logging.*;
-import org.apache.ignite.*;
-import org.apache.ignite.igfs.*;
-import org.apache.ignite.internal.igfs.common.*;
-import org.apache.ignite.internal.processors.igfs.*;
-import org.apache.ignite.internal.util.lang.*;
-import org.jdk8.backport.*;
-import org.jetbrains.annotations.*;
-
-import java.io.*;
-import java.util.*;
-
-import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.*;
-
-/**
- * Communication with external process (TCP or shmem).
- */
-public class HadoopOutProcIgfs implements HadoopIgfsEx, HadoopIgfsIpcIoListener {
-    /** Expected result is boolean. */
-    private static final GridPlainClosure<GridPlainFuture<IgfsMessage>, Boolean> BOOL_RES = createClosure();
-
-    /** Expected result is long. */
-    private static final GridPlainClosure<GridPlainFuture<IgfsMessage>, Long> LONG_RES = createClosure();
-
-    /** Expected result is {@code IgfsFile}. */
-    private static final GridPlainClosure<GridPlainFuture<IgfsMessage>, IgfsFile> FILE_RES = createClosure();
-
-    /** Expected result is {@code IgfsHandshakeResponse}. */
-    private static final GridPlainClosure<GridPlainFuture<IgfsMessage>,
-        IgfsHandshakeResponse> HANDSHAKE_RES = createClosure();
-
-    /** Expected result is {@code IgfsStatus}. */
-    private static final GridPlainClosure<GridPlainFuture<IgfsMessage>, IgfsStatus> STATUS_RES =
-        createClosure();
-
-    /** Expected result is {@code IgfsInputStreamDescriptor}. */
-    private static final GridPlainClosure<GridPlainFuture<IgfsMessage>,
-        IgfsInputStreamDescriptor> STREAM_DESCRIPTOR_RES = createClosure();
-
-    /** Expected result is a collection of {@code IgfsFile}. */
-    private static final GridPlainClosure<GridPlainFuture<IgfsMessage>,
-        Collection<IgfsFile>> FILE_COL_RES = createClosure();
-
-    /** Expected result is a collection of {@code IgfsPath}. */
-    private static final GridPlainClosure<GridPlainFuture<IgfsMessage>,
-        Collection<IgfsPath>> PATH_COL_RES = createClosure();
-
-    /** Expected result is {@code IgfsPathSummary}. */
-    private static final GridPlainClosure<GridPlainFuture<IgfsMessage>, IgfsPathSummary> SUMMARY_RES =
-        createClosure();
-
-    /** Expected result is a collection of {@code IgfsBlockLocation}. */
-    private static final GridPlainClosure<GridPlainFuture<IgfsMessage>,
-        Collection<IgfsBlockLocation>> BLOCK_LOCATION_COL_RES = createClosure();
-
-    /** Grid name. */
-    private final String grid;
-
-    /** IGFS name. */
-    private final String igfs;
-
-    /** Client log. */
-    private final Log log;
-
-    /** Client IO. */
-    private final HadoopIpcIgfsIo io;
-
-    /** Event listeners. */
-    private final Map<Long, HadoopIgfsStreamEventListener> lsnrs = new ConcurrentHashMap8<>();
-
-    /**
-     * Constructor for TCP endpoint.
-     *
-     * @param host Host.
-     * @param port Port.
-     * @param grid Grid name.
-     * @param igfs IGFS name.
-     * @param log Client logger.
-     * @throws IOException If failed.
-     */
-    public HadoopOutProcIgfs(String host, int port, String grid, String igfs, Log log) throws IOException {
-        this(host, port, grid, igfs, false, log);
-    }
-
-    /**
-     * Constructor for shmem endpoint.
-     *
-     * @param port Port.
-     * @param grid Grid name.
-     * @param igfs IGFS name.
-     * @param log Client logger.
-     * @throws IOException If failed.
-     */
-    public HadoopOutProcIgfs(int port, String grid, String igfs, Log log) throws IOException {
-        this(null, port, grid, igfs, true, log);
-    }
-
-    /**
-     * Constructor.
-     *
-     * @param host Host.
-     * @param port Port.
-     * @param grid Grid name.
-     * @param igfs IGFS name.
-     * @param shmem Shared memory flag.
-     * @param log Client logger.
-     * @throws IOException If failed.
-     */
-    private HadoopOutProcIgfs(String host, int port, String grid, String igfs, boolean shmem, Log log)
-        throws IOException {
-        assert host != null && !shmem || host == null && shmem :
-            "Invalid arguments [host=" + host + ", port=" + port + ", shmem=" 
+ shmem + ']';
-
-        String endpoint = host != null ? host + ":" + port : "shmem:" + port;
-
-        this.grid = grid;
-        this.igfs = igfs;
-        this.log = log;
-
-        io = HadoopIpcIgfsIo.get(log, endpoint);
-
-        io.addEventListener(this);
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgfsHandshakeResponse handshake(String logDir) throws IgniteCheckedException {
-        final IgfsHandshakeRequest req = new IgfsHandshakeRequest();
-
-        req.gridName(grid);
-        req.igfsName(igfs);
-        req.logDirectory(logDir);
-
-        return io.send(req).chain(HANDSHAKE_RES).get();
-    }
-
-    /** {@inheritDoc} */
-    @Override public void close(boolean force) {
-        assert io != null;
-
-        io.removeEventListener(this);
-
-        if (force)
-            io.forceClose();
-        else
-            io.release();
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgfsFile info(IgfsPath path) throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(INFO);
-        msg.path(path);
-
-        return io.send(msg).chain(FILE_RES).get();
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgfsFile update(IgfsPath path, Map<String, String> props) throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(UPDATE);
-        msg.path(path);
-        msg.properties(props);
-
-        return io.send(msg).chain(FILE_RES).get();
-    }
-
-    /** {@inheritDoc} */
-    @Override public Boolean setTimes(IgfsPath path, long accessTime, long modificationTime) throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(SET_TIMES);
-        msg.path(path);
-        msg.accessTime(accessTime);
-        msg.modificationTime(modificationTime);
-
-        return io.send(msg).chain(BOOL_RES).get();
-    }
-
-    /** {@inheritDoc} */
-    @Override public Boolean rename(IgfsPath src, IgfsPath dest) throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(RENAME);
-        msg.path(src);
-        msg.destinationPath(dest);
-
-        return io.send(msg).chain(BOOL_RES).get();
-    }
-
-    /** {@inheritDoc} */
-    @Override public Boolean delete(IgfsPath path, boolean recursive) throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(DELETE);
-        msg.path(path);
-        msg.flag(recursive);
-
-        return io.send(msg).chain(BOOL_RES).get();
-    }
-
-    /** {@inheritDoc} */
-    @Override public Collection<IgfsBlockLocation> affinity(IgfsPath path, long start, long len)
-        throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(AFFINITY);
-        msg.path(path);
-        msg.start(start);
-        msg.length(len);
-
-        return io.send(msg).chain(BLOCK_LOCATION_COL_RES).get();
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgfsPathSummary contentSummary(IgfsPath path) throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(PATH_SUMMARY);
-        msg.path(path);
-
-        return io.send(msg).chain(SUMMARY_RES).get();
-    }
-
-    /** {@inheritDoc} */
-    @Override public Boolean mkdirs(IgfsPath path, Map<String, String> props) throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(MAKE_DIRECTORIES);
-        msg.path(path);
-        msg.properties(props);
-
-        return io.send(msg).chain(BOOL_RES).get();
-    }
-
-    /** {@inheritDoc} */
-    @Override public Collection<IgfsFile> listFiles(IgfsPath path) throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(LIST_FILES);
-        msg.path(path);
-
-        return io.send(msg).chain(FILE_COL_RES).get();
-    }
-
-    /** {@inheritDoc} */
-    @Override public Collection<IgfsPath> listPaths(IgfsPath path) throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(LIST_PATHS);
-        msg.path(path);
-
-        return io.send(msg).chain(PATH_COL_RES).get();
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgfsStatus fsStatus() throws IgniteCheckedException {
-        return io.send(new IgfsStatusRequest()).chain(STATUS_RES).get();
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopIgfsStreamDelegate open(IgfsPath path) throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(OPEN_READ);
-        msg.path(path);
-        msg.flag(false);
-
-        IgfsInputStreamDescriptor rmtDesc = io.send(msg).chain(STREAM_DESCRIPTOR_RES).get();
-
-        return new HadoopIgfsStreamDelegate(this, rmtDesc.streamId(), 
rmtDesc.length());
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopIgfsStreamDelegate open(IgfsPath path,
-        int seqReadsBeforePrefetch) throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(OPEN_READ);
-        msg.path(path);
-        msg.flag(true);
-        msg.sequentialReadsBeforePrefetch(seqReadsBeforePrefetch);
-
-        IgfsInputStreamDescriptor rmtDesc = io.send(msg).chain(STREAM_DESCRIPTOR_RES).get();
-
-        return new HadoopIgfsStreamDelegate(this, rmtDesc.streamId(), 
rmtDesc.length());
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopIgfsStreamDelegate create(IgfsPath path, boolean overwrite, boolean colocate,
-        int replication, long blockSize, @Nullable Map<String, String> props) throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(OPEN_CREATE);
-        msg.path(path);
-        msg.flag(overwrite);
-        msg.colocate(colocate);
-        msg.properties(props);
-        msg.replication(replication);
-        msg.blockSize(blockSize);
-
-        Long streamId = io.send(msg).chain(LONG_RES).get();
-
-        return new HadoopIgfsStreamDelegate(this, streamId);
-    }
-
-    /** {@inheritDoc} */
-    @Override public HadoopIgfsStreamDelegate append(IgfsPath path, boolean create,
-        @Nullable Map<String, String> props) throws IgniteCheckedException {
-        final IgfsPathControlRequest msg = new IgfsPathControlRequest();
-
-        msg.command(OPEN_APPEND);
-        msg.path(path);
-        msg.flag(create);
-        msg.properties(props);
-
-        Long streamId = io.send(msg).chain(LONG_RES).get();
-
-        return new HadoopIgfsStreamDelegate(this, streamId);
-    }
-
-    /** {@inheritDoc} */
-    @Override public GridPlainFuture<byte[]> readData(HadoopIgfsStreamDelegate desc, long pos, int len,
-        final @Nullable byte[] outBuf, final int outOff, final int outLen) {
-        assert len > 0;
-
-        final IgfsStreamControlRequest msg = new IgfsStreamControlRequest();
-
-        msg.command(READ_BLOCK);
-        msg.streamId((long) desc.target());
-        msg.position(pos);
-        msg.length(len);
-
-        try {
-            return io.send(msg, outBuf, outOff, outLen);
-        }
-        catch (IgniteCheckedException e) {
-            return new GridPlainFutureAdapter<>(e);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeData(HadoopIgfsStreamDelegate desc, byte[] data, int off, int len)
-        throws IOException {
-        final IgfsStreamControlRequest msg = new IgfsStreamControlRequest();
-
-        msg.command(WRITE_BLOCK);
-        msg.streamId((long) desc.target());
-        msg.data(data);
-        msg.position(off);
-        msg.length(len);
-
-        try {
-            io.sendPlain(msg);
-        }
-        catch (IgniteCheckedException e) {
-            throw HadoopIgfsUtils.cast(e);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void flush(HadoopIgfsStreamDelegate delegate) throws IOException {
-        // No-op.
-    }
-
-    /** {@inheritDoc} */
-    @Override public void closeStream(HadoopIgfsStreamDelegate desc) throws IOException {
-        final IgfsStreamControlRequest msg = new IgfsStreamControlRequest();
-
-        msg.command(CLOSE);
-        msg.streamId((long)desc.target());
-
-        try {
-            io.send(msg).chain(BOOL_RES).get();
-        }
-        catch (IgniteCheckedException e) {
-            throw HadoopIgfsUtils.cast(e);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void addEventListener(HadoopIgfsStreamDelegate desc,
-        HadoopIgfsStreamEventListener lsnr) {
-        long streamId = desc.target();
-
-        HadoopIgfsStreamEventListener lsnr0 = lsnrs.put(streamId, lsnr);
-
-        assert lsnr0 == null || lsnr0 == lsnr;
-
-        if (log.isDebugEnabled())
-            log.debug("Added stream event listener [streamId=" + streamId + 
']');
-    }
-
-    /** {@inheritDoc} */
-    @Override public void removeEventListener(HadoopIgfsStreamDelegate desc) {
-        long streamId = desc.target();
-
-        HadoopIgfsStreamEventListener lsnr0 = lsnrs.remove(streamId);
-
-        if (lsnr0 != null && log.isDebugEnabled())
-            log.debug("Removed stream event listener [streamId=" + streamId + 
']');
-    }
-
-    /** {@inheritDoc} */
-    @Override public void onClose() {
-        for (HadoopIgfsStreamEventListener lsnr : lsnrs.values()) {
-            try {
-                lsnr.onClose();
-            }
-            catch (IgniteCheckedException e) {
-                log.warn("Got exception from stream event listener (will 
ignore): " + lsnr, e);
-            }
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void onError(long streamId, String errMsg) {
-        HadoopIgfsStreamEventListener lsnr = lsnrs.get(streamId);
-
-        if (lsnr != null)
-            lsnr.onError(errMsg);
-        else
-            log.warn("Received write error response for not registered output 
stream (will ignore) " +
-                "[streamId= " + streamId + ']');
-    }
-
-    /**
-     * Creates conversion closure for given type.
-     *
-     * @param <T> Type of expected result.
-     * @return Conversion closure.
-     */
-    @SuppressWarnings("unchecked")
-    private static <T> GridPlainClosure<GridPlainFuture<IgfsMessage>, T> createClosure() {
-        return new GridPlainClosure<GridPlainFuture<IgfsMessage>, T>() {
-            @Override public T apply(GridPlainFuture<IgfsMessage> fut) throws IgniteCheckedException {
-                IgfsControlResponse res = (IgfsControlResponse)fut.get();
-
-                if (res.hasError())
-                    res.throwError();
-
-                return (T)res.response();
-            }
-        };
-    }
-}
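
For context: every operation above follows one pattern -- build a request
message, send it through the shared IO, then chain a typed closure (from
createClosure()) that unwraps the payload or rethrows a server-side error.
A rough standalone equivalent using java.util.concurrent types; the Response
record and chain() helper here are hypothetical, not the Ignite API.

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.CompletionException;

    public class TypedRpcSketch {
        /** Hypothetical wire-level response: a payload or an error message. */
        record Response(Object payload, String errMsg) {
            boolean hasError() { return errMsg != null; }
        }

        /** Chains a typed unwrap step onto a raw response future, like createClosure(). */
        @SuppressWarnings("unchecked")
        static <T> CompletableFuture<T> chain(CompletableFuture<Response> fut) {
            return fut.thenApply(res -> {
                if (res.hasError())
                    throw new CompletionException(new IllegalStateException(res.errMsg()));

                return (T)res.payload(); // Unchecked cast, as in the original closure.
            });
        }

        public static void main(String[] args) {
            CompletableFuture<Response> raw =
                CompletableFuture.completedFuture(new Response(42L, null));

            CompletableFuture<Long> streamId = chain(raw); // T inferred as Long.

            System.out.println(streamId.join()); // Prints 42.
        }
    }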

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/e1ffc10f/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/HadoopOutputIgfsStream.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/HadoopOutputIgfsStream.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/HadoopOutputIgfsStream.java
deleted file mode 100644
index ab5fa68..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/HadoopOutputIgfsStream.java
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.igfs.hadoop;
-
-import org.apache.commons.logging.*;
-import org.apache.ignite.*;
-import org.apache.ignite.internal.igfs.common.*;
-import org.jetbrains.annotations.*;
-
-import java.io.*;
-
-/**
- * IGFS Hadoop output stream implementation.
- */
-public class HadoopOutputIgfsStream extends OutputStream implements HadoopIgfsStreamEventListener {
-    /** Log instance. */
-    private Log log;
-
-    /** Client logger. */
-    private IgfsLogger clientLog;
-
-    /** Log stream ID. */
-    private long logStreamId;
-
-    /** Server stream delegate. */
-    private HadoopIgfsStreamDelegate delegate;
-
-    /** Closed flag. */
-    private volatile boolean closed;
-
-    /** Flag set if stream was closed due to connection breakage. */
-    private boolean connBroken;
-
-    /** Error message. */
-    private volatile String errMsg;
-
-    /** Write time. */
-    private long writeTime;
-
-    /** User time. */
-    private long userTime;
-
-    /** Last timestamp. */
-    private long lastTs;
-
-    /** Amount of written bytes. */
-    private long total;
-
-    /**
-     * Creates light output stream.
-     *
-     * @param delegate Server stream delegate.
-     * @param log Logger to use.
-     * @param clientLog Client logger.
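-     * @param logStreamId Log stream ID.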
-     */
-    public HadoopOutputIgfsStream(HadoopIgfsStreamDelegate delegate, Log log,
-        IgfsLogger clientLog, long logStreamId) {
-        this.delegate = delegate;
-        this.log = log;
-        this.clientLog = clientLog;
-        this.logStreamId = logStreamId;
-
-        lastTs = System.nanoTime();
-
-        delegate.hadoop().addEventListener(delegate, this);
-    }
-
-    /**
-     * Write start.
-     */
-    private void writeStart() {
-        long now = System.nanoTime();
-
-        userTime += now - lastTs;
-
-        lastTs = now;
-    }
-
-    /**
-     * Write end.
-     */
-    private void writeEnd() {
-        long now = System.nanoTime();
-
-        writeTime += now - lastTs;
-
-        lastTs = now;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void write(@NotNull byte[] b, int off, int len) throws IOException {
-        check();
-
-        writeStart();
-
-        try {
-            delegate.hadoop().writeData(delegate, b, off, len);
-
-            total += len;
-        }
-        finally {
-            writeEnd();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void write(int b) throws IOException {
-        write(new byte[] {(byte)b});
-
-        total++;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void flush() throws IOException {
-        delegate.hadoop().flush(delegate);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void close() throws IOException {
-        if (!closed) {
-            if (log.isDebugEnabled())
-                log.debug("Closing output stream: " + delegate);
-
-            writeStart();
-
-            delegate.hadoop().closeStream(delegate);
-
-            markClosed(false);
-
-            writeEnd();
-
-            if (clientLog.isLogEnabled())
-                clientLog.logCloseOut(logStreamId, userTime, writeTime, total);
-
-            if (log.isDebugEnabled())
-                log.debug("Closed output stream [delegate=" + delegate + ", 
writeTime=" + writeTime / 1000 +
-                    ", userTime=" + userTime / 1000 + ']');
-        }
-        else if (connBroken)
-            throw new IOException(
-                "Failed to close stream, because connection was broken (data 
could have been lost).");
-    }
-
-    /**
-     * Marks stream as closed.
-     *
-     * @param connBroken {@code True} if connection with server was lost.
-     */
-    private void markClosed(boolean connBroken) {
-        // It is ok to have race here.
-        if (!closed) {
-            closed = true;
-
-            delegate.hadoop().removeEventListener(delegate);
-
-            this.connBroken = connBroken;
-        }
-    }
-
-    /**
-     * @throws IOException If check failed.
-     */
-    private void check() throws IOException {
-        String errMsg0 = errMsg;
-
-        if (errMsg0 != null)
-            throw new IOException(errMsg0);
-
-        if (closed) {
-            if (connBroken)
-                throw new IOException("Server connection was lost.");
-            else
-                throw new IOException("Stream is closed.");
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void onClose() throws IgniteCheckedException {
-        markClosed(true);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void onError(String errMsg) {
-        this.errMsg = errMsg;
-    }
-}
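
For context: the stream above splits wall-clock time into "user time" (spent
between operations) and "write time" (spent inside them) by rolling a single
lastTs timestamp forward at every phase boundary, exactly as writeStart() and
writeEnd() do. A stripped-down sketch of that accounting; the class and method
names are illustrative, not the Ignite API.

    public class PhaseTimer {
        private long userTime;  // Nanoseconds accumulated between operations.
        private long writeTime; // Nanoseconds accumulated inside operations.
        private long lastTs = System.nanoTime();

        /** Entry to an operation: everything since lastTs counts as user time. */
        void opStart() {
            long now = System.nanoTime();
            userTime += now - lastTs;
            lastTs = now;
        }

        /** Exit from an operation: everything since lastTs counts as write time. */
        void opEnd() {
            long now = System.nanoTime();
            writeTime += now - lastTs;
            lastTs = now;
        }

        public static void main(String[] args) throws InterruptedException {
            PhaseTimer t = new PhaseTimer();

            Thread.sleep(5);  // Accrues to user time at the next opStart().
            t.opStart();
            Thread.sleep(10); // Accrues to write time at opEnd().
            t.opEnd();

            System.out.println("user(us)=" + t.userTime / 1000 +
                ", write(us)=" + t.writeTime / 1000);
        }
    }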

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/e1ffc10f/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/package.html
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/package.html b/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/package.html
deleted file mode 100644
index ec380f2..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/package.html
+++ /dev/null
@@ -1,24 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<html>
-<body>
-    <!-- Package description. -->
-    Contains IGFS client classes.
-</body>
-</html>

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/e1ffc10f/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/package.html
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/package.html b/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/package.html
deleted file mode 100644
index 4b070d3..0000000
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/package.html
+++ /dev/null
@@ -1,24 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<html>
-<body>
-    <!-- Package description. -->
-    Contains IGFS client and common classes.
-</body>
-</html>
