http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/b08492a5/modules/hadoop/src/test/java/org/apache/ignite/ignitefs/GridGgfsHadoopFileSystemIpcCacheSelfTest.java
----------------------------------------------------------------------
diff --cc modules/hadoop/src/test/java/org/apache/ignite/ignitefs/GridGgfsHadoopFileSystemIpcCacheSelfTest.java
index 31f39da,0000000..defd994
mode 100644,000000..100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/ignitefs/GridGgfsHadoopFileSystemIpcCacheSelfTest.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/ignitefs/GridGgfsHadoopFileSystemIpcCacheSelfTest.java
@@@ -1,207 -1,0 +1,207 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *      http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.ignite.ignitefs;
 +
 +import org.apache.hadoop.conf.*;
 +import org.apache.hadoop.fs.*;
 +import org.apache.ignite.cache.*;
 +import org.apache.ignite.configuration.*;
 +import org.apache.ignite.internal.fs.hadoop.*;
 +import org.apache.ignite.internal.processors.fs.*;
 +import org.apache.ignite.internal.util.ipc.shmem.*;
 +import org.apache.ignite.internal.util.typedef.*;
 +import org.apache.ignite.internal.util.typedef.internal.*;
 +import org.apache.ignite.spi.communication.tcp.*;
 +import org.apache.ignite.spi.discovery.tcp.*;
 +import org.apache.ignite.spi.discovery.tcp.ipfinder.*;
 +import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.*;
 +
 +import java.lang.reflect.*;
 +import java.net.*;
 +import java.util.*;
 +import java.util.concurrent.atomic.*;
 +
 +import static org.apache.ignite.cache.CacheAtomicityMode.*;
 +import static org.apache.ignite.cache.CacheMode.*;
 +import static org.apache.ignite.events.EventType.*;
 +
 +/**
 + * IPC cache test.
 + */
 +public class GridGgfsHadoopFileSystemIpcCacheSelfTest extends GridGgfsCommonAbstractTest {
 +    /** IP finder. */
 +    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
 +
 +    /** Path to test hadoop configuration. */
 +    private static final String HADOOP_FS_CFG = "modules/core/src/test/config/hadoop/core-site.xml";
 +
 +    /** Group size. */
 +    public static final int GRP_SIZE = 128;
 +
 +    /** Started grid counter. */
 +    private static int cnt;
 +
 +    /** {@inheritDoc} */
 +    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
 +        IgniteConfiguration cfg = super.getConfiguration(gridName);
 +
 +        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
 +        discoSpi.setIpFinder(IP_FINDER);
 +
 +        cfg.setDiscoverySpi(discoSpi);
 +
 +        IgniteFsConfiguration ggfsCfg = new IgniteFsConfiguration();
 +
 +        ggfsCfg.setDataCacheName("partitioned");
 +        ggfsCfg.setMetaCacheName("replicated");
 +        ggfsCfg.setName("ggfs");
 +        ggfsCfg.setManagementPort(IgniteFsConfiguration.DFLT_MGMT_PORT + cnt);
 +
 +        ggfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{
 +            put("type", "shmem");
 +            put("port", String.valueOf(IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT + cnt));
 +        }});
 +
 +        ggfsCfg.setBlockSize(512 * 1024); // Together with group blocks mapper will yield 64M per node groups.
 +
 +        cfg.setGgfsConfiguration(ggfsCfg);
 +
 +        cfg.setCacheConfiguration(cacheConfiguration());
 +
 +        cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED);
 +
 +        TcpCommunicationSpi commSpi = new TcpCommunicationSpi();
 +
 +        commSpi.setSharedMemoryPort(-1);
 +
 +        cfg.setCommunicationSpi(commSpi);
 +
 +        cnt++;
 +
 +        return cfg;
 +    }
 +
 +    /**
 +     * Gets cache configuration.
 +     *
 +     * @return Cache configuration.
 +     */
 +    private CacheConfiguration[] cacheConfiguration() {
 +        CacheConfiguration cacheCfg = defaultCacheConfiguration();
 +
 +        cacheCfg.setName("partitioned");
 +        cacheCfg.setCacheMode(PARTITIONED);
 +        cacheCfg.setDistributionMode(CacheDistributionMode.PARTITIONED_ONLY);
 +        cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
 +        cacheCfg.setAffinityMapper(new IgniteFsGroupDataBlocksKeyMapper(GRP_SIZE));
 +        cacheCfg.setBackups(0);
 +        cacheCfg.setQueryIndexEnabled(false);
 +        cacheCfg.setAtomicityMode(TRANSACTIONAL);
 +
 +        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
 +
 +        metaCacheCfg.setName("replicated");
 +        metaCacheCfg.setCacheMode(REPLICATED);
 +        metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
 +        metaCacheCfg.setQueryIndexEnabled(false);
 +        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
 +
 +        return new CacheConfiguration[] {metaCacheCfg, cacheCfg};
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override protected void beforeTestsStarted() throws Exception {
 +        startGrids(4);
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override protected void afterTestsStopped() throws Exception {
 +        G.stopAll(true);
 +    }
 +
 +    /**
 +     * Test how IPC cache map works.
 +     *
 +     * @throws Exception If failed.
 +     */
 +    @SuppressWarnings("unchecked")
 +    public void testIpcCache() throws Exception {
 +        Field cacheField = GridGgfsHadoopIpcIo.class.getDeclaredField("ipcCache");
 +
 +        cacheField.setAccessible(true);
 +
 +        Field activeCntField = GridGgfsHadoopIpcIo.class.getDeclaredField("activeCnt");
 +
 +        activeCntField.setAccessible(true);
 +
 +        Map<String, GridGgfsHadoopIpcIo> cache = (Map<String, GridGgfsHadoopIpcIo>)cacheField.get(null);
 +
 +        String name = "ggfs:" + getTestGridName(0) + "@";
 +
 +        Configuration cfg = new Configuration();
 +
-         cfg.addResource(U.resolveGridGainUrl(HADOOP_FS_CFG));
++        cfg.addResource(U.resolveIgniteUrl(HADOOP_FS_CFG));
 +        cfg.setBoolean("fs.ggfs.impl.disable.cache", true);
 +        cfg.setBoolean(String.format(GridGgfsHadoopUtils.PARAM_GGFS_ENDPOINT_NO_EMBED, name), true);
 +
 +        // Ensure that existing IO is reused.
 +        FileSystem fs1 = FileSystem.get(new URI("ggfs://" + name + "/"), cfg);
 +
 +        assertEquals(1, cache.size());
 +
 +        GridGgfsHadoopIpcIo io = null;
 +
 +        System.out.println("CACHE: " + cache);
 +
 +        for (String key : cache.keySet()) {
 +            if (key.contains("10500")) {
 +                io = cache.get(key);
 +
 +                break;
 +            }
 +        }
 +
 +        assert io != null;
 +
 +        assertEquals(1, ((AtomicInteger)activeCntField.get(io)).get());
 +
 +        // Ensure that when IO is used by multiple file systems and one of them is closed, IO is not stopped.
 +        FileSystem fs2 = FileSystem.get(new URI("ggfs://" + name + "/abc"), cfg);
 +
 +        assertEquals(1, cache.size());
 +        assertEquals(2, ((AtomicInteger)activeCntField.get(io)).get());
 +
 +        fs2.close();
 +
 +        assertEquals(1, cache.size());
 +        assertEquals(1, ((AtomicInteger)activeCntField.get(io)).get());
 +
 +        Field stopField = GridGgfsHadoopIpcIo.class.getDeclaredField("stopping");
 +
 +        stopField.setAccessible(true);
 +
 +        assert !(Boolean)stopField.get(io);
 +
 +        // Ensure that IO is stopped when nobody else needs it.
 +        fs1.close();
 +
 +        assert cache.isEmpty();
 +
 +        assert (Boolean)stopField.get(io);
 +    }
 +}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/b08492a5/modules/hadoop/src/test/java/org/apache/ignite/ignitefs/GridGgfsHadoopFileSystemLoggerSelfTest.java
----------------------------------------------------------------------
diff --cc modules/hadoop/src/test/java/org/apache/ignite/ignitefs/GridGgfsHadoopFileSystemLoggerSelfTest.java
index b17f976,0000000..687408f
mode 100644,000000..100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/ignitefs/GridGgfsHadoopFileSystemLoggerSelfTest.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/ignitefs/GridGgfsHadoopFileSystemLoggerSelfTest.java
@@@ -1,287 -1,0 +1,287 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *      http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.ignite.ignitefs;
 +
 +import org.apache.ignite.internal.fs.common.*;
 +import org.apache.ignite.internal.processors.fs.*;
 +import org.apache.ignite.internal.util.typedef.internal.*;
 +
 +import java.io.*;
 +import java.util.*;
 +
 +import static org.apache.ignite.ignitefs.IgniteFsMode.*;
 +import static org.apache.ignite.internal.fs.common.GridGgfsLogger.*;
 +
 +/**
 + * Grid GGFS client logger test.
 + */
 +public class GridGgfsHadoopFileSystemLoggerSelfTest extends GridGgfsCommonAbstractTest {
 +    /** Path string. */
 +    private static final String PATH_STR = "/dir1/dir2/file;test";
 +
 +    /** Path string with escaped semicolons. */
 +    private static final String PATH_STR_ESCAPED = PATH_STR.replace(';', '~');
 +
 +    /** Path. */
 +    private static final IgniteFsPath PATH = new IgniteFsPath(PATH_STR);
 +
 +    /** GGFS name. */
 +    private static final String GGFS_NAME = "ggfs";
 +
 +    /** Log file path. */
-     private static final String LOG_DIR = U.getGridGainHome();
++    private static final String LOG_DIR = U.getIgniteHome();
 +
 +    /** Endpoint address. */
 +    private static final String ENDPOINT = "localhost:10500";
 +
 +    /** Log file name. */
 +    private static final String LOG_FILE = LOG_DIR + File.separator + "ggfs-log-" + GGFS_NAME + "-" + U.jvmPid() +
 +        ".csv";
 +
 +    /** {@inheritDoc} */
 +    @Override protected void beforeTestsStarted() throws Exception {
 +        removeLogs();
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override protected void afterTest() throws Exception {
 +        removeLogs();
 +    }
 +
 +    /**
 +     * Remove existing logs.
 +     *
 +     * @throws Exception If failed.
 +     */
 +    private void removeLogs() throws Exception {
 +        File dir = new File(LOG_DIR);
 +
 +        File[] logs = dir.listFiles(new FilenameFilter() {
 +            @Override public boolean accept(File dir, String name) {
 +                return name.startsWith("ggfs-log-");
 +            }
 +        });
 +
 +        for (File log : logs)
 +            log.delete();
 +    }
 +
 +    /**
 +     * Ensure correct static loggers creation/removal as well as file creation.
 +     *
 +     * @throws Exception If failed.
 +     */
 +    public void testCreateDelete() throws Exception {
 +        GridGgfsLogger log = GridGgfsLogger.logger(ENDPOINT, GGFS_NAME, LOG_DIR, 10);
 +
 +        GridGgfsLogger sameLog0 = GridGgfsLogger.logger(ENDPOINT, GGFS_NAME, LOG_DIR, 10);
 +
 +        // Loggers for the same endpoint must be the same object.
 +        assert log == sameLog0;
 +
 +        GridGgfsLogger otherLog = GridGgfsLogger.logger("other" + ENDPOINT, GGFS_NAME, LOG_DIR, 10);
 +
 +        // Logger for another endpoint must be different.
 +        assert log != otherLog;
 +
 +        otherLog.close();
 +
 +        log.logDelete(PATH, PRIMARY, false);
 +
 +        log.close();
 +
 +        File logFile = new File(LOG_FILE);
 +
 +        // When there are multiple loggers, closing one must not force flushing.
 +        assert !logFile.exists();
 +
 +        GridGgfsLogger sameLog1 = GridGgfsLogger.logger(ENDPOINT, GGFS_NAME, LOG_DIR, 10);
 +
 +        assert sameLog0 == sameLog1;
 +
 +        sameLog0.close();
 +
 +        assert !logFile.exists();
 +
 +        sameLog1.close();
 +
 +        // When we close the last logger, it must flush data to disk.
 +        assert logFile.exists();
 +
 +        logFile.delete();
 +
 +        GridGgfsLogger sameLog2 = GridGgfsLogger.logger(ENDPOINT, GGFS_NAME, LOG_DIR, 10);
 +
 +        // This time we expect new logger instance to be created.
 +        assert sameLog0 != sameLog2;
 +
 +        sameLog2.close();
 +
 +        // As we do not add any records to the logger, we do not expect flushing.
 +        assert !logFile.exists();
 +    }
 +
 +    /**
 +     * Test read operations logging.
 +     *
 +     * @throws Exception If failed.
 +     */
 +    public void testLogRead() throws Exception {
 +        GridGgfsLogger log = GridGgfsLogger.logger(ENDPOINT, GGFS_NAME, LOG_DIR, 10);
 +
 +        log.logOpen(1, PATH, PRIMARY, 2, 3L);
 +        log.logRandomRead(1, 4L, 5);
 +        log.logSeek(1, 6L);
 +        log.logSkip(1, 7L);
 +        log.logMark(1, 8L);
 +        log.logReset(1);
 +        log.logCloseIn(1, 9L, 10L, 11);
 +
 +        log.close();
 +
 +        checkLog(
 +            new SB().a(U.jvmPid() + d() + TYPE_OPEN_IN + d() + PATH_STR_ESCAPED + d() + PRIMARY + d() + 1 + d() + 2 +
 +                d() + 3 + d(14)).toString(),
 +            new SB().a(U.jvmPid() + d() + TYPE_RANDOM_READ + d(3) + 1 + d(7) + 4 + d() + 5 + d(8)).toString(),
 +            new SB().a(U.jvmPid() + d() + TYPE_SEEK + d(3) + 1 + d(7) + 6 + d(9)).toString(),
 +            new SB().a(U.jvmPid() + d() + TYPE_SKIP + d(3) + 1 + d(9) + 7 + d(7)).toString(),
 +            new SB().a(U.jvmPid() + d() + TYPE_MARK + d(3) + 1 + d(10) + 8 + d(6)).toString(),
 +            new SB().a(U.jvmPid() + d() + TYPE_RESET + d(3) + 1 + d(16)).toString(),
 +            new SB().a(U.jvmPid() + d() + TYPE_CLOSE_IN + d(3) + 1 + d(11) + 9 + d() + 10 + d() + 11 + d(3)).toString()
 +        );
 +    }
 +
 +    /**
 +     * Test write operations logging.
 +     *
 +     * @throws Exception If failed.
 +     */
 +    public void testLogWrite() throws Exception {
 +        GridGgfsLogger log = GridGgfsLogger.logger(ENDPOINT, GGFS_NAME, LOG_DIR, 10);
 +
 +        log.logCreate(1, PATH, PRIMARY, true, 2, new Integer(3).shortValue(), 4L);
 +        log.logAppend(2, PATH, PRIMARY, 8);
 +        log.logCloseOut(2, 9L, 10L, 11);
 +
 +        log.close();
 +
 +        checkLog(
 +            new SB().a(U.jvmPid() + d() + TYPE_OPEN_OUT + d() + PATH_STR_ESCAPED + d() + PRIMARY + d() + 1 + d() +
 +                2 + d(2) + 0 + d() + 1 + d() + 3 + d() + 4 + d(10)).toString(),
 +            new SB().a(U.jvmPid() + d() + TYPE_OPEN_OUT + d() + PATH_STR_ESCAPED + d() + PRIMARY + d() + 2 + d() +
 +                8 + d(2) + 1 + d(13)).toString(),
 +            new SB().a(U.jvmPid() + d() + TYPE_CLOSE_OUT + d(3) + 2 + d(11) + 9 + d() + 10 + d() + 11 + d(3))
 +                .toString()
 +        );
 +    }
 +
 +    /**
 +     * Test miscellaneous operations logging.
 +     *
 +     * @throws Exception If failed.
 +     */
 +    @SuppressWarnings("TooBroadScope")
 +    public void testLogMisc() throws Exception {
 +        GridGgfsLogger log = GridGgfsLogger.logger(ENDPOINT, GGFS_NAME, LOG_DIR, 10);
 +
 +        String newFile = "/dir3/file.test";
 +        String file1 = "/dir3/file1.test";
 +        String file2 = "/dir3/file1.test";
 +
 +        log.logMakeDirectory(PATH, PRIMARY);
 +        log.logRename(PATH, PRIMARY, new IgniteFsPath(newFile));
 +        log.logListDirectory(PATH, PRIMARY, new String[] { file1, file2 });
 +        log.logDelete(PATH, PRIMARY, false);
 +
 +        log.close();
 +
 +        checkLog(
 +            new SB().a(U.jvmPid() + d() + TYPE_DIR_MAKE + d() + PATH_STR_ESCAPED + d() + PRIMARY + d(17)).toString(),
 +            new SB().a(U.jvmPid() + d() + TYPE_RENAME + d() + PATH_STR_ESCAPED + d() + PRIMARY + d(15) + newFile +
 +                d(2)).toString(),
 +            new SB().a(U.jvmPid() + d() + TYPE_DIR_LIST + d() + PATH_STR_ESCAPED + d() + PRIMARY + d(17) + file1 +
 +                DELIM_FIELD_VAL + file2).toString(),
 +            new SB().a(U.jvmPid() + d() + TYPE_DELETE + d(1) + PATH_STR_ESCAPED + d() + PRIMARY + d(16) + 0 +
 +                d()).toString()
 +        );
 +    }
 +
 +    /**
 +     * Create GGFS file with the given path.
 +     *
 +     * @param path File path.
 +     * @return GGFS file instance.
 +     */
 +    private IgniteFsFile file(String path) {
 +        return new IgniteFsFileImpl(new IgniteFsPath(path), new GridGgfsFileInfo(), 64 * 1024 * 1024);
 +    }
 +
 +    /**
 +     * Ensure that log file has only the following lines.
 +     *
 +     * @param lines Expected lines.
 +     */
 +    private void checkLog(String... lines) throws Exception {
 +        BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(LOG_FILE)));
 +
 +        List<String> logLines = new ArrayList<>(lines.length);
 +
 +        String nextLogLine;
 +
 +        while ((nextLogLine = br.readLine()) != null)
 +            logLines.add(nextLogLine);
 +
 +        U.closeQuiet(br);
 +
 +        assertEquals(lines.length + 1, logLines.size());
 +
 +        assertEquals(logLines.get(0), HDR);
 +
 +        for (int i = 0; i < lines.length; i++) {
 +            String logLine = logLines.get(i + 1);
 +
 +            logLine = logLine.substring(logLine.indexOf(DELIM_FIELD, logLine.indexOf(DELIM_FIELD) + 1) + 1);
 +
 +            assertEquals(lines[i], logLine);
 +        }
 +    }
 +
 +    /**
 +     * Return single field delimiter.
 +     *
 +     * @return Single field delimiter.
 +     */
 +    private String d() {
 +        return d(1);
 +    }
 +
 +    /**
 +     * Return a bunch of field delimiters.
 +     *
 +     * @param cnt Amount of field delimiters.
 +     * @return Field delimiters.
 +     */
 +    private String d(int cnt) {
 +        SB buf = new SB();
 +
 +        for (int i = 0; i < cnt; i++)
 +            buf.a(DELIM_FIELD);
 +
 +        return buf.toString();
 +    }
 +}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/b08492a5/modules/hadoop/src/test/java/org/apache/ignite/ignitefs/GridGgfsHadoopFileSystemLoggerStateSelfTest.java
----------------------------------------------------------------------
diff --cc modules/hadoop/src/test/java/org/apache/ignite/ignitefs/GridGgfsHadoopFileSystemLoggerStateSelfTest.java
index 033ee42,0000000..ef6067c
mode 100644,000000..100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/ignitefs/GridGgfsHadoopFileSystemLoggerStateSelfTest.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/ignitefs/GridGgfsHadoopFileSystemLoggerStateSelfTest.java
@@@ -1,325 -1,0 +1,325 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *      http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.ignite.ignitefs;
 +
 +import org.apache.hadoop.conf.*;
 +import org.apache.hadoop.fs.FileSystem;
 +import org.apache.ignite.*;
 +import org.apache.ignite.cache.*;
 +import org.apache.ignite.configuration.*;
 +import org.apache.ignite.ignitefs.hadoop.v1.*;
 +import org.apache.ignite.internal.fs.common.*;
 +import org.apache.ignite.internal.processors.fs.*;
 +import org.apache.ignite.internal.util.typedef.*;
 +import org.apache.ignite.internal.util.typedef.internal.*;
 +import org.apache.ignite.spi.discovery.tcp.*;
 +import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.*;
 +
 +import java.lang.reflect.*;
 +import java.net.*;
 +import java.nio.file.*;
 +import java.util.*;
 +
 +import static org.apache.ignite.cache.CacheAtomicityMode.*;
 +import static org.apache.ignite.cache.CacheMode.*;
 +import static org.apache.ignite.ignitefs.IgniteFsMode.*;
 +import static org.apache.ignite.ignitefs.hadoop.GridGgfsHadoopParameters.*;
 +
 +/**
 + * Ensures that sampling is really turned on/off.
 + */
 +public class GridGgfsHadoopFileSystemLoggerStateSelfTest extends GridGgfsCommonAbstractTest {
 +    /** GGFS. */
 +    private GridGgfsEx ggfs;
 +
 +    /** File system. */
 +    private FileSystem fs;
 +
 +    /** Whether logging is enabled in FS configuration. */
 +    private boolean logging;
 +
 +    /** Whether sampling is enabled. */
 +    private Boolean sampling;
 +
 +    /** {@inheritDoc} */
 +    @Override protected void afterTest() throws Exception {
 +        U.closeQuiet(fs);
 +
 +        ggfs = null;
 +        fs = null;
 +
 +        G.stopAll(true);
 +
 +        logging = false;
 +        sampling = null;
 +    }
 +
 +    /**
 +     * Startup the grid and instantiate the file system.
 +     *
 +     * @throws Exception If failed.
 +     */
 +    private void startUp() throws Exception {
 +        IgniteFsConfiguration ggfsCfg = new IgniteFsConfiguration();
 +
 +        ggfsCfg.setDataCacheName("partitioned");
 +        ggfsCfg.setMetaCacheName("replicated");
 +        ggfsCfg.setName("ggfs");
 +        ggfsCfg.setBlockSize(512 * 1024);
 +        ggfsCfg.setDefaultMode(PRIMARY);
 +        ggfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{
 +            put("type", "tcp");
 +            put("port", "10500");
 +        }});
 +
 +        CacheConfiguration cacheCfg = defaultCacheConfiguration();
 +
 +        cacheCfg.setName("partitioned");
 +        cacheCfg.setCacheMode(PARTITIONED);
 +        cacheCfg.setDistributionMode(CacheDistributionMode.PARTITIONED_ONLY);
 +        cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
 +        cacheCfg.setAffinityMapper(new IgniteFsGroupDataBlocksKeyMapper(128));
 +        cacheCfg.setBackups(0);
 +        cacheCfg.setQueryIndexEnabled(false);
 +        cacheCfg.setAtomicityMode(TRANSACTIONAL);
 +
 +        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
 +
 +        metaCacheCfg.setName("replicated");
 +        metaCacheCfg.setCacheMode(REPLICATED);
 +        metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
 +        metaCacheCfg.setQueryIndexEnabled(false);
 +        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
 +
 +        IgniteConfiguration cfg = new IgniteConfiguration();
 +
 +        cfg.setGridName("ggfs-grid");
 +
 +        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
 +
 +        discoSpi.setIpFinder(new TcpDiscoveryVmIpFinder(true));
 +
 +        cfg.setDiscoverySpi(discoSpi);
 +        cfg.setCacheConfiguration(metaCacheCfg, cacheCfg);
 +        cfg.setGgfsConfiguration(ggfsCfg);
 +
 +        cfg.setLocalHost("127.0.0.1");
 +        cfg.setRestEnabled(false);
 +
 +        Ignite g = G.start(cfg);
 +
 +        ggfs = (GridGgfsEx)g.fileSystem("ggfs");
 +
 +        ggfs.globalSampling(sampling);
 +
 +        fs = fileSystem();
 +    }
 +
 +    /**
 +     * When logging is disabled and sampling is not set, no-op logger must be used.
 +     *
 +     * @throws Exception If failed.
 +     */
 +    public void testLoggingDisabledSamplingNotSet() throws Exception {
 +        startUp();
 +
 +        assert !logEnabled();
 +    }
 +
 +    /**
 +     * When logging is enabled and sampling is not set, file logger must be used.
 +     *
 +     * @throws Exception If failed.
 +     */
 +    public void testLoggingEnabledSamplingNotSet() throws Exception {
 +        logging = true;
 +
 +        startUp();
 +
 +        assert logEnabled();
 +    }
 +
 +    /**
 +     * When logging is disabled and sampling is disabled, no-op logger must be used.
 +     *
 +     * @throws Exception If failed.
 +     */
 +    public void testLoggingDisabledSamplingDisabled() throws Exception {
 +        sampling = false;
 +
 +        startUp();
 +
 +        assert !logEnabled();
 +    }
 +
 +    /**
 +     * When logging is enabled and sampling is disabled, no-op logger must be used.
 +     *
 +     * @throws Exception If failed.
 +     */
 +    public void testLoggingEnabledSamplingDisabled() throws Exception {
 +        logging = true;
 +        sampling = false;
 +
 +        startUp();
 +
 +        assert !logEnabled();
 +    }
 +
 +    /**
 +     * When logging is disabled and sampling is enabled, file logger must be used.
 +     *
 +     * @throws Exception If failed.
 +     */
 +    public void testLoggingDisabledSamplingEnabled() throws Exception {
 +        sampling = true;
 +
 +        startUp();
 +
 +        assert logEnabled();
 +    }
 +
 +    /**
 +     * When logging is enabled and sampling is enabled, file logger must be used.
 +     *
 +     * @throws Exception If failed.
 +     */
 +    public void testLoggingEnabledSamplingEnabled() throws Exception {
 +        logging = true;
 +        sampling = true;
 +
 +        startUp();
 +
 +        assert logEnabled();
 +    }
 +
 +    /**
 +     * Ensure sampling change through API causes changes in logging on subsequent client connections.
 +     *
 +     * @throws Exception If failed.
 +     */
 +    public void testSamplingChange() throws Exception {
 +        // Start with sampling not set.
 +        startUp();
 +
 +        assert !logEnabled();
 +
 +        fs.close();
 +
 +        // "Not set" => true transition.
 +        ggfs.globalSampling(true);
 +
 +        fs = fileSystem();
 +
 +        assert logEnabled();
 +
 +        fs.close();
 +
 +        // True => "not set" transition.
 +        ggfs.globalSampling(null);
 +
 +        fs = fileSystem();
 +
 +        assert !logEnabled();
 +
 +        // "Not-set" => false transition.
 +        ggfs.globalSampling(false);
 +
 +        fs = fileSystem();
 +
 +        assert !logEnabled();
 +
 +        fs.close();
 +
 +        // False => "not set" transition.
 +        ggfs.globalSampling(null);
 +
 +        fs = fileSystem();
 +
 +        assert !logEnabled();
 +
 +        fs.close();
 +
 +        // True => false transition.
 +        ggfs.globalSampling(true);
 +        ggfs.globalSampling(false);
 +
 +        fs = fileSystem();
 +
 +        assert !logEnabled();
 +
 +        fs.close();
 +
 +        // False => true transition.
 +        ggfs.globalSampling(true);
 +
 +        fs = fileSystem();
 +
 +        assert logEnabled();
 +    }
 +
 +    /**
 +     * Ensure that log directory is set to GGFS when client FS connects.
 +     *
 +     * @throws Exception If failed.
 +     */
 +    @SuppressWarnings("ConstantConditions")
 +    public void testLogDirectory() throws Exception {
 +        startUp();
 +
-         assertEquals(Paths.get(U.getGridGainHome()).normalize().toString(),
++        assertEquals(Paths.get(U.getIgniteHome()).normalize().toString(),
 +            ggfs.clientLogDirectory());
 +    }
 +
 +    /**
 +     * Instantiate new file system.
 +     *
 +     * @return New file system.
 +     * @throws Exception If failed.
 +     */
 +    private GridGgfsHadoopFileSystem fileSystem() throws Exception {
 +        Configuration fsCfg = new Configuration();
 +
-         fsCfg.addResource(U.resolveGridGainUrl("modules/core/src/test/config/hadoop/core-site-loopback.xml"));
++        fsCfg.addResource(U.resolveIgniteUrl("modules/core/src/test/config/hadoop/core-site-loopback.xml"));
 +
 +        fsCfg.setBoolean("fs.ggfs.impl.disable.cache", true);
 +
 +        if (logging)
 +            fsCfg.setBoolean(String.format(PARAM_GGFS_LOG_ENABLED, "ggfs:ggfs-grid@"), logging);
 +
-         fsCfg.setStrings(String.format(PARAM_GGFS_LOG_DIR, "ggfs:ggfs-grid@"), U.getGridGainHome());
++        fsCfg.setStrings(String.format(PARAM_GGFS_LOG_DIR, "ggfs:ggfs-grid@"), U.getIgniteHome());
 +
 +        return (GridGgfsHadoopFileSystem)FileSystem.get(new URI("ggfs://ggfs:ggfs-grid@/"), fsCfg);
 +    }
 +
 +    /**
 +     * Ensure that real logger is used by the file system.
 +     *
 +     * @return {@code True} if logging is enabled.
 +     * @throws Exception If failed.
 +     */
 +    private boolean logEnabled() throws Exception {
 +        assert fs != null;
 +
 +        Field field = fs.getClass().getDeclaredField("clientLog");
 +
 +        field.setAccessible(true);
 +
 +        return ((GridGgfsLogger)field.get(fs)).isLogEnabled();
 +    }
 +}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/b08492a5/modules/hadoop/src/test/java/org/apache/ignite/ignitefs/GridGgfsHadoopFileSystemSecondaryModeSelfTest.java
----------------------------------------------------------------------
diff --cc modules/hadoop/src/test/java/org/apache/ignite/ignitefs/GridGgfsHadoopFileSystemSecondaryModeSelfTest.java
index 6cad6ab,0000000..e23ba6f
mode 100644,000000..100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/ignitefs/GridGgfsHadoopFileSystemSecondaryModeSelfTest.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/ignitefs/GridGgfsHadoopFileSystemSecondaryModeSelfTest.java
@@@ -1,319 -1,0 +1,319 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *      http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.ignite.ignitefs;
 +
 +import org.apache.hadoop.conf.*;
 +import org.apache.hadoop.fs.*;
 +import org.apache.ignite.cache.*;
 +import org.apache.ignite.configuration.*;
 +import org.apache.ignite.ignitefs.hadoop.v1.*;
 +import org.apache.ignite.internal.fs.hadoop.*;
 +import org.apache.ignite.internal.processors.fs.*;
 +import org.apache.ignite.internal.util.typedef.*;
 +import org.apache.ignite.internal.util.typedef.internal.*;
 +import org.apache.ignite.lang.*;
 +import org.apache.ignite.spi.discovery.tcp.*;
 +import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.*;
 +
 +import java.net.*;
 +import java.util.*;
 +
 +import static org.apache.ignite.cache.CacheAtomicityMode.*;
 +import static org.apache.ignite.cache.CacheMode.*;
 +import static org.apache.ignite.ignitefs.IgniteFsMode.*;
 +
 +/**
 + * Ensures correct modes resolution for SECONDARY paths.
 + */
 +public class GridGgfsHadoopFileSystemSecondaryModeSelfTest extends GridGgfsCommonAbstractTest {
 +    /** Path to check. */
 +    private static final Path PATH = new Path("/dir");
 +
 +    /** Pattern matching the path. */
 +    private static final String PATTERN_MATCHES = "/dir";
 +
 +    /** Pattern not matching the path. */
 +    private static final String PATTERN_NOT_MATCHES = "/files";
 +
 +    /** Default GGFS mode. */
 +    private IgniteFsMode mode;
 +
 +    /** Path modes. */
 +    private Map<String, IgniteFsMode> pathModes;
 +
 +    /** File system. */
 +    private GridGgfsHadoopFileSystem fs;
 +
 +    /** {@inheritDoc} */
 +    @Override protected void beforeTest() throws Exception {
 +        mode = null;
 +        pathModes = null;
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override protected void afterTest() throws Exception {
 +        U.closeQuiet(fs);
 +
 +        fs = null;
 +
 +        G.stopAll(true);
 +    }
 +
 +    /**
 +     * Perform initial startup.
 +     *
 +     * @throws Exception If failed.
 +     */
 +    @SuppressWarnings("NullableProblems")
 +    private void startUp() throws Exception {
 +        startUpSecondary();
 +
 +        IgniteFsConfiguration ggfsCfg = new IgniteFsConfiguration();
 +
 +        ggfsCfg.setDataCacheName("partitioned");
 +        ggfsCfg.setMetaCacheName("replicated");
 +        ggfsCfg.setName("ggfs");
 +        ggfsCfg.setBlockSize(512 * 1024);
 +        ggfsCfg.setDefaultMode(mode);
 +        ggfsCfg.setPathModes(pathModes);
 +        ggfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{
 +            put("type", "tcp");
 +            put("port", "10500");
 +        }});
 +
 +        ggfsCfg.setManagementPort(-1);
 +        ggfsCfg.setSecondaryFileSystem(new GridGgfsHadoopFileSystemWrapper(
 +            "ggfs://ggfs-secondary:ggfs-grid-secondary@127.0.0.1:11500/",
 +            "modules/core/src/test/config/hadoop/core-site-loopback-secondary.xml"));
 +
 +        CacheConfiguration cacheCfg = defaultCacheConfiguration();
 +
 +        cacheCfg.setName("partitioned");
 +        cacheCfg.setCacheMode(PARTITIONED);
 +        cacheCfg.setDistributionMode(CacheDistributionMode.PARTITIONED_ONLY);
 +        cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
 +        cacheCfg.setAffinityMapper(new IgniteFsGroupDataBlocksKeyMapper(128));
 +        cacheCfg.setBackups(0);
 +        cacheCfg.setQueryIndexEnabled(false);
 +        cacheCfg.setAtomicityMode(TRANSACTIONAL);
 +
 +        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
 +
 +        metaCacheCfg.setName("replicated");
 +        metaCacheCfg.setCacheMode(REPLICATED);
 +        metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
 +        metaCacheCfg.setQueryIndexEnabled(false);
 +        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
 +
 +        IgniteConfiguration cfg = new IgniteConfiguration();
 +
 +        cfg.setGridName("ggfs-grid");
 +
 +        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
 +
 +        discoSpi.setIpFinder(new TcpDiscoveryVmIpFinder(true));
 +
 +        cfg.setDiscoverySpi(discoSpi);
 +        cfg.setCacheConfiguration(metaCacheCfg, cacheCfg);
 +        cfg.setGgfsConfiguration(ggfsCfg);
 +
 +        cfg.setLocalHost("127.0.0.1");
 +
 +        G.start(cfg);
 +
 +        Configuration fsCfg = new Configuration();
 +
-         fsCfg.addResource(U.resolveGridGainUrl("modules/core/src/test/config/hadoop/core-site-loopback.xml"));
++        fsCfg.addResource(U.resolveIgniteUrl("modules/core/src/test/config/hadoop/core-site-loopback.xml"));
 +
 +        fsCfg.setBoolean("fs.ggfs.impl.disable.cache", true);
 +
 +        fs = (GridGgfsHadoopFileSystem)FileSystem.get(new URI("ggfs://ggfs:ggfs-grid@/"), fsCfg);
 +    }
 +
 +    /**
 +     * Startup secondary file system.
 +     *
 +     * @throws Exception If failed.
 +     */
 +    private void startUpSecondary() throws Exception {
 +        IgniteFsConfiguration ggfsCfg = new IgniteFsConfiguration();
 +
 +        ggfsCfg.setDataCacheName("partitioned");
 +        ggfsCfg.setMetaCacheName("replicated");
 +        ggfsCfg.setName("ggfs-secondary");
 +        ggfsCfg.setBlockSize(512 * 1024);
 +        ggfsCfg.setDefaultMode(PRIMARY);
 +        ggfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{
 +            put("type", "tcp");
 +            put("port", "11500");
 +        }});
 +
 +        CacheConfiguration cacheCfg = defaultCacheConfiguration();
 +
 +        cacheCfg.setName("partitioned");
 +        cacheCfg.setCacheMode(PARTITIONED);
 +        cacheCfg.setDistributionMode(CacheDistributionMode.PARTITIONED_ONLY);
 +        cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
 +        cacheCfg.setAffinityMapper(new IgniteFsGroupDataBlocksKeyMapper(128));
 +        cacheCfg.setBackups(0);
 +        cacheCfg.setQueryIndexEnabled(false);
 +        cacheCfg.setAtomicityMode(TRANSACTIONAL);
 +
 +        CacheConfiguration metaCacheCfg = defaultCacheConfiguration();
 +
 +        metaCacheCfg.setName("replicated");
 +        metaCacheCfg.setCacheMode(REPLICATED);
 +        metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
 +        metaCacheCfg.setQueryIndexEnabled(false);
 +        metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
 +
 +        IgniteConfiguration cfg = new IgniteConfiguration();
 +
 +        cfg.setGridName("ggfs-grid-secondary");
 +
 +        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
 +
 +        discoSpi.setIpFinder(new TcpDiscoveryVmIpFinder(true));
 +
 +        cfg.setDiscoverySpi(discoSpi);
 +        cfg.setCacheConfiguration(metaCacheCfg, cacheCfg);
 +        cfg.setGgfsConfiguration(ggfsCfg);
 +
 +        cfg.setLocalHost("127.0.0.1");
 +
 +        G.start(cfg);
 +    }
 +
 +    /**
 +     * Check path resolution when secondary mode is not default and there are no other exclusion paths.
 +     *
 +     * @throws Exception If failed.
 +     */
 +    public void testSecondaryNotDefaultNoExclusions() throws Exception {
 +        mode = PRIMARY;
 +
 +        startUp();
 +
 +        assert !secondary(PATH);
 +        assert !secondary(PATH);
 +    }
 +
 +    /**
 +     * Check path resolution when secondary mode is not default and there are no matching exclusion paths.
 +     *
 +     * @throws Exception If failed.
 +     */
 +    public void testSecondaryNotDefaultNonMatchingExclusion() throws Exception {
 +        mode = PRIMARY;
 +
 +        pathModes(F.t(PATTERN_NOT_MATCHES, PROXY));
 +
 +        startUp();
 +
 +        assert !secondary(PATH);
 +        assert !secondary(PATH);
 +    }
 +
 +    /**
 +     * Check path resolution when secondary mode is not default and there is a matching exclusion path.
 +     *
 +     * @throws Exception If failed.
 +     */
 +    public void testSecondaryNotDefaultMatchingExclusion() throws Exception {
 +        mode = PRIMARY;
 +
 +        pathModes(F.t(PATTERN_NOT_MATCHES, PROXY), F.t(PATTERN_MATCHES, PROXY));
 +
 +        startUp();
 +
 +        assert secondary(PATH);
 +        assert secondary(PATH);
 +    }
 +
 +    /**
 +     * Check path resolution when secondary mode is default and there are no exclusion paths.
 +     *
 +     * @throws Exception If failed.
 +     */
 +    public void testSecondaryDefaultNoExclusions() throws Exception {
 +        mode = PROXY;
 +
 +        startUp();
 +
 +        assert secondary(PATH);
 +        assert secondary(PATH);
 +    }
 +
 +    /**
 +     * Check path resolution when secondary mode is default and there are no matching exclusion paths.
 +     *
 +     * @throws Exception If failed.
 +     */
 +    public void testSecondaryDefaultNonMatchingExclusion() throws Exception {
 +        mode = PROXY;
 +
 +        pathModes(F.t(PATTERN_NOT_MATCHES, PRIMARY));
 +
 +        startUp();
 +
 +        assert secondary(PATH);
 +        assert secondary(PATH);
 +    }
 +
 +    /**
 +     * Check path resolution when secondary mode is default and there is a matching exclusion path.
 +     *
 +     * @throws Exception If failed.
 +     */
 +    public void testSecondaryDefaultMatchingExclusion() throws Exception {
 +        mode = PROXY;
 +
 +        pathModes(F.t(PATTERN_NOT_MATCHES, PRIMARY), F.t(PATTERN_MATCHES, PRIMARY));
 +
 +        startUp();
 +
 +        assert !secondary(PATH);
 +        assert !secondary(PATH);
 +    }
 +
 +    /**
 +     * Set GGFS modes for particular paths.
 +     *
 +     * @param modes Modes.
 +     */
 +    @SafeVarargs
 +    final void pathModes(IgniteBiTuple<String, IgniteFsMode>... modes) {
 +        assert modes != null;
 +
 +        pathModes = new LinkedHashMap<>(modes.length, 1.0f);
 +
 +        for (IgniteBiTuple<String, IgniteFsMode> mode : modes)
 +            pathModes.put(mode.getKey(), mode.getValue());
 +    }
 +
 +    /**
 +     * Check whether the given path is treated as SECONDARY in the file system.
 +     *
 +     * @param path Path to check.
 +     * @return {@code True} in case path is secondary.
 +     * @throws Exception If failed.
 +     */
 +    private boolean secondary(Path path) throws Exception {
 +        return fs.mode(path) == PROXY;
 +    }
 +}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/b08492a5/modules/hadoop/src/test/java/org/apache/ignite/ignitefs/GridGgfsNearOnlyMultiNodeSelfTest.java
----------------------------------------------------------------------
diff --cc modules/hadoop/src/test/java/org/apache/ignite/ignitefs/GridGgfsNearOnlyMultiNodeSelfTest.java
index 93b8049,0000000..7a25959
mode 100644,000000..100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/ignitefs/GridGgfsNearOnlyMultiNodeSelfTest.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/ignitefs/GridGgfsNearOnlyMultiNodeSelfTest.java
@@@ -1,212 -1,0 +1,212 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *      http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.ignite.ignitefs;
 +
 +import org.apache.hadoop.conf.*;
 +import org.apache.hadoop.fs.*;
 +import org.apache.hadoop.fs.FileSystem;
 +import org.apache.ignite.cache.*;
 +import org.apache.ignite.configuration.*;
 +import org.apache.ignite.internal.util.ipc.shmem.*;
 +import org.apache.ignite.internal.util.typedef.*;
 +import org.apache.ignite.internal.util.typedef.internal.*;
 +import org.apache.ignite.lang.*;
 +import org.apache.ignite.spi.discovery.tcp.*;
 +import org.apache.ignite.spi.discovery.tcp.ipfinder.*;
 +import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.*;
 +import org.apache.ignite.testframework.junits.common.*;
 +
 +import java.io.*;
 +import java.net.*;
 +import java.util.*;
 +
 +import static org.apache.ignite.cache.CacheAtomicityMode.*;
 +import static org.apache.ignite.cache.CacheDistributionMode.*;
 +import static org.apache.ignite.cache.CacheMode.*;
 +import static org.apache.ignite.events.EventType.*;
 +
 +/**
 + * Test hadoop file system implementation.
 + */
 +public class GridGgfsNearOnlyMultiNodeSelfTest extends GridCommonAbstractTest {
 +    /** Path to the default hadoop configuration. */
 +    public static final String HADOOP_FS_CFG = "examples/config/filesystem/core-site.xml";
 +
 +    /** Group size. */
 +    public static final int GRP_SIZE = 128;
 +
 +    /** IP finder. */
 +    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
 +
 +    /** Node count. */
 +    private int cnt;
 +
 +    /** {@inheritDoc} */
 +    @Override protected void beforeTestsStarted() throws Exception {
 +        startGrids(nodeCount());
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override protected void afterTestsStopped() throws Exception {
 +        G.stopAll(true);
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
 +        IgniteConfiguration cfg = super.getConfiguration(gridName);
 +
 +        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
 +        discoSpi.setIpFinder(IP_FINDER);
 +
 +        cfg.setDiscoverySpi(discoSpi);
 +
 +        IgniteFsConfiguration ggfsCfg = new IgniteFsConfiguration();
 +
 +        ggfsCfg.setDataCacheName("partitioned");
 +        ggfsCfg.setMetaCacheName("partitioned");
 +        ggfsCfg.setName("ggfs");
 +
 +        ggfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{
 +            put("type", "shmem");
 +            put("port", String.valueOf(IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT + cnt));
 +        }});
 +
 +        ggfsCfg.setBlockSize(512 * 1024); // Together with group blocks mapper will yield 64M per node groups.
 +
 +        cfg.setGgfsConfiguration(ggfsCfg);
 +
 +        cfg.setCacheConfiguration(cacheConfiguration(gridName));
 +
 +        cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED);
 +
 +        cnt++;
 +
 +        return cfg;
 +    }
 +
 +    /** @return Node count for test. */
 +    protected int nodeCount() {
 +        return 4;
 +    }
 +
 +    /**
 +     * Gets cache configuration.
 +     *
 +     * @param gridName Grid name.
 +     * @return Cache configuration.
 +     */
 +    protected CacheConfiguration cacheConfiguration(String gridName) {
 +        CacheConfiguration cacheCfg = defaultCacheConfiguration();
 +
 +        cacheCfg.setName("partitioned");
 +        cacheCfg.setCacheMode(PARTITIONED);
 +        cacheCfg.setDistributionMode(cnt == 0 ? NEAR_ONLY : PARTITIONED_ONLY);
 +        cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
 +        cacheCfg.setAffinityMapper(new IgniteFsGroupDataBlocksKeyMapper(GRP_SIZE));
 +        cacheCfg.setBackups(0);
 +        cacheCfg.setQueryIndexEnabled(false);
 +        cacheCfg.setAtomicityMode(TRANSACTIONAL);
 +
 +        return cacheCfg;
 +    }
 +
 +    /**
 +     * Gets config of concrete File System.
 +     *
 +     * @return Config of concrete File System.
 +     */
 +    protected Configuration getFileSystemConfig() {
 +        Configuration cfg = new Configuration();
 +
-         cfg.addResource(U.resolveGridGainUrl(HADOOP_FS_CFG));
++        cfg.addResource(U.resolveIgniteUrl(HADOOP_FS_CFG));
 +
 +        return cfg;
 +    }
 +
 +    /**
 +     * Gets file system URI.
 +     *
 +     * @param grid Grid index.
 +     * @return File system URI.
 +     */
 +    protected URI getFileSystemURI(int grid) {
 +        try {
 +            return new URI("ggfs://127.0.0.1:" + (IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT + grid));
 +        }
 +        catch (URISyntaxException e) {
 +            throw new RuntimeException(e);
 +        }
 +    }
 +
 +    /** @throws Exception If failed. */
 +    public void testContentsConsistency() throws Exception {
 +        try (FileSystem fs = FileSystem.get(getFileSystemURI(0), getFileSystemConfig())) {
 +            Collection<IgniteBiTuple<String, Long>> files = F.asList(
 +                F.t("/dir1/dir2/file1", 1024L),
 +                F.t("/dir1/dir2/file2", 8 * 1024L),
 +                F.t("/dir1/file1", 1024 * 1024L),
 +                F.t("/dir1/file2", 5 * 1024 * 1024L),
 +                F.t("/file1", 64 * 1024L + 13),
 +                F.t("/file2", 13L),
 +                F.t("/file3", 123764L)
 +            );
 +
 +            for (IgniteBiTuple<String, Long> file : files) {
 +
 +                info("Writing file: " + file.get1());
 +
 +                try (OutputStream os = fs.create(new Path(file.get1()), (short)3)) {
 +                    byte[] data = new byte[file.get2().intValue()];
 +
 +                    data[0] = 25;
 +                    data[data.length - 1] = 26;
 +
 +                    os.write(data);
 +                }
 +
 +                info("Finished writing file: " + file.get1());
 +            }
 +
 +            for (int i = 1; i < nodeCount(); i++) {
 +
 +                try (FileSystem ignored = FileSystem.get(getFileSystemURI(i), getFileSystemConfig())) {
 +                    for (IgniteBiTuple<String, Long> file : files) {
 +                        Path path = new Path(file.get1());
 +
 +                        FileStatus fileStatus = fs.getFileStatus(path);
 +
 +                        assertEquals(file.get2(), (Long)fileStatus.getLen());
 +
 +                        byte[] read = new byte[file.get2().intValue()];
 +
 +                        info("Reading file: " + path);
 +
 +                        try (FSDataInputStream in = fs.open(path)) {
 +                            in.readFully(read);
 +
 +                            assert read[0] == 25;
 +                            assert read[read.length - 1] == 26;
 +                        }
 +
 +                        info("Finished reading file: " + path);
 +                    }
 +                }
 +            }
 +        }
 +    }
 +}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/b08492a5/modules/hadoop/src/test/java/org/apache/ignite/ignitefs/IgniteFsEventsTestSuite.java
----------------------------------------------------------------------
diff --cc modules/hadoop/src/test/java/org/apache/ignite/ignitefs/IgniteFsEventsTestSuite.java
index 14b78ae,0000000..a040dcc
mode 100644,000000..100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/ignitefs/IgniteFsEventsTestSuite.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/ignitefs/IgniteFsEventsTestSuite.java
@@@ -1,267 -1,0 +1,267 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *      http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.ignite.ignitefs;
 +
 +import junit.framework.*;
 +import org.apache.ignite.*;
 +import org.apache.ignite.configuration.*;
 +import org.apache.ignite.internal.fs.hadoop.*;
 +import org.apache.ignite.internal.processors.hadoop.*;
 +import org.apache.ignite.internal.util.ipc.shmem.*;
 +import org.apache.ignite.internal.util.typedef.*;
 +import org.jetbrains.annotations.*;
 +
 +import java.util.*;
 +
 +import static org.apache.ignite.ignitefs.IgniteFsMode.*;
 +
 +/**
 + * Test suite for GGFS event tests.
 + */
 +@SuppressWarnings("PublicInnerClass")
 +public class IgniteFsEventsTestSuite extends TestSuite {
 +    /**
 +     * @return Test suite.
 +     * @throws Exception Thrown in case of the failure.
 +     */
 +    public static TestSuite suite() throws Exception {
 +        GridHadoopClassLoader ldr = new GridHadoopClassLoader(null);
 +
 +        TestSuite suite = new TestSuite("Ignite FS Events Test Suite");
 +
 +        suite.addTest(new TestSuite(ldr.loadClass(ShmemPrivate.class.getName())));
 +        suite.addTest(new TestSuite(ldr.loadClass(ShmemDualSync.class.getName())));
 +        suite.addTest(new TestSuite(ldr.loadClass(ShmemDualAsync.class.getName())));
 +
 +        suite.addTest(new TestSuite(ldr.loadClass(LoopbackPrivate.class.getName())));
 +        suite.addTest(new TestSuite(ldr.loadClass(LoopbackDualSync.class.getName())));
 +        suite.addTest(new TestSuite(ldr.loadClass(LoopbackDualAsync.class.getName())));
 +
 +        return suite;
 +    }
 +
 +    /**
 +     * @return Test suite with only tests that are supported on all platforms.
 +     * @throws Exception Thrown in case of the failure.
 +     */
 +    public static TestSuite suiteNoarchOnly() throws Exception {
 +        GridHadoopClassLoader ldr = new GridHadoopClassLoader(null);
 +
-         TestSuite suite = new TestSuite("Gridgain GGFS Events Test Suite Noarch Only");
++        TestSuite suite = new TestSuite("Ignite GGFS Events Test Suite Noarch Only");
 +
 +        suite.addTest(new TestSuite(ldr.loadClass(LoopbackPrivate.class.getName())));
 +        suite.addTest(new TestSuite(ldr.loadClass(LoopbackDualSync.class.getName())));
 +        suite.addTest(new TestSuite(ldr.loadClass(LoopbackDualAsync.class.getName())));
 +
 +        return suite;
 +    }
 +
 +    /**
 +     * Shared memory IPC in PRIVATE mode.
 +     */
 +    public static class ShmemPrivate extends GridGgfsEventsAbstractSelfTest {
 +        /** {@inheritDoc} */
 +        @Override protected IgniteFsConfiguration getGgfsConfiguration() throws IgniteCheckedException {
 +            IgniteFsConfiguration ggfsCfg = super.getGgfsConfiguration();
 +
 +            ggfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{
 +                put("type", "shmem");
 +                put("port", String.valueOf(IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT + 1));
 +            }});
 +
 +            return ggfsCfg;
 +        }
 +    }
 +
 +    /**
 +     * Loopback socket IPC in PRIVATE mode.
 +     */
 +    public static class LoopbackPrivate extends GridGgfsEventsAbstractSelfTest {
 +        /** {@inheritDoc} */
 +        @Override protected IgniteFsConfiguration getGgfsConfiguration() throws IgniteCheckedException {
 +            IgniteFsConfiguration ggfsCfg = super.getGgfsConfiguration();
 +
 +            ggfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{
 +                put("type", "tcp");
 +                put("port", String.valueOf(IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT + 1));
 +            }});
 +
 +            return ggfsCfg;
 +        }
 +    }
 +
 +    /**
 +     * Base class for all GGFS tests with primary and secondary file system.
 +     */
 +    public abstract static class PrimarySecondaryTest extends GridGgfsEventsAbstractSelfTest {
 +        /** Secondary file system. */
 +        private static IgniteFs ggfsSec;
 +
 +        /** {@inheritDoc} */
 +        @Override protected IgniteFsConfiguration getGgfsConfiguration() throws IgniteCheckedException {
 +            IgniteFsConfiguration ggfsCfg = super.getGgfsConfiguration();
 +
 +            ggfsCfg.setSecondaryFileSystem(new GridGgfsHadoopFileSystemWrapper(
 +                "ggfs://ggfs-secondary:grid-secondary@127.0.0.1:11500/",
 +                "modules/core/src/test/config/hadoop/core-site-secondary.xml"));
 +
 +            return ggfsCfg;
 +        }
 +
 +        /**
 +         * @return GGFS configuration for secondary file system.
 +         */
 +        protected IgniteFsConfiguration getSecondaryGgfsConfiguration() throws IgniteCheckedException {
 +            IgniteFsConfiguration ggfsCfg = super.getGgfsConfiguration();
 +
 +            ggfsCfg.setName("ggfs-secondary");
 +            ggfsCfg.setDefaultMode(PRIMARY);
 +            ggfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>(){{
 +                put("type", "tcp");
 +                put("port", "11500");
 +            }});
 +
 +            return ggfsCfg;
 +        }
 +
 +        /** {@inheritDoc} */
 +        @Override protected void beforeTestsStarted() throws Exception {
 +            ggfsSec = startSecondary();
 +
 +            super.beforeTestsStarted();
 +        }
 +
 +        /** {@inheritDoc} */
 +        @Override protected void afterTestsStopped() throws Exception {
 +            super.afterTestsStopped();
 +
 +            G.stopAll(true);
 +        }
 +
 +        /** {@inheritDoc} */
 +        @Override protected void afterTest() throws Exception {
 +            super.afterTest();
 +
 +            // Clean up secondary file system.
 +            ggfsSec.format();
 +        }
 +
 +        /**
 +         * Start a grid with the secondary file system.
 +         *
 +         * @return Secondary file system handle.
 +         * @throws Exception If failed.
 +         */
 +        @Nullable private IgniteFs startSecondary() throws Exception {
 +            IgniteConfiguration cfg = getConfiguration("grid-secondary", 
getSecondaryGgfsConfiguration());
 +
 +            cfg.setLocalHost("127.0.0.1");
 +            cfg.setPeerClassLoadingEnabled(false);
 +
 +            Ignite secG = G.start(cfg);
 +
 +            return secG.fileSystem("ggfs-secondary");
 +        }
 +    }
 +
 +    /**
 +     * Shared memory IPC in DUAL_SYNC mode.
 +     */
 +    public static class ShmemDualSync extends PrimarySecondaryTest {
 +        /** {@inheritDoc} */
 +        @Override protected IgniteFsConfiguration getGgfsConfiguration() 
throws IgniteCheckedException {
 +            IgniteFsConfiguration ggfsCfg = super.getGgfsConfiguration();
 +
 +            ggfsCfg.setDefaultMode(DUAL_SYNC);
 +
 +            return ggfsCfg;
 +        }
 +    }
 +
 +    /**
 +     * Shared memory IPC in DUAL_ASYNC mode.
 +     */
 +    public static class ShmemDualAsync extends PrimarySecondaryTest {
 +        /** {@inheritDoc} */
 +        @Override protected IgniteFsConfiguration getGgfsConfiguration() 
throws IgniteCheckedException {
 +            IgniteFsConfiguration ggfsCfg = super.getGgfsConfiguration();
 +
 +            ggfsCfg.setDefaultMode(DUAL_ASYNC);
 +
 +            return ggfsCfg;
 +        }
 +    }
 +
 +    /**
 +     * Loopback socket IPC with secondary file system.
 +     */
 +    public abstract static class LoopbackPrimarySecondaryTest extends 
PrimarySecondaryTest {
 +        /** {@inheritDoc} */
 +        @Override protected IgniteFsConfiguration getGgfsConfiguration() 
throws IgniteCheckedException {
 +            IgniteFsConfiguration ggfsCfg = super.getGgfsConfiguration();
 +
 +            ggfsCfg.setSecondaryFileSystem(new 
GridGgfsHadoopFileSystemWrapper(
 +                "ggfs://ggfs-secondary:grid-secondary@127.0.0.1:11500/",
 +                
"modules/core/src/test/config/hadoop/core-site-loopback-secondary.xml"));
 +
 +            return ggfsCfg;
 +        }
 +
 +        /** {@inheritDoc} */
 +        @Override protected IgniteFsConfiguration 
getSecondaryGgfsConfiguration() throws IgniteCheckedException {
 +            IgniteFsConfiguration ggfsCfg = 
super.getSecondaryGgfsConfiguration();
 +
 +            ggfsCfg.setName("ggfs-secondary");
 +            ggfsCfg.setDefaultMode(PRIMARY);
 +            ggfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() 
{{
 +                put("type", "tcp");
 +                put("port", "11500");
 +            }});
 +
 +            return ggfsCfg;
 +        }
 +    }
 +
 +    /**
 +     * Loopback IPC in DUAL_SYNC mode.
 +     */
 +    public static class LoopbackDualSync extends LoopbackPrimarySecondaryTest 
{
 +        /** {@inheritDoc} */
 +        @Override protected IgniteFsConfiguration getGgfsConfiguration() 
throws IgniteCheckedException {
 +            IgniteFsConfiguration ggfsCfg = super.getGgfsConfiguration();
 +
 +            ggfsCfg.setDefaultMode(DUAL_SYNC);
 +
 +            return ggfsCfg;
 +        }
 +    }
 +
 +    /**
 +     * Loopback socket IPC in DUAL_ASYNC mode.
 +     */
 +    public static class LoopbackDualAsync extends 
LoopbackPrimarySecondaryTest {
 +        /** {@inheritDoc} */
 +        @Override protected IgniteFsConfiguration getGgfsConfiguration() 
throws IgniteCheckedException {
 +            IgniteFsConfiguration ggfsCfg = super.getGgfsConfiguration();
 +
 +            ggfsCfg.setDefaultMode(DUAL_ASYNC);
 +
 +            return ggfsCfg;
 +        }
 +    }
 +}
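
For reference, the IPC endpoint wiring exercised by the event test classes above reduces to a small string map handed to IgniteFsConfiguration. Below is a minimal sketch of that pattern; it assumes only types used in this diff (import locations are inferred from the wildcard imports in these files), and the helper class and method names are illustrative:

    import org.apache.ignite.configuration.IgniteFsConfiguration;
    import org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint;

    import java.util.HashMap;

    /** Illustrative helper mirroring the shmem/tcp endpoint maps used in the tests above. */
    public class IpcEndpointConfigSketch {
        /**
         * @param ggfsCfg Configuration to update.
         * @param shmem {@code true} for shared memory IPC, {@code false} for loopback TCP.
         * @return The same configuration with an IPC endpoint set.
         */
        static IgniteFsConfiguration withEndpoint(IgniteFsConfiguration ggfsCfg, boolean shmem) {
            HashMap<String, String> endpointCfg = new HashMap<>();

            // Same keys as the anonymous maps in ShmemPrivate and LoopbackPrivate.
            endpointCfg.put("type", shmem ? "shmem" : "tcp");
            endpointCfg.put("port", String.valueOf(IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT + 1));

            ggfsCfg.setIpcEndpointConfiguration(endpointCfg);

            return ggfsCfg;
        }
    }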

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/b08492a5/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopCommandLineTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/b08492a5/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteFsLinuxAndMacOSTestSuite.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/b08492a5/modules/hibernate/src/main/java/org/apache/ignite/cache/store/hibernate/CacheHibernateBlobStore.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/b08492a5/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/GridCachePartitionedHitsAndMissesSelfTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/b08492a5/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/sql/GridQueryParsingTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/b08492a5/modules/jcl/src/main/java/org/apache/ignite/logger/jcl/IgniteJclLogger.java
----------------------------------------------------------------------
diff --cc 
modules/jcl/src/main/java/org/apache/ignite/logger/jcl/IgniteJclLogger.java
index 0000000,2d6629f..87be646
mode 000000,100644..100644
--- 
a/modules/jcl/src/main/java/org/apache/ignite/logger/jcl/IgniteJclLogger.java
+++ 
b/modules/jcl/src/main/java/org/apache/ignite/logger/jcl/IgniteJclLogger.java
@@@ -1,0 -1,170 +1,170 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  *      http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.ignite.logger.jcl;
+ 
+ import org.apache.commons.logging.*;
+ import org.apache.ignite.*;
+ import org.jetbrains.annotations.*;
+ 
+ /**
+  * This logger wraps any JCL (<a target=_blank 
href="http://jakarta.apache.org/commons/logging/";>Jakarta Commons Logging</a>)
+  * loggers. Implementation simply delegates to underlying JCL logger. This 
logger
+  * should be used by loaders that have JCL-based internal logging (e.g., 
Websphere).
+  * <p>
+  * Here is an example of configuring JCL logger in Ignite configuration Spring
+  * file to work over log4j implementation. Note that we use the same 
configuration file
+  * as we provide by default:
+  * <pre name="code" class="xml">
+  *      ...
+  *      &lt;property name="gridLogger"&gt;
+  *          &lt;bean class="org.apache.ignite.logger.jcl.IgniteJclLogger"&gt;
+  *              &lt;constructor-arg type="org.apache.commons.logging.Log"&gt;
+  *                  &lt;bean 
class="org.apache.commons.logging.impl.Log4JLogger"&gt;
+  *                      &lt;constructor-arg type="java.lang.String" 
value="config/ignite-log4j.xml"/&gt;
+  *                  &lt;/bean&gt;
+  *              &lt;/constructor-arg&gt;
+  *          &lt;/bean&gt;
+  *      &lt;/property&gt;
+  *      ...
+  * </pre>
+  * If you are using system properties to configure JCL logger use following 
configuration:
+  * <pre name="code" class="xml">
+  *      ...
+  *      &lt;property name="gridLogger"&gt;
+  *          &lt;bean class="org.apache.ignite.logger.jcl.IgniteJclLogger"/&gt;
+  *      &lt;/property&gt;
+  *      ...
+  * </pre>
+  * And the same configuration if you'd like to configure Ignite in your code:
+  * <pre name="code" class="java">
+  *      GridConfiguration cfg = new GridConfiguration();
+  *      ...
+  *      GridLogger log = new IgniteJclLogger(new 
Log4JLogger("config/ignite-log4j.xml"));
+  *      ...
+  *      cfg.setGridLogger(log);
+  * </pre>
+  * or following for the configuration by means of system properties:
+  * <pre name="code" class="java">
+  *      GridConfiguration cfg = new GridConfiguration();
+  *      ...
+  *      GridLogger log = new IgniteJclLogger();
+  *      ...
+  *      cfg.setGridLogger(log);
+  * </pre>
+  *
+  * <p>
+  * It's recommended to use Ignite logger injection instead of 
using/instantiating
 - * logger in your task/job code. See {@link 
org.apache.ignite.resources.IgniteLoggerResource} annotation about logger
++ * logger in your task/job code. See {@link 
org.apache.ignite.resources.LoggerResource} annotation about logger
+  * injection.
+  */
+ public class IgniteJclLogger implements IgniteLogger {
+     /** */
+     private static final long serialVersionUID = 0L;
+ 
+     /** JCL implementation proxy. */
+     private Log impl;
+ 
+     /**
+      * Creates new logger.
+      */
+     public IgniteJclLogger() {
+         this(LogFactory.getLog(IgniteJclLogger.class.getName()));
+     }
+ 
+     /**
+      * Creates new logger with given implementation.
+      *
+      * @param impl JCL implementation to use.
+      */
+     public IgniteJclLogger(Log impl) {
+         assert impl != null;
+ 
+         this.impl = impl;
+     }
+ 
+     /** {@inheritDoc} */
+     @Override public IgniteLogger getLogger(Object ctgr) {
+         return new IgniteJclLogger(LogFactory.getLog(
+             ctgr instanceof Class ? ((Class)ctgr).getName() : 
String.valueOf(ctgr)));
+     }
+ 
+     /** {@inheritDoc} */
+     @Override public void trace(String msg) {
+         impl.trace(msg);
+     }
+ 
+     /** {@inheritDoc} */
+     @Override public void debug(String msg) {
+         impl.debug(msg);
+     }
+ 
+     /** {@inheritDoc} */
+     @Override public void info(String msg) {
+         impl.info(msg);
+     }
+ 
+     /** {@inheritDoc} */
+     @Override public void warning(String msg) {
+         impl.warn(msg);
+     }
+ 
+     /** {@inheritDoc} */
+     @Override public void warning(String msg, @Nullable Throwable e) {
+         impl.warn(msg, e);
+     }
+ 
+     /** {@inheritDoc} */
+     @Override public void error(String msg) {
+         impl.error(msg);
+     }
+ 
+     /** {@inheritDoc} */
+     @Override public boolean isQuiet() {
+         return !isInfoEnabled() && !isDebugEnabled();
+     }
+ 
+     /** {@inheritDoc} */
+     @Override public void error(String msg, @Nullable Throwable e) {
+         impl.error(msg, e);
+     }
+ 
+     /** {@inheritDoc} */
+     @Override public boolean isTraceEnabled() {
+         return impl.isTraceEnabled();
+     }
+ 
+     /** {@inheritDoc} */
+     @Override public boolean isDebugEnabled() {
+         return impl.isDebugEnabled();
+     }
+ 
+     /** {@inheritDoc} */
+     @Override public boolean isInfoEnabled() {
+         return impl.isInfoEnabled();
+     }
+ 
+     /** {@inheritDoc} */
+     @Nullable @Override public String fileName() {
+         return null;
+     }
+ 
+     /** {@inheritDoc} */
+     @Override public String toString() {
+         return "IgniteJclLogger [impl=" + impl + ']';
+     }
+ }
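
The Javadoc above walks through Spring-based and programmatic configuration of the JCL wrapper. A minimal programmatic sketch, using only the constructor and methods shown in this diff (the logger category name is illustrative):

    import org.apache.commons.logging.LogFactory;
    import org.apache.ignite.IgniteLogger;
    import org.apache.ignite.logger.jcl.IgniteJclLogger;

    public class IgniteJclLoggerExample {
        public static void main(String[] args) {
            // Delegates to whatever JCL implementation is bound on the classpath.
            IgniteLogger log = new IgniteJclLogger(LogFactory.getLog("ignite.example"));

            if (log.isInfoEnabled())
                log.info("JCL-backed Ignite logger is ready: " + log);
        }
    }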

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/b08492a5/modules/log4j/src/main/java/org/apache/ignite/logger/log4j/Log4JLogger.java
----------------------------------------------------------------------
diff --cc 
modules/log4j/src/main/java/org/apache/ignite/logger/log4j/Log4JLogger.java
index b170ebc,0000000..561acd4
mode 100644,000000..100644
--- 
a/modules/log4j/src/main/java/org/apache/ignite/logger/log4j/Log4JLogger.java
+++ 
b/modules/log4j/src/main/java/org/apache/ignite/logger/log4j/Log4JLogger.java
@@@ -1,522 -1,0 +1,522 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *      http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.ignite.logger.log4j;
 +
 +import org.apache.ignite.*;
 +import org.apache.ignite.internal.util.*;
 +import org.apache.ignite.internal.util.tostring.*;
 +import org.apache.ignite.internal.util.typedef.*;
 +import org.apache.ignite.internal.util.typedef.internal.*;
 +import org.apache.ignite.lang.*;
 +import org.apache.ignite.logger.*;
 +import org.apache.log4j.*;
 +import org.apache.log4j.varia.*;
 +import org.apache.log4j.xml.*;
 +import org.jetbrains.annotations.*;
 +
 +import java.io.*;
 +import java.net.*;
 +import java.util.*;
 +
 +import static org.apache.ignite.IgniteSystemProperties.*;
 +
 +/**
 + * Log4j-based implementation for logging. This logger should be used
 + * by loaders that prefer <a target=_new 
href="http://logging.apache.org/log4j/docs/">log4j</a>-based logging.
 + * <p>
-  * Here is a typical example of configuring log4j logger in GridGain 
configuration file:
++ * Here is a typical example of configuring log4j logger in Ignite 
configuration file:
 + * <pre name="code" class="xml">
 + *      &lt;property name="gridLogger"&gt;
-  *          &lt;bean 
class="org.gridgain.grid.logger.log4j.GridLog4jLogger"&gt;
++ *          &lt;bean 
class="org.apache.ignite.grid.logger.log4j.GridLog4jLogger"&gt;
 + *              &lt;constructor-arg type="java.lang.String" 
value="config/ignite-log4j.xml"/&gt;
 + *          &lt;/bean>
 + *      &lt;/property&gt;
 + * </pre>
 + * and from your code:
 + * <pre name="code" class="java">
 + *      GridConfiguration cfg = new GridConfiguration();
 + *      ...
-  *      URL xml = U.resolveGridGainUrl("config/custom-log4j.xml");
++ *      URL xml = U.resolveIgniteUrl("config/custom-log4j.xml");
 + *      GridLogger log = new GridLog4jLogger(xml);
 + *      ...
 + *      cfg.setGridLogger(log);
 + * </pre>
 + *
 + * Please take a look at <a target=_new 
href="http://logging.apache.org/log4j/1.2/index.html";>Apache Log4j 1.2</a>
 + * for additional information.
 + * <p>
-  * It's recommended to use GridGain logger injection instead of 
using/instantiating
++ * It's recommended to use Ignite logger injection instead of 
using/instantiating
 + * logger in your task/job code. See {@link 
org.apache.ignite.resources.LoggerResource} annotation about logger
 + * injection.
 + */
 +public class Log4JLogger implements IgniteLogger, LoggerNodeIdAware,
 +    IgniteLog4jFileAware {
 +    /** */
 +    private static final long serialVersionUID = 0L;
 +
 +    /** Appenders. */
 +    private static Collection<FileAppender> fileAppenders = new 
GridConcurrentHashSet<>();
 +
 +    /** */
 +    private static volatile boolean inited;
 +
 +    /** */
 +    private static volatile boolean quiet0;
 +
 +    /** */
 +    private static final Object mux = new Object();
 +
 +    /** Logger implementation. */
 +    @GridToStringExclude
 +    @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
 +    private Logger impl;
 +
 +    /** Path to configuration file. */
 +    private final String path;
 +
 +    /** Quiet flag. */
 +    private final boolean quiet;
 +
 +    /** Node ID. */
 +    private UUID nodeId;
 +
 +    /**
 +     * Creates new logger and automatically detects if root logger already
 +     * has appenders configured. If it does not, the root logger will be
 +     * configured with default appender (analogous to calling
 +     * {@link #Log4JLogger(boolean) GridLog4jLogger(boolean)}
 +     * with parameter {@code true}, otherwise, existing appenders will be 
used (analogous
 +     * to calling {@link #Log4JLogger(boolean) GridLog4jLogger(boolean)}
 +     * with parameter {@code false}).
 +     */
 +    public Log4JLogger() {
 +        this(!isConfigured());
 +    }
 +
 +    /**
 +     * Creates new logger. If initialize parameter is {@code true} the Log4j
 +     * logger will be initialized with default console appender and {@code 
INFO}
 +     * log level.
 +     *
 +     * @param init If {@code true}, then a default console appender with
 +     *      following pattern layout will be created: {@code %d{ABSOLUTE} 
%-5p [%c{1}] %m%n}.
 +     *      If {@code false}, then no implicit initialization will take place,
 +     *      and {@code Log4j} should be configured prior to calling this
 +     *      constructor.
 +     */
 +    public Log4JLogger(boolean init) {
 +        impl = Logger.getRootLogger();
 +
 +        if (init) {
 +            // Implementation has already been inited, passing NULL.
 +            addConsoleAppenderIfNeeded(Level.INFO, null);
 +
 +            quiet = quiet0;
 +        }
 +        else
 +            quiet = true;
 +
 +        path = null;
 +    }
 +
 +    /**
 +     * Creates new logger with given implementation.
 +     *
 +     * @param impl Log4j implementation to use.
 +     */
 +    public Log4JLogger(final Logger impl) {
 +        assert impl != null;
 +
 +        path = null;
 +
 +        addConsoleAppenderIfNeeded(null, new C1<Boolean, Logger>() {
 +            @Override public Logger apply(Boolean init) {
 +                return impl;
 +            }
 +        });
 +
 +        quiet = quiet0;
 +    }
 +
 +    /**
 +     * Creates new logger with given configuration {@code path}.
 +     *
 +     * @param path Path to log4j configuration XML file.
 +     * @throws IgniteCheckedException Thrown in case logger can't be created.
 +     */
 +    public Log4JLogger(String path) throws IgniteCheckedException {
 +        if (path == null)
 +            throw new IgniteCheckedException("Configuration XML file for 
Log4j must be specified.");
 +
 +        this.path = path;
 +
-         final URL cfgUrl = U.resolveGridGainUrl(path);
++        final URL cfgUrl = U.resolveIgniteUrl(path);
 +
 +        if (cfgUrl == null)
 +            throw new IgniteCheckedException("Log4j configuration path was 
not found: " + path);
 +
 +        addConsoleAppenderIfNeeded(null, new C1<Boolean, Logger>() {
 +            @Override public Logger apply(Boolean init) {
 +                if (init)
 +                    DOMConfigurator.configure(cfgUrl);
 +
 +                return Logger.getRootLogger();
 +            }
 +        });
 +
 +        quiet = quiet0;
 +    }
 +
 +    /**
 +     * Creates new logger with given configuration {@code cfgFile}.
 +     *
 +     * @param cfgFile Log4j configuration XML file.
 +     * @throws IgniteCheckedException Thrown in case logger can't be created.
 +     */
 +    public Log4JLogger(File cfgFile) throws IgniteCheckedException {
 +        if (cfgFile == null)
 +            throw new IgniteCheckedException("Configuration XML file for 
Log4j must be specified.");
 +
 +        if (!cfgFile.exists() || cfgFile.isDirectory())
 +            throw new IgniteCheckedException("Log4j configuration path was 
not found or is a directory: " + cfgFile);
 +
 +        path = cfgFile.getAbsolutePath();
 +
 +        addConsoleAppenderIfNeeded(null, new C1<Boolean, Logger>() {
 +            @Override public Logger apply(Boolean init) {
 +                if (init)
 +                    DOMConfigurator.configure(path);
 +
 +                return Logger.getRootLogger();
 +            }
 +        });
 +
 +        quiet = quiet0;
 +    }
 +
 +    /**
 +     * Creates new logger with given configuration {@code cfgUrl}.
 +     *
 +     * @param cfgUrl URL for Log4j configuration XML file.
 +     * @throws IgniteCheckedException Thrown in case logger can't be created.
 +     */
 +    public Log4JLogger(final URL cfgUrl) throws IgniteCheckedException {
 +        if (cfgUrl == null)
 +            throw new IgniteCheckedException("Configuration XML file for 
Log4j must be specified.");
 +
 +        path = null;
 +
 +        addConsoleAppenderIfNeeded(null, new C1<Boolean, Logger>() {
 +            @Override public Logger apply(Boolean init) {
 +                if (init)
 +                    DOMConfigurator.configure(cfgUrl);
 +
 +                return Logger.getRootLogger();
 +            }
 +        });
 +
 +        quiet = quiet0;
 +    }
 +
 +    /**
 +     * Checks if Log4j is already configured within this VM or not.
 +     *
 +     * @return {@code True} if log4j was already configured, {@code false} 
otherwise.
 +     */
 +    public static boolean isConfigured() {
 +        return Logger.getRootLogger().getAllAppenders().hasMoreElements();
 +    }
 +
 +    /**
 +     * Sets level for internal log4j implementation.
 +     *
 +     * @param level Log level to set.
 +     */
 +    public void setLevel(Level level) {
 +        impl.setLevel(level);
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Nullable @Override public String fileName() {
 +        FileAppender fapp = F.first(fileAppenders);
 +
 +        return fapp != null ? fapp.getFile() : null;
 +    }
 +
 +    /**
 +     * Adds console appender when needed with some default logging settings.
 +     *
 +     * @param logLevel Optional log level.
 +     * @param implInitC Optional log implementation init closure.
 +     */
 +    private void addConsoleAppenderIfNeeded(@Nullable Level logLevel,
 +        @Nullable IgniteClosure<Boolean, Logger> implInitC) {
 +        if (inited) {
 +            if (implInitC != null)
 +                // Do not init.
 +                impl = implInitC.apply(false);
 +
 +            return;
 +        }
 +
 +        synchronized (mux) {
 +            if (inited) {
 +                if (implInitC != null)
 +                    // Do not init.
 +                    impl = implInitC.apply(false);
 +
 +                return;
 +            }
 +
 +            if (implInitC != null)
 +                // Init logger impl.
 +                impl = implInitC.apply(true);
 +
 +            boolean quiet = Boolean.valueOf(System.getProperty(IGNITE_QUIET, 
"true"));
 +
 +            boolean consoleAppenderFound = false;
 +            Category rootCategory = null;
 +            ConsoleAppender errAppender = null;
 +
 +            for (Category l = impl; l != null; ) {
 +                if (!consoleAppenderFound) {
 +                    for (Enumeration appenders = l.getAllAppenders(); 
appenders.hasMoreElements(); ) {
 +                        Appender appender = (Appender)appenders.nextElement();
 +
 +                        if (appender instanceof ConsoleAppender) {
 +                            if ("CONSOLE_ERR".equals(appender.getName())) {
 +                                // Treat CONSOLE_ERR appender as a system one 
and don't count it.
 +                                errAppender = (ConsoleAppender)appender;
 +
 +                                continue;
 +                            }
 +
 +                            consoleAppenderFound = true;
 +
 +                            break;
 +                        }
 +                    }
 +                }
 +
 +                if (l.getParent() == null) {
 +                    rootCategory = l;
 +
 +                    break;
 +                }
 +                else
 +                    l = l.getParent();
 +            }
 +
 +            if (consoleAppenderFound && quiet)
 +                // User configured console appender, but log is quiet.
 +                quiet = false;
 +
 +            if (!consoleAppenderFound && !quiet && 
Boolean.valueOf(System.getProperty(IGNITE_CONSOLE_APPENDER, "true"))) {
 +                // Console appender not found => we've looked through all 
categories up to root.
 +                assert rootCategory != null;
 +
-                 // User launched gridgain in verbose mode and did not add 
console appender with INFO level
++                // User launched ignite in verbose mode and did not add 
console appender with INFO level
 +                // to configuration and did not set IGNITE_CONSOLE_APPENDER 
to false.
 +                if (errAppender != null) {
 +                    
rootCategory.addAppender(createConsoleAppender(Level.INFO));
 +
 +                    if (errAppender.getThreshold() == Level.ERROR)
 +                        errAppender.setThreshold(Level.WARN);
 +                }
 +                else
 +                    // No error console appender => create console appender 
with no level limit.
 +                    
rootCategory.addAppender(createConsoleAppender(Level.OFF));
 +
 +                if (logLevel != null)
 +                    impl.setLevel(logLevel);
 +            }
 +
 +            quiet0 = quiet;
 +            inited = true;
 +        }
 +    }
 +
 +    /**
 +     * Creates console appender with some reasonable default logging settings.
 +     *
 +     * @param maxLevel Max logging level.
 +     * @return New console appender.
 +     */
 +    private Appender createConsoleAppender(Level maxLevel) {
 +        String fmt = "[%d{ABSOLUTE}][%-5p][%t][%c{1}] %m%n";
 +
 +        // Configure output that should go to System.out
 +        Appender app = new ConsoleAppender(new PatternLayout(fmt), 
ConsoleAppender.SYSTEM_OUT);
 +
 +        LevelRangeFilter lvlFilter = new LevelRangeFilter();
 +
 +        lvlFilter.setLevelMin(Level.TRACE);
 +        lvlFilter.setLevelMax(maxLevel);
 +
 +        app.addFilter(lvlFilter);
 +
 +        return app;
 +    }
 +
 +    /**
 +     * Adds file appender.
 +     *
 +     * @param a Appender.
 +     */
 +    public static void addAppender(FileAppender a) {
 +        A.notNull(a, "a");
 +
 +        fileAppenders.add(a);
 +    }
 +
 +    /**
 +     * Removes file appender.
 +     *
 +     * @param a Appender.
 +     */
 +    public static void removeAppender(FileAppender a) {
 +        A.notNull(a, "a");
 +
 +        fileAppenders.remove(a);
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override public void setNodeId(UUID nodeId) {
 +        A.notNull(nodeId, "nodeId");
 +
 +        this.nodeId = nodeId;
 +
 +        updateFilePath(new IgniteLog4jNodeIdFilePath(nodeId));
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override public UUID getNodeId() {
 +        return nodeId;
 +    }
 +
 +    /**
 +     * Gets files for all registered file appenders.
 +     *
 +     * @return List of files.
 +     */
 +    public static Collection<String> logFiles() {
 +        Collection<String> res = new ArrayList<>(fileAppenders.size());
 +
 +        for (FileAppender a : fileAppenders)
 +            res.add(a.getFile());
 +
 +        return res;
 +    }
 +
 +    /**
 +     * Gets {@link org.apache.ignite.IgniteLogger} wrapper around log4j 
logger for the given
 +     * category. If category is {@code null}, then root logger is returned. If
 +     * category is an instance of {@link Class} then {@code 
((Class)ctgr).getName()}
 +     * is used as category name.
 +     *
 +     * @param ctgr {@inheritDoc}
 +     * @return {@link org.apache.ignite.IgniteLogger} wrapper around log4j 
logger.
 +     */
 +    @Override public Log4JLogger getLogger(Object ctgr) {
 +        return new Log4JLogger(ctgr == null ? Logger.getRootLogger() :
 +            ctgr instanceof Class ? 
Logger.getLogger(((Class<?>)ctgr).getName()) :
 +                Logger.getLogger(ctgr.toString()));
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override public void trace(String msg) {
 +        if (!impl.isTraceEnabled())
 +            warning("Logging at TRACE level without checking if TRACE level 
is enabled: " + msg);
 +
 +        impl.trace(msg);
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override public void debug(String msg) {
 +        if (!impl.isDebugEnabled())
 +            warning("Logging at DEBUG level without checking if DEBUG level 
is enabled: " + msg);
 +
 +        impl.debug(msg);
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override public void info(String msg) {
 +        if (!impl.isInfoEnabled())
 +            warning("Logging at INFO level without checking if INFO level is 
enabled: " + msg);
 +
 +        impl.info(msg);
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override public void warning(String msg) {
 +        impl.warn(msg);
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override public void warning(String msg, @Nullable Throwable e) {
 +        impl.warn(msg, e);
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override public void error(String msg) {
 +        impl.error(msg);
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override public void error(String msg, @Nullable Throwable e) {
 +        impl.error(msg, e);
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override public boolean isTraceEnabled() {
 +        return impl.isTraceEnabled();
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override public boolean isDebugEnabled() {
 +        return impl.isDebugEnabled();
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override public boolean isInfoEnabled() {
 +        return impl.isInfoEnabled();
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override public boolean isQuiet() {
 +        return quiet;
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override public String toString() {
 +        return S.toString(Log4JLogger.class, this);
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override public void updateFilePath(IgniteClosure<String, String> 
filePathClos) {
 +        A.notNull(filePathClos, "filePathClos");
 +
 +        for (FileAppender a : fileAppenders) {
 +            if (a instanceof IgniteLog4jFileAware) {
 +                ((IgniteLog4jFileAware)a).updateFilePath(filePathClos);
 +
 +                a.activateOptions();
 +            }
 +        }
 +    }
 +}
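
Putting the constructors and accessors above together, here is a minimal usage sketch. The configuration path is the one from the Javadoc example and is illustrative; wiring the logger into a grid configuration is omitted:

    import org.apache.ignite.IgniteCheckedException;
    import org.apache.ignite.IgniteLogger;
    import org.apache.ignite.logger.log4j.Log4JLogger;
    import org.apache.log4j.Level;

    public class Log4JLoggerExample {
        public static void main(String[] args) throws IgniteCheckedException {
            // Reuse an existing log4j setup if present, otherwise load an XML configuration.
            Log4JLogger log = Log4JLogger.isConfigured()
                ? new Log4JLogger(false)
                : new Log4JLogger("config/ignite-log4j.xml");

            log.setLevel(Level.INFO);

            // Category-scoped wrapper, as returned by getLogger(Object) above.
            IgniteLogger catLog = log.getLogger(Log4JLoggerExample.class);

            if (catLog.isInfoEnabled())
                catLog.info("Current log file: " + log.fileName());
        }
    }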

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/b08492a5/modules/log4j/src/test/java/org/apache/ignite/logger/log4j/GridLog4jCorrectFileNameTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/b08492a5/modules/log4j/src/test/java/org/apache/ignite/logger/log4j/GridLog4jLoggingFileTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/b08492a5/modules/log4j/src/test/java/org/apache/ignite/logger/log4j/GridLog4jLoggingUrlTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/b08492a5/modules/scalar/src/test/resources/spring-cache.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/b08492a5/modules/scalar/src/test/resources/spring-ping-pong-partner.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/b08492a5/modules/slf4j/src/main/java/org/apache/ignite/logger/slf4j/GridSlf4jLogger.java
----------------------------------------------------------------------
diff --cc 
modules/slf4j/src/main/java/org/apache/ignite/logger/slf4j/GridSlf4jLogger.java
index 3533b0d,eba5d7d..fa8576e
--- 
a/modules/slf4j/src/main/java/org/apache/ignite/logger/slf4j/GridSlf4jLogger.java
+++ 
b/modules/slf4j/src/main/java/org/apache/ignite/logger/slf4j/GridSlf4jLogger.java
@@@ -32,8 -32,8 +32,8 @@@ import org.slf4j.*
   *      &lt;/property&gt;
   * </pre>
   * <p>
-  * It's recommended to use GridGain's logger injection instead of 
using/instantiating
+  * It's recommended to use Ignite's logger injection instead of 
using/instantiating
 - * logger in your task/job code. See {@link 
org.apache.ignite.resources.IgniteLoggerResource} annotation about logger
 + * logger in your task/job code. See {@link 
org.apache.ignite.resources.LoggerResource} annotation about logger
   * injection.
   */
  public class GridSlf4jLogger implements IgniteLogger {
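
The injection that the updated Javadoc points to looks roughly like the sketch below. LoggerResource and IgniteLogger appear in this commit; the IgniteCallable job type and its body are assumptions made for illustration:

    import org.apache.ignite.IgniteLogger;
    import org.apache.ignite.lang.IgniteCallable;
    import org.apache.ignite.resources.LoggerResource;

    /** Job that relies on logger injection instead of instantiating a logger itself. */
    public class InjectedLoggerJob implements IgniteCallable<String> {
        /** Injected by the grid before the job executes. */
        @LoggerResource
        private IgniteLogger log;

        @Override public String call() {
            if (log.isDebugEnabled())
                log.debug("Running with an injected logger.");

            return "done";
        }
    }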
