http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/b08492a5/modules/core/src/main/java/org/apache/ignite/configuration/IgniteFsConfiguration.java
----------------------------------------------------------------------
diff --cc 
modules/core/src/main/java/org/apache/ignite/configuration/IgniteFsConfiguration.java
index 4b3b455,0000000..87068d1
mode 100644,000000..100644
--- 
a/modules/core/src/main/java/org/apache/ignite/configuration/IgniteFsConfiguration.java
+++ 
b/modules/core/src/main/java/org/apache/ignite/configuration/IgniteFsConfiguration.java
@@@ -1,807 -1,0 +1,807 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *      http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.ignite.configuration;
 +
 +import org.apache.ignite.ignitefs.*;
 +import org.apache.ignite.internal.util.typedef.internal.*;
 +import org.jetbrains.annotations.*;
 +
 +import java.util.*;
 +import java.util.concurrent.*;
 +
 +/**
 + * {@code GGFS} configuration. More than one file system can be configured 
within grid.
 + * {@code GGFS} configuration is provided via {@link 
org.apache.ignite.configuration.IgniteConfiguration#getGgfsConfiguration()}
 + * method.
 + * <p>
 + * Refer to {@code config/hadoop/default-config.xml} or {@code 
config/hadoop/default-config-client.xml}
-  * configuration files under GridGain installation to see sample {@code GGFS} 
configuration.
++ * configuration files under Ignite installation to see sample {@code GGFS} 
configuration.
 + */
 +public class IgniteFsConfiguration {
 +    /** Default file system user name. */
 +    public static final String DFLT_USER_NAME = 
System.getProperty("user.name", "anonymous");
 +
 +    /** Default IPC port. */
 +    public static final int DFLT_IPC_PORT = 10500;
 +
 +    /** Default fragmentizer throttling block length. */
 +    public static final long DFLT_FRAGMENTIZER_THROTTLING_BLOCK_LENGTH = 16 * 
1024 * 1024;
 +
 +    /** Default fragmentizer throttling delay. */
 +    public static final long DFLT_FRAGMENTIZER_THROTTLING_DELAY = 200;
 +
 +    /** Default fragmentizer concurrent files. */
 +    public static final int DFLT_FRAGMENTIZER_CONCURRENT_FILES = 0;
 +
 +    /** Default fragmentizer local writes ratio. */
 +    public static final float DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO = 0.8f;
 +
 +    /** Fragmentizer enabled property. */
 +    public static final boolean DFLT_FRAGMENTIZER_ENABLED = true;
 +
 +    /** Default batch size for logging. */
 +    public static final int DFLT_GGFS_LOG_BATCH_SIZE = 100;
 +
 +    /** Default {@code GGFS} log directory. */
 +    public static final String DFLT_GGFS_LOG_DIR = "work/ggfs/log";
 +
 +    /** Default per node buffer size. */
 +    public static final int DFLT_PER_NODE_BATCH_SIZE = 100;
 +
 +    /** Default number of per node parallel operations. */
 +    public static final int DFLT_PER_NODE_PARALLEL_BATCH_CNT = 8;
 +
 +    /** Default GGFS mode. */
 +    public static final IgniteFsMode DFLT_MODE = IgniteFsMode.DUAL_ASYNC;
 +
 +    /** Default file's data block size (bytes). */
 +    public static final int DFLT_BLOCK_SIZE = 1 << 16;
 +
 +    /** Default read/write buffers size (bytes). */
 +    public static final int DFLT_BUF_SIZE = 1 << 16;
 +
 +    /** Default trash directory purge await timeout in case data cache 
oversize is detected. */
 +    public static final long DFLT_TRASH_PURGE_TIMEOUT = 1000;
 +
 +    /** Default management port. */
 +    public static final int DFLT_MGMT_PORT = 11400;
 +
 +    /** Default IPC endpoint enabled flag. */
 +    public static final boolean DFLT_IPC_ENDPOINT_ENABLED = true;
 +
 +    /** GGFS instance name. */
 +    private String name;
 +
 +    /** Cache name to store GGFS meta information. */
 +    private String metaCacheName;
 +
 +    /** Cache name to store file's data blocks. */
 +    private String dataCacheName;
 +
 +    /** File's data block size (bytes). */
 +    private int blockSize = DFLT_BLOCK_SIZE;
 +
 +    /** The number of pre-fetched blocks if specific file's chunk is 
requested. */
 +    private int prefetchBlocks;
 +
 +    /** Amount of sequential block reads before prefetch is triggered. */
 +    private int seqReadsBeforePrefetch;
 +
 +    /** Read/write buffers size for stream operations (bytes). */
 +    private int bufSize = DFLT_BUF_SIZE;
 +
 +    /** Per node buffer size. */
 +    private int perNodeBatchSize = DFLT_PER_NODE_BATCH_SIZE;
 +
 +    /** Per node parallel operations. */
 +    private int perNodeParallelBatchCnt = DFLT_PER_NODE_PARALLEL_BATCH_CNT;
 +
 +    /** IPC endpoint properties to publish GGFS over. */
 +    private Map<String, String> ipcEndpointCfg;
 +
 +    /** IPC endpoint enabled flag. */
 +    private boolean ipcEndpointEnabled = DFLT_IPC_ENDPOINT_ENABLED;
 +
 +    /** Management port. */
 +    private int mgmtPort = DFLT_MGMT_PORT;
 +
 +    /** Secondary file system */
 +    private IgniteFsFileSystem secondaryFs;
 +
 +    /** GGFS mode. */
 +    private IgniteFsMode dfltMode = DFLT_MODE;
 +
 +    /** Fragmentizer throttling block length. */
 +    private long fragmentizerThrottlingBlockLen = 
DFLT_FRAGMENTIZER_THROTTLING_BLOCK_LENGTH;
 +
 +    /** Fragmentizer throttling delay. */
 +    private long fragmentizerThrottlingDelay = 
DFLT_FRAGMENTIZER_THROTTLING_DELAY;
 +
 +    /** Fragmentizer concurrent files. */
 +    private int fragmentizerConcurrentFiles = 
DFLT_FRAGMENTIZER_CONCURRENT_FILES;
 +
 +    /** Fragmentizer local writes ratio. */
 +    private float fragmentizerLocWritesRatio = 
DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO;
 +
 +    /** Fragmentizer enabled flag. */
 +    private boolean fragmentizerEnabled = DFLT_FRAGMENTIZER_ENABLED;
 +
 +    /** Path modes. */
 +    private Map<String, IgniteFsMode> pathModes;
 +
 +    /** Maximum space. */
 +    private long maxSpace;
 +
 +    /** Trash purge await timeout. */
 +    private long trashPurgeTimeout = DFLT_TRASH_PURGE_TIMEOUT;
 +
 +    /** Dual mode PUT operations executor service. */
 +    private ExecutorService dualModePutExec;
 +
 +    /** Dual mode PUT operations executor service shutdown flag. */
 +    private boolean dualModePutExecShutdown;
 +
 +    /** Maximum amount of data in pending puts. */
 +    private long dualModeMaxPendingPutsSize;
 +
 +    /** Maximum range length. */
 +    private long maxTaskRangeLen;
 +
 +    /**
 +     * Constructs default configuration.
 +     */
 +    public IgniteFsConfiguration() {
 +        // No-op.
 +    }
 +
 +    /**
 +     * Constructs the copy of the configuration.
 +     *
 +     * @param cfg Configuration to copy.
 +     */
 +    public IgniteFsConfiguration(IgniteFsConfiguration cfg) {
 +        assert cfg != null;
 +
 +        /*
 +         * Must preserve alphabetical order!
 +         */
 +        blockSize = cfg.getBlockSize();
 +        bufSize = cfg.getStreamBufferSize();
 +        dataCacheName = cfg.getDataCacheName();
 +        dfltMode = cfg.getDefaultMode();
 +        dualModeMaxPendingPutsSize = cfg.getDualModeMaxPendingPutsSize();
 +        dualModePutExec = cfg.getDualModePutExecutorService();
 +        dualModePutExecShutdown = cfg.getDualModePutExecutorServiceShutdown();
 +        fragmentizerConcurrentFiles = cfg.getFragmentizerConcurrentFiles();
 +        fragmentizerLocWritesRatio = cfg.getFragmentizerLocalWritesRatio();
 +        fragmentizerEnabled = cfg.isFragmentizerEnabled();
 +        fragmentizerThrottlingBlockLen = 
cfg.getFragmentizerThrottlingBlockLength();
 +        fragmentizerThrottlingDelay = cfg.getFragmentizerThrottlingDelay();
 +        secondaryFs = cfg.getSecondaryFileSystem();
 +        ipcEndpointCfg = cfg.getIpcEndpointConfiguration();
 +        ipcEndpointEnabled = cfg.isIpcEndpointEnabled();
 +        maxSpace = cfg.getMaxSpaceSize();
 +        maxTaskRangeLen = cfg.getMaximumTaskRangeLength();
 +        metaCacheName = cfg.getMetaCacheName();
 +        mgmtPort = cfg.getManagementPort();
 +        name = cfg.getName();
 +        pathModes = cfg.getPathModes();
 +        perNodeBatchSize = cfg.getPerNodeBatchSize();
 +        perNodeParallelBatchCnt = cfg.getPerNodeParallelBatchCount();
 +        prefetchBlocks = cfg.getPrefetchBlocks();
 +        seqReadsBeforePrefetch = cfg.getSequentialReadsBeforePrefetch();
 +        trashPurgeTimeout = cfg.getTrashPurgeTimeout();
 +    }
 +
 +    /**
 +     * Gets GGFS instance name. If {@code null}, then instance with default
 +     * name will be used.
 +     *
 +     * @return GGFS instance name.
 +     */
 +    @Nullable public String getName() {
 +        return name;
 +    }
 +
 +    /**
 +     * Sets GGFS instance name.
 +     *
 +     * @param name GGFS instance name.
 +     */
 +    public void setName(String name) {
 +        this.name = name;
 +    }
 +
 +    /**
 +     * Cache name to store GGFS meta information. If {@code null}, then 
instance
 +     * with default meta-cache name will be used.
 +     *
 +     * @return Cache name to store GGFS meta information.
 +     */
 +    @Nullable public String getMetaCacheName() {
 +        return metaCacheName;
 +    }
 +
 +    /**
 +     * Sets cache name to store GGFS meta information.
 +     *
 +     * @param metaCacheName Cache name to store GGFS meta information.
 +     */
 +    public void setMetaCacheName(String metaCacheName) {
 +        this.metaCacheName = metaCacheName;
 +    }
 +
 +    /**
 +     * Cache name to store GGFS data.
 +     *
 +     * @return Cache name to store GGFS data.
 +     */
 +    @Nullable public String getDataCacheName() {
 +        return dataCacheName;
 +    }
 +
 +    /**
 +     * Sets cache name to store GGFS data.
 +     *
 +     * @param dataCacheName Cache name to store GGFS data.
 +     */
 +    public void setDataCacheName(String dataCacheName) {
 +        this.dataCacheName = dataCacheName;
 +    }
 +
 +    /**
 +     * Get file's data block size.
 +     *
 +     * @return File's data block size.
 +     */
 +    public int getBlockSize() {
 +        return blockSize;
 +    }
 +
 +    /**
 +     * Sets file's data block size.
 +     *
 +     * @param blockSize File's data block size (bytes) or {@code 0} to reset 
default value.
 +     */
 +    public void setBlockSize(int blockSize) {
 +        A.ensure(blockSize >= 0, "blockSize >= 0");
 +
 +        this.blockSize = blockSize == 0 ? DFLT_BLOCK_SIZE : blockSize;
 +    }
 +
 +    /**
 +     * Get number of pre-fetched blocks if specific file's chunk is requested.
 +     *
 +     * @return The number of pre-fetched blocks.
 +     */
 +    public int getPrefetchBlocks() {
 +        return prefetchBlocks;
 +    }
 +
 +    /**
 +     * Sets the number of pre-fetched blocks if specific file's chunk is 
requested.
 +     *
 +     * @param prefetchBlocks New number of pre-fetched blocks.
 +     */
 +    public void setPrefetchBlocks(int prefetchBlocks) {
 +        A.ensure(prefetchBlocks >= 0, "prefetchBlocks >= 0");
 +
 +        this.prefetchBlocks = prefetchBlocks;
 +    }
 +
 +    /**
 +     * Get amount of sequential block reads before prefetch is triggered. The
 +     * higher this value, the longer GGFS will wait before starting to 
prefetch
 +     * values ahead of time. Depending on the use case, this can either help
 +     * or hurt performance.
 +     * <p>
 +     * Default is {@code 0} which means that pre-fetching will start right 
away.
 +     * <h1 class="header">Integration With Hadoop</h1>
 +     * This parameter can be also overridden for individual Hadoop MapReduce 
tasks by passing
 +     * {@code 
org.apache.ignite.ignitefs.hadoop.GridGgfsHadoopParameters.PARAM_GGFS_SEQ_READS_BEFORE_PREFETCH}
 +     * configuration property directly to Hadoop MapReduce task.
 +     * <p>
 +     * <b>NOTE:</b> Integration with Hadoop is available only in {@code 
In-Memory Accelerator For Hadoop} edition.
 +     *
 +     * @return Amount of sequential block reads.
 +     */
 +    public int getSequentialReadsBeforePrefetch() {
 +        return seqReadsBeforePrefetch;
 +    }
 +
 +    /**
 +     * Sets amount of sequential block reads before prefetch is triggered. The
 +     * higher this value, the longer GGFS will wait before starting to 
prefetch
 +     * values ahead of time. Depending on the use case, this can either help
 +     * or hurt performance.
 +     * <p>
 +     * Default is {@code 0} which means that pre-fetching will start right 
away.
 +     * <h1 class="header">Integration With Hadoop</h1>
 +     * This parameter can be also overridden for individual Hadoop MapReduce 
tasks by passing
 +     * {@code 
org.apache.ignite.ignitefs.hadoop.GridGgfsHadoopParameters.PARAM_GGFS_SEQ_READS_BEFORE_PREFETCH}
 +     * configuration property directly to Hadoop MapReduce task.
 +     * <p>
 +     * <b>NOTE:</b> Integration with Hadoop is available only in {@code 
In-Memory Accelerator For Hadoop} edition.
 +     *
 +     * @param seqReadsBeforePrefetch Amount of sequential block reads before 
prefetch is triggered.
 +     */
 +    public void setSequentialReadsBeforePrefetch(int seqReadsBeforePrefetch) {
 +        A.ensure(seqReadsBeforePrefetch >= 0, "seqReadsBeforePrefetch >= 0");
 +
 +        this.seqReadsBeforePrefetch = seqReadsBeforePrefetch;
 +    }
 +
 +    /**
 +     * Get read/write buffer size for {@code GGFS} stream operations in bytes.
 +     *
 +     * @return Read/write buffers size (bytes).
 +     */
 +    public int getStreamBufferSize() {
 +        return bufSize;
 +    }
 +
 +    /**
 +     * Sets read/write buffers size for {@code GGFS} stream operations 
(bytes).
 +     *
 +     * @param bufSize Read/write buffers size for stream operations (bytes) 
or {@code 0} to reset default value.
 +     */
 +    public void setStreamBufferSize(int bufSize) {
 +        A.ensure(bufSize >= 0, "bufSize >= 0");
 +
 +        this.bufSize = bufSize == 0 ? DFLT_BUF_SIZE : bufSize;
 +    }
 +
 +    /**
 +     * Gets number of file blocks buffered on local node before sending batch 
to remote node.
 +     *
 +     * @return Per node buffer size.
 +     */
 +    public int getPerNodeBatchSize() {
 +        return perNodeBatchSize;
 +    }
 +
 +    /**
 +     * Sets number of file blocks collected on local node before sending 
batch to remote node.
 +     *
 +     * @param perNodeBatchSize Per node buffer size.
 +     */
 +    public void setPerNodeBatchSize(int perNodeBatchSize) {
 +        this.perNodeBatchSize = perNodeBatchSize;
 +    }
 +
 +    /**
 +     * Gets number of batches that can be concurrently sent to remote node.
 +     *
 +     * @return Number of batches for each node.
 +     */
 +    public int getPerNodeParallelBatchCount() {
 +        return perNodeParallelBatchCnt;
 +    }
 +
 +    /**
 +     * Sets number of file block batches that can be concurrently sent to 
remote node.
 +     *
 +     * @param perNodeParallelBatchCnt Per node parallel load operations.
 +     */
 +    public void setPerNodeParallelBatchCount(int perNodeParallelBatchCnt) {
 +        this.perNodeParallelBatchCnt = perNodeParallelBatchCnt;
 +    }
 +
 +    /**
 +     * Gets map of IPC endpoint configuration properties. There are 2 
different
 +     * types of endpoint supported: {@code shared-memory}, and {@code TCP}.
 +     * <p>
 +     * The following configuration properties are supported for {@code 
shared-memory}
 +     * endpoint:
 +     * <ul>
 +     *     <li>{@code type} - value is {@code shmem} to specify {@code 
shared-memory} approach.</li>
 +     *     <li>{@code port} - endpoint port.</li>
 +     *     <li>{@code size} - memory size allocated for single endpoint 
communication.</li>
 +     *     <li>
 +     *         {@code tokenDirectoryPath} - path, either absolute or relative 
to {@code IGNITE_HOME} to
 +     *         store shared memory tokens.
 +     *     </li>
 +     * </ul>
 +     * <p>
 +     * The following configuration properties are supported for {@code TCP} 
approach:
 +     * <ul>
 +     *     <li>{@code type} - value is {@code tcp} to specify {@code TCP} 
approach.</li>
 +     *     <li>{@code port} - endpoint bind port.</li>
 +     *     <li>
 +     *         {@code host} - endpoint bind host. If omitted '127.0.0.1' will 
be used.
 +     *     </li>
 +     * </ul>
 +     * <p>
 +     * Note that {@code shared-memory} approach is not supported on Windows 
environments.
 +     * In case GGFS fails to bind to a particular port, further attempts 
will be performed every 3 seconds.
 +     *
 +     * @return Map of IPC endpoint configuration properties. In case the 
value is not set, defaults will be used. Default
 +     * type for Windows is "tcp", for all other platforms - "shmem". Default 
port is {@link #DFLT_IPC_PORT}.
 +     */
 +    @Nullable public Map<String,String> getIpcEndpointConfiguration() {
 +        return ipcEndpointCfg;
 +    }
 +
 +    /**
 +     * Sets IPC endpoint configuration to publish GGFS over.
 +     *
 +     * @param ipcEndpointCfg Map of IPC endpoint config properties.
 +     */
 +    public void setIpcEndpointConfiguration(@Nullable Map<String,String> 
ipcEndpointCfg) {
 +        this.ipcEndpointCfg = ipcEndpointCfg;
 +    }
 +
 +    /**
 +     * Get IPC endpoint enabled flag. In case it is set to {@code true} 
endpoint will be created and bound to specific
 +     * port. Otherwise endpoint will not be created. Default value is {@link 
#DFLT_IPC_ENDPOINT_ENABLED}.
 +     *
 +     * @return {@code True} in case endpoint is enabled.
 +     */
 +    public boolean isIpcEndpointEnabled() {
 +        return ipcEndpointEnabled;
 +    }
 +
 +    /**
 +     * Set IPC endpoint enabled flag. See {@link #isIpcEndpointEnabled()}.
 +     *
 +     * @param ipcEndpointEnabled IPC endpoint enabled flag.
 +     */
 +    public void setIpcEndpointEnabled(boolean ipcEndpointEnabled) {
 +        this.ipcEndpointEnabled = ipcEndpointEnabled;
 +    }
 +
 +    /**
 +     * Gets port number for management endpoint. All GGFS nodes should have 
this port open
 +     * for Visor Management Console to work with GGFS.
 +     * <p>
 +     * Default value is {@link #DFLT_MGMT_PORT}
 +     *
 +     * @return Port number or {@code -1} if management endpoint should be 
disabled.
 +     */
 +    public int getManagementPort() {
 +        return mgmtPort;
 +    }
 +
 +    /**
 +     * Sets management endpoint port.
 +     *
 +     * @param mgmtPort port number or {@code -1} to disable management 
endpoint.
 +     */
 +    public void setManagementPort(int mgmtPort) {
 +        this.mgmtPort = mgmtPort;
 +    }
 +
 +    /**
 +     * Gets mode to specify how {@code GGFS} interacts with Hadoop file 
system, like {@code HDFS}.
 +     * Secondary Hadoop file system is provided for pass-through, 
write-through, and read-through
 +     * purposes.
 +     * <p>
 +     * Default mode is {@link IgniteFsMode#DUAL_ASYNC}. If secondary Hadoop 
file system is
 +     * not configured, this mode will work just like {@link 
IgniteFsMode#PRIMARY} mode.
 +     *
 +     * @return Mode to specify how GGFS interacts with secondary HDFS file 
system.
 +     */
 +    public IgniteFsMode getDefaultMode() {
 +        return dfltMode;
 +    }
 +
 +    /**
 +     * Sets {@code GGFS} mode to specify how it should interact with secondary
 +     * Hadoop file system, like {@code HDFS}. Secondary Hadoop file system is 
provided
 +     * for pass-through, write-through, and read-through purposes.
 +     *
 +     * @param dfltMode {@code GGFS} mode.
 +     */
 +    public void setDefaultMode(IgniteFsMode dfltMode) {
 +        this.dfltMode = dfltMode;
 +    }
 +
 +    /**
 +     * Gets the secondary file system. Secondary file system is provided for 
pass-through, write-through,
 +     * and read-through purposes.
 +     *
 +     * @return Secondary file system.
 +     */
 +    public IgniteFsFileSystem getSecondaryFileSystem() {
 +        return secondaryFs;
 +    }
 +
 +    /**
 +     * Sets the secondary file system. Secondary file system is provided for 
pass-through, write-through,
 +     * and read-through purposes.
 +     *
 +     * @param fileSystem Secondary file system to set.
 +     */
 +    public void setSecondaryFileSystem(IgniteFsFileSystem fileSystem) {
 +        secondaryFs = fileSystem;
 +    }
 +
 +    /**
 +     * Gets map of path prefixes to {@code GGFS} modes used for them.
 +     * <p>
 +     * If path doesn't correspond to any specified prefix or mappings are not 
provided, then
 +     * {@link #getDefaultMode()} is used.
 +     * <p>
-      * Several folders under {@code '/gridgain'} folder have predefined 
mappings which cannot be overridden.
-      * <li>{@code /gridgain/primary} and all it's sub-folders will always 
work in {@code PRIMARY} mode.</li>
++     * Several folders under {@code '/apache/ignite'} folder have predefined 
mappings which cannot be overridden.
++     * <li>{@code /apache/ignite/primary} and all its sub-folders will 
always work in {@code PRIMARY} mode.</li>
 +     * <p>
 +     * And in case secondary file system URI is provided:
-      * <li>{@code /gridgain/proxy} and all it's sub-folders will always work 
in {@code PROXY} mode.</li>
-      * <li>{@code /gridgain/sync} and all it's sub-folders will always work 
in {@code DUAL_SYNC} mode.</li>
-      * <li>{@code /gridgain/async} and all it's sub-folders will always work 
in {@code DUAL_ASYNC} mode.</li>
++     * <li>{@code /apache/ignite/proxy} and all its sub-folders will always 
work in {@code PROXY} mode.</li>
++     * <li>{@code /apache/ignite/sync} and all its sub-folders will always 
work in {@code DUAL_SYNC} mode.</li>
++     * <li>{@code /apache/ignite/async} and all its sub-folders will always 
work in {@code DUAL_ASYNC} mode.</li>
 +     *
 +     * @return Map of paths to {@code GGFS} modes.
 +     */
 +    @Nullable public Map<String, IgniteFsMode> getPathModes() {
 +        return pathModes;
 +    }
 +
 +    /**
 +     * Sets map of path prefixes to {@code GGFS} modes used for them.
 +     * <p>
 +     * If path doesn't correspond to any specified prefix or mappings are not 
provided, then
 +     * {@link #getDefaultMode()} is used.
 +     *
 +     * @param pathModes Map of paths to {@code GGFS} modes.
 +     */
 +    public void setPathModes(Map<String, IgniteFsMode> pathModes) {
 +        this.pathModes = pathModes;
 +    }
 +
 +    /**
 +     * Gets the length of file chunk to send before delaying the fragmentizer.
 +     *
 +     * @return File chunk length in bytes.
 +     */
 +    public long getFragmentizerThrottlingBlockLength() {
 +        return fragmentizerThrottlingBlockLen;
 +    }
 +
 +    /**
 +     * Sets length of file chunk to transmit before throttling is delayed.
 +     *
 +     * @param fragmentizerThrottlingBlockLen Block length in bytes.
 +     */
 +    public void setFragmentizerThrottlingBlockLength(long 
fragmentizerThrottlingBlockLen) {
 +        this.fragmentizerThrottlingBlockLen = fragmentizerThrottlingBlockLen;
 +    }
 +
 +    /**
 +     * Gets throttle delay for fragmentizer.
 +     *
 +     * @return Throttle delay in milliseconds.
 +     */
 +    public long getFragmentizerThrottlingDelay() {
 +        return fragmentizerThrottlingDelay;
 +    }
 +
 +    /**
 +     * Sets delay in milliseconds for which fragmentizer is paused.
 +     *
 +     * @param fragmentizerThrottlingDelay Delay in milliseconds.
 +     */
 +    public void setFragmentizerThrottlingDelay(long 
fragmentizerThrottlingDelay) {
 +        this.fragmentizerThrottlingDelay = fragmentizerThrottlingDelay;
 +    }
 +
 +    /**
 +     * Gets number of files that can be processed by fragmentizer 
concurrently.
 +     *
 +     * @return Number of files to process concurrently.
 +     */
 +    public int getFragmentizerConcurrentFiles() {
 +        return fragmentizerConcurrentFiles;
 +    }
 +
 +    /**
 +     * Sets number of files to process concurrently by fragmentizer.
 +     *
 +     * @param fragmentizerConcurrentFiles Number of files to process 
concurrently.
 +     */
 +    public void setFragmentizerConcurrentFiles(int 
fragmentizerConcurrentFiles) {
 +        this.fragmentizerConcurrentFiles = fragmentizerConcurrentFiles;
 +    }
 +
 +    /**
 +     * Gets amount of local memory (in % of local GGFS max space size) 
available for local writes
 +     * during file creation.
 +     * <p>
 +     * If current GGFS space size is less than {@code 
fragmentizerLocalWritesRatio * maxSpaceSize},
 +     * then file blocks will be written to the local node first and then 
asynchronously distributed
 +     * among cluster nodes (fragmentized).
 +     * <p>
 +     * Default value is {@link #DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO}.
 +     *
 +     * @return Ratio for local writes space.
 +     */
 +    public float getFragmentizerLocalWritesRatio() {
 +        return fragmentizerLocWritesRatio;
 +    }
 +
 +    /**
 +     * Sets ratio for space available for local file writes.
 +     *
 +     * @param fragmentizerLocWritesRatio Ratio for local file writes.
 +     * @see #getFragmentizerLocalWritesRatio()
 +     */
 +    public void setFragmentizerLocalWritesRatio(float 
fragmentizerLocWritesRatio) {
 +        this.fragmentizerLocWritesRatio = fragmentizerLocWritesRatio;
 +    }
 +
 +    /**
 +     * Gets flag indicating whether GGFS fragmentizer is enabled. If 
fragmentizer is disabled, files will be
 +     * written in distributed fashion.
 +     *
 +     * @return Flag indicating whether fragmentizer is enabled.
 +     */
 +    public boolean isFragmentizerEnabled() {
 +        return fragmentizerEnabled;
 +    }
 +
 +    /**
 +     * Sets property indicating whether fragmentizer is enabled.
 +     *
 +     * @param fragmentizerEnabled {@code True} if fragmentizer is enabled.
 +     */
 +    public void setFragmentizerEnabled(boolean fragmentizerEnabled) {
 +        this.fragmentizerEnabled = fragmentizerEnabled;
 +    }
 +
 +    /**
 +     * Get maximum space available for data cache to store file system 
entries.
 +     *
 +     * @return Maximum space available for data cache.
 +     */
 +    public long getMaxSpaceSize() {
 +        return maxSpace;
 +    }
 +
 +    /**
 +     * Set maximum space in bytes available in data cache.
 +     *
 +     * @param maxSpace Maximum space available in data cache.
 +     */
 +    public void setMaxSpaceSize(long maxSpace) {
 +        this.maxSpace = maxSpace;
 +    }
 +
 +    /**
 +     * Gets maximum timeout awaiting for trash purging in case data cache 
oversize is detected.
 +     *
 +     * @return Maximum timeout awaiting for trash purging in case data cache 
oversize is detected.
 +     */
 +    public long getTrashPurgeTimeout() {
 +        return trashPurgeTimeout;
 +    }
 +
 +    /**
 +     * Sets maximum timeout awaiting for trash purging in case data cache 
oversize is detected.
 +     *
 +     * @param trashPurgeTimeout Maximum timeout awaiting for trash purging in 
case data cache oversize is detected.
 +     */
 +    public void setTrashPurgeTimeout(long trashPurgeTimeout) {
 +        this.trashPurgeTimeout = trashPurgeTimeout;
 +    }
 +
 +    /**
 +     * Get DUAL mode put operation executor service. This executor service 
will process cache PUT requests for
 +     * data which came from the secondary file system and about to be written 
to GGFS data cache.
 +     * In case no executor service is provided, default one will be created 
with maximum amount of threads equals
 +     * to amount of processor cores.
 +     *
 +     * @return Get DUAL mode put operation executor service
 +     */
 +    @Nullable public ExecutorService getDualModePutExecutorService() {
 +        return dualModePutExec;
 +    }
 +
 +    /**
 +     * Set DUAL mode put operations executor service.
 +     *
 +     * @param dualModePutExec Dual mode put operations executor service.
 +     */
 +    public void setDualModePutExecutorService(ExecutorService 
dualModePutExec) {
 +        this.dualModePutExec = dualModePutExec;
 +    }
 +
 +    /**
 +     * Get DUAL mode put operation executor service shutdown flag.
 +     *
 +     * @return DUAL mode put operation executor service shutdown flag.
 +     */
 +    public boolean getDualModePutExecutorServiceShutdown() {
 +        return dualModePutExecShutdown;
 +    }
 +
 +    /**
 +     * Set DUAL mode put operations executor service shutdown flag.
 +     *
 +     * @param dualModePutExecShutdown Dual mode put operations executor 
service shutdown flag.
 +     */
 +    public void setDualModePutExecutorServiceShutdown(boolean 
dualModePutExecShutdown) {
 +        this.dualModePutExecShutdown = dualModePutExecShutdown;
 +    }
 +
 +    /**
 +     * Get maximum amount of pending data read from the secondary file system 
and waiting to be written to data
 +     * cache. {@code 0} or negative value stands for unlimited size.
 +     * <p>
 +     * By default this value is set to {@code 0}. It is recommended to set 
positive value in case your
 +     * application performs frequent reads of large amount of data from the 
secondary file system in order to
 +     * avoid issues with increasing GC pauses or out-of-memory error.
 +     *
 +     * @return Maximum amount of pending data read from the secondary file 
system
 +     */
 +    public long getDualModeMaxPendingPutsSize() {
 +        return dualModeMaxPendingPutsSize;
 +    }
 +
 +    /**
 +     * Set maximum amount of data in pending put operations.
 +     *
 +     * @param dualModeMaxPendingPutsSize Maximum amount of data in pending 
put operations.
 +     */
 +    public void setDualModeMaxPendingPutsSize(long 
dualModeMaxPendingPutsSize) {
 +        this.dualModeMaxPendingPutsSize = dualModeMaxPendingPutsSize;
 +    }
 +
 +    /**
 +     * Get maximum default range size of a file being split during GGFS task 
execution. When GGFS task is about to
 +     * be executed, it requests file block locations first. Each location is 
defined as {@link org.apache.ignite.ignitefs.mapreduce.IgniteFsFileRange} which
 +     * has length. In case this parameter is set to positive value, then GGFS 
will split single file range into smaller
 +     * ranges with length not greater than this parameter. The only exception 
to this case is when maximum task range
 +     * length is smaller than file block size. In this case maximum task 
range size will be overridden and set to file
 +     * block size.
 +     * <p>
 +     * Note that this parameter is applied when task is split into jobs 
before {@link org.apache.ignite.ignitefs.mapreduce.IgniteFsRecordResolver} is
 +     * applied. Therefore, final file ranges being assigned to particular 
jobs could be greater than value of this
 +     * parameter depending on file data layout and selected resolver type.
 +     * <p>
 +     * Setting this parameter might be useful when a file is highly colocated 
and has very long consecutive data chunks
 +     * so that task execution suffers from insufficient parallelism. E.g., in 
case you have one GGFS node in topology
 +     * and want to process 1Gb file, then only single range of length 1Gb 
will be returned. This will result in
 +     * a single job which will be processed in one thread. But in case you 
provide this configuration parameter and set
 +     * maximum range length to 16Mb, then 64 ranges will be returned 
resulting in 64 jobs which could be executed in
 +     * parallel.
 +     * <p>
 +     * Note that some {@code GridGgfs.execute()} methods can override value 
of this parameter.
 +     * <p>
 +     * In case value of this parameter is set to {@code 0} or negative value, 
it is simply ignored. Default value is
 +     * {@code 0}.
 +     *
 +     * @return Maximum range size of a file being split during GGFS task 
execution.
 +     */
 +    public long getMaximumTaskRangeLength() {
 +        return maxTaskRangeLen;
 +    }
 +
 +    /**
 +     * Set maximum default range size of a file being split during GGFS task 
execution.
 +     * See {@link #getMaximumTaskRangeLength()} for more details.
 +     *
 +     * @param maxTaskRangeLen Set maximum default range size of a file being 
split during GGFS task execution.
 +     */
 +    public void setMaximumTaskRangeLength(long maxTaskRangeLen) {
 +        this.maxTaskRangeLen = maxTaskRangeLen;
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override public String toString() {
 +        return S.toString(IgniteFsConfiguration.class, this);
 +    }
 +}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/b08492a5/modules/core/src/main/java/org/apache/ignite/configuration/QueryConfiguration.java
----------------------------------------------------------------------
diff --cc 
modules/core/src/main/java/org/apache/ignite/configuration/QueryConfiguration.java
index 804cc04,0000000..00cd7bc
mode 100644,000000..100644
--- 
a/modules/core/src/main/java/org/apache/ignite/configuration/QueryConfiguration.java
+++ 
b/modules/core/src/main/java/org/apache/ignite/configuration/QueryConfiguration.java
@@@ -1,201 -1,0 +1,201 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *      http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.ignite.configuration;
 +
 +import org.apache.ignite.internal.util.typedef.internal.*;
 +import org.jetbrains.annotations.*;
 +
 +/**
 + * Configuration for embedded indexing facilities.
 + */
 +public class QueryConfiguration {
 +    /** Default query execution time interpreted as long query (3 seconds). */
 +    public static final long DFLT_LONG_QRY_EXEC_TIMEOUT = 3000;
 +
 +    /** Default value for {@link #setUseOptimizedSerializer(boolean)} flag. */
 +    public static final boolean DFLT_USE_OPTIMIZED_SERIALIZER = true;
 +
 +    /** */
 +    private Class<?>[] idxCustomFuncClss;
 +
 +    /** */
 +    private String[] searchPath;
 +
 +    /** */
 +    private String initScriptPath;
 +
 +    /** */
 +    private long maxOffHeapMemory = -1;
 +
 +    /** */
 +    private long longQryExecTimeout = DFLT_LONG_QRY_EXEC_TIMEOUT;
 +
 +    /** */
 +    private boolean longQryExplain;
 +
 +    /** */
 +    private boolean useOptimizedSerializer = DFLT_USE_OPTIMIZED_SERIALIZER;
 +
 +    /**
 +     * Sets maximum amount of memory available to off-heap storage. Possible 
values are
 +     * <ul>
 +     * <li>{@code -1} - Means that off-heap storage is disabled.</li>
 +     * <li>
-      *     {@code 0} - GridGain will not limit off-heap storage (it's up to 
user to properly
++     *     {@code 0} - Ignite will not limit off-heap storage (it's up to 
user to properly
 +     *     add and remove entries from cache to ensure that off-heap storage 
does not grow
 +     *     indefinitely).
 +     * </li>
 +     * <li>Any positive value specifies the limit of off-heap storage in 
bytes.</li>
 +     * </ul>
 +     * Default value is {@code -1}, which means that off-heap storage is 
disabled by default.
 +     * <p>
 +     * Use off-heap storage to load gigabytes of data in memory without 
slowing down
 +     * Garbage Collection. Essentially in this case you should allocate very 
small amount
-      * of memory to JVM and GridGain will cache most of the data in off-heap 
space
++     * of memory to JVM and Ignite will cache most of the data in off-heap 
space
 +     * without affecting JVM performance at all.
 +     *
 +     * @param maxOffHeapMemory Maximum memory in bytes available to off-heap 
memory space.
 +     */
 +    public void setMaxOffHeapMemory(long maxOffHeapMemory) {
 +        this.maxOffHeapMemory = maxOffHeapMemory;
 +    }
 +
 +    /** @return Maximum memory in bytes available to off-heap memory space. */
 +    public long getMaxOffHeapMemory() {
 +        return maxOffHeapMemory;
 +    }
 +
 +    /**
 +     * Specifies max allowed size of cache for deserialized offheap rows to 
avoid deserialization costs for most
 +     * frequently used ones. In general performance is better with greater 
cache size. Must be more than 128 items.
 +     *
 +     * @param size Cache size in items.
 +     */
 +    public void setMaxOffheapRowsCacheSize(int size) {
 +        A.ensure(size >= 128, "Offheap rows cache size must be not less than 
128.");
 +
 +//        rowCache = new CacheLongKeyLIRS<>(size, 1, 128, 256); TODO
 +    }
 +
 +    /**
 +     * Sets the optional search path consisting of space names to search SQL 
schema objects. Useful for cross cache
 +     * queries to avoid writing fully qualified table names.
 +     *
 +     * @param searchPath Search path.
 +     */
 +    public void setSearchPath(String... searchPath) {
 +        this.searchPath = searchPath;
 +    }
 +
 +    /** @return Optional search path consisting of space names. */
 +    @Nullable public String[] getSearchPath() {
 +        return searchPath;
 +    }
 +
 +    /** @return Path of the initialization script to run against H2 after opening. */
 +    @Nullable public String getInitialScriptPath() {
 +        return initScriptPath;
 +    }
 +
 +    /**
 +     * Sets script path to be run against H2 database after opening.
 +     * The script must be UTF-8 encoded file.
 +     *
 +     * @param initScriptPath Script path.
 +     */
 +    public void setInitialScriptPath(String initScriptPath) {
 +        this.initScriptPath = initScriptPath;
 +    }
 +
 +    /**
 +     * Sets classes with methods annotated by {@link 
org.apache.ignite.cache.query.CacheQuerySqlFunction}
 +     * to be used as user-defined functions from SQL queries.
 +     *
 +     * @param idxCustomFuncClss List of classes.
 +     */
 +    public void setIndexCustomFunctionClasses(Class<?>... idxCustomFuncClss) {
 +        this.idxCustomFuncClss = idxCustomFuncClss;
 +    }
 +
 +    /** @return Classes with user-defined SQL functions. */
 +    @Nullable public Class<?>[] getIndexCustomFunctionClasses() {
 +        return idxCustomFuncClss;
 +    }
 +
 +    /** @return Long query execution timeout in milliseconds. */
 +    public long getLongQueryExecutionTimeout() {
 +        return longQryExecTimeout;
 +    }
 +
 +    /**
 +     * Set query execution time threshold. If queries exceed this threshold,
 +     * then a warning will be printed out. If {@link 
#setLongQueryExplain(boolean)} is
 +     * set to {@code true}, then execution plan will be printed out as well.
 +     * <p>
 +     * If not provided, default value is defined by {@link 
#DFLT_LONG_QRY_EXEC_TIMEOUT}.
 +     *
 +     * @param longQryExecTimeout Long query execution timeout.
 +     * @see #setLongQueryExplain(boolean)
 +     */
 +    public void setLongQueryExecutionTimeout(long longQryExecTimeout) {
 +        this.longQryExecTimeout = longQryExecTimeout;
 +    }
 +
 +    /** @return Flag indicating whether SQL execution plan is printed for long queries. */
 +    public boolean isLongQueryExplain() {
 +        return longQryExplain;
 +    }
 +
 +    /**
 +     * If {@code true}, SPI will print SQL execution plan for long queries 
(explain SQL query).
 +     * The time threshold of long queries is controlled via {@link 
#setLongQueryExecutionTimeout(long)}
 +     * parameter.
 +     * <p>
 +     * If not provided, default value is {@code false}.
 +     *
 +     * @param longQryExplain Flag marking SPI should print SQL execution plan 
for long queries (explain SQL query).
 +     * @see #setLongQueryExecutionTimeout(long)
 +     */
 +    public void setLongQueryExplain(boolean longQryExplain) {
 +        this.longQryExplain = longQryExplain;
 +    }
 +
 +    /**
-      * The flag indicating that serializer for H2 database will be set to 
GridGain's marshaller.
++     * The flag indicating that serializer for H2 database will be set to 
Ignite's marshaller.
 +     * This setting usually makes sense for offheap indexing only.
 +     * <p>
 +     * Default is {@link #DFLT_USE_OPTIMIZED_SERIALIZER}.
 +     *
 +     * @param useOptimizedSerializer Flag value.
 +     */
 +    public void setUseOptimizedSerializer(boolean useOptimizedSerializer) {
 +        this.useOptimizedSerializer = useOptimizedSerializer;
 +    }
 +
 +    /**
-      * The flag indicating that serializer for H2 database will be set to 
GridGain's marshaller.
++     * The flag indicating that serializer for H2 database will be set to 
Ignite's marshaller.
 +     * This setting usually makes sense for offheap indexing only.
 +     * <p>
 +     * Default is {@link #DFLT_USE_OPTIMIZED_SERIALIZER}.
 +     *
 +     * @return Flag value.
 +     */
 +    public boolean isUseOptimizedSerializer() {
 +        return useOptimizedSerializer;
 +    }
 +}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/b08492a5/modules/core/src/main/java/org/apache/ignite/events/AuthenticationEvent.java
----------------------------------------------------------------------
diff --cc 
modules/core/src/main/java/org/apache/ignite/events/AuthenticationEvent.java
index ed6c204,0000000..922f7f7
mode 100644,000000..100644
--- 
a/modules/core/src/main/java/org/apache/ignite/events/AuthenticationEvent.java
+++ 
b/modules/core/src/main/java/org/apache/ignite/events/AuthenticationEvent.java
@@@ -1,172 -1,0 +1,172 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *      http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.ignite.events;
 +
 +import org.apache.ignite.cluster.*;
 +import org.apache.ignite.internal.util.tostring.*;
 +import org.apache.ignite.internal.util.typedef.internal.*;
 +import org.apache.ignite.plugin.security.*;
 +
 +import java.util.*;
 +
 +/**
 + * Grid authentication event.
 + * <p>
 + * Grid events are used for notification about what happens within the grid. 
Note that by
-  * design GridGain keeps all events generated on the local node locally and 
it provides
++ * design Ignite keeps all events generated on the local node locally and it 
provides
 + * APIs for performing a distributed queries across multiple nodes:
 + * <ul>
 + *      <li>
 + *          {@link 
org.apache.ignite.IgniteEvents#remoteQuery(org.apache.ignite.lang.IgnitePredicate,
 long, int...)} -
 + *          asynchronously querying events occurred on the nodes specified, 
including remote nodes.
 + *      </li>
 + *      <li>
 + *          {@link 
org.apache.ignite.IgniteEvents#localQuery(org.apache.ignite.lang.IgnitePredicate,
 int...)} -
 + *          querying only local events stored on this local node.
 + *      </li>
 + *      <li>
 + *          {@link 
org.apache.ignite.IgniteEvents#localListen(org.apache.ignite.lang.IgnitePredicate,
 int...)} -
 + *          listening to local grid events (events from remote nodes not 
included).
 + *      </li>
 + * </ul>
 + * User can also wait for events using method {@link 
org.apache.ignite.IgniteEvents#waitForLocal(org.apache.ignite.lang.IgnitePredicate,
 int...)}.
 + * <h1 class="header">Events and Performance</h1>
 + * It is <b>highly recommended</b> to enable only those events that your 
application logic requires
-  * by using {@link 
org.apache.ignite.configuration.IgniteConfiguration#getIncludeEventTypes()} 
method in GridGain configuration. Note that certain
-  * events are required for GridGain's internal operations and such events 
will still be generated but not stored by
-  * event storage SPI if they are disabled in GridGain configuration.
++ * by using {@link 
org.apache.ignite.configuration.IgniteConfiguration#getIncludeEventTypes()} 
method in Ignite configuration. Note that certain
++ * events are required for Ignite's internal operations and such events will 
still be generated but not stored by
++ * event storage SPI if they are disabled in Ignite configuration.
 + * @see EventType#EVT_AUTHENTICATION_FAILED
 + * @see EventType#EVT_AUTHENTICATION_SUCCEEDED
 + */
 +public class AuthenticationEvent extends EventAdapter {
 +    /** */
 +    private static final long serialVersionUID = 0L;
 +
 +    /**  Subject type. */
 +    private GridSecuritySubjectType subjType;
 +
 +    /** Subject ID. */
 +    private UUID subjId;
 +
 +    /** Login. */
 +    @GridToStringInclude
 +    private Object login;
 +
 +    /** {@inheritDoc} */
 +    @Override public String shortDisplay() {
 +        return name() + ": subjType=" + subjType;
 +    }
 +
 +    /**
 +     * No-arg constructor.
 +     */
 +    public AuthenticationEvent() {
 +        // No-op.
 +    }
 +
 +    /**
 +     * Creates authentication event with given parameters.
 +     *
 +     * @param msg Optional message.
 +     * @param type Event type.
 +     */
 +    public AuthenticationEvent(ClusterNode node, String msg, int type) {
 +        super(node, msg, type);
 +    }
 +
 +    /**
 +     * Creates authentication event with given parameters.
 +     *
 +     * @param node Node.
 +     * @param msg Optional message.
 +     * @param type Event type.
 +     * @param subjType Subject type.
 +     * @param subjId Subject ID.
 +     */
 +    public AuthenticationEvent(ClusterNode node, String msg, int type, 
GridSecuritySubjectType subjType,
 +        UUID subjId, Object login) {
 +        super(node, msg, type);
 +
 +        this.subjType = subjType;
 +        this.subjId = subjId;
 +        this.login = login;
 +    }
 +
 +    /**
 +     * Gets subject type that triggered the event.
 +     *
 +     * @return Subject type that triggered the event.
 +     */
 +    public GridSecuritySubjectType subjectType() {
 +        return subjType;
 +    }
 +
 +    /**
 +     * Gets subject ID that triggered the event.
 +     *
 +     * @return Subject ID that triggered the event.
 +     */
 +    public UUID subjectId() {
 +        return subjId;
 +    }
 +
 +    /**
 +     * Sets subject type that triggered the event.
 +     *
 +     * @param subjType Subject type to set.
 +     */
 +    public void subjectType(GridSecuritySubjectType subjType) {
 +        this.subjType = subjType;
 +    }
 +
 +    /**
 +     * Gets login that triggered event.
 +     *
 +     * @return Login object.
 +     */
 +    public Object login() {
 +        return login;
 +    }
 +
 +    /**
 +     * Sets login that triggered event.
 +     *
 +     * @param login Login object.
 +     */
 +    public void login(Object login) {
 +        this.login = login;
 +    }
 +
 +    /**
 +     * Sets subject ID that triggered the event.
 +     *
 +     * @param subjId Subject ID to set.
 +     */
 +    public void subjectId(UUID subjId) {
 +        this.subjId = subjId;
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override public String toString() {
 +        return S.toString(AuthenticationEvent.class, this,
 +            "nodeId8", U.id8(node().id()),
 +            "msg", message(),
 +            "type", name(),
 +            "tstamp", timestamp());
 +    }
 +}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/b08492a5/modules/core/src/main/java/org/apache/ignite/events/AuthorizationEvent.java
----------------------------------------------------------------------
diff --cc 
modules/core/src/main/java/org/apache/ignite/events/AuthorizationEvent.java
index 4685f3e,0000000..9aa1e3b
mode 100644,000000..100644
--- 
a/modules/core/src/main/java/org/apache/ignite/events/AuthorizationEvent.java
+++ 
b/modules/core/src/main/java/org/apache/ignite/events/AuthorizationEvent.java
@@@ -1,146 -1,0 +1,146 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *      http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.ignite.events;
 +
 +import org.apache.ignite.cluster.*;
 +import org.apache.ignite.internal.util.typedef.internal.*;
 +import org.apache.ignite.plugin.security.*;
 +
 +/**
 + * Grid authorization event.
 + * <p>
 + * Grid events are used for notification about what happens within the grid. 
Note that by
-  * design GridGain keeps all events generated on the local node locally and 
it provides
++ * design Ignite keeps all events generated on the local node locally and it 
provides
 + * APIs for performing a distributed queries across multiple nodes:
 + * <ul>
 + *      <li>
 + *          {@link 
org.apache.ignite.IgniteEvents#remoteQuery(org.apache.ignite.lang.IgnitePredicate,
 long, int...)} -
 + *          asynchronously querying events occurred on the nodes specified, 
including remote nodes.
 + *      </li>
 + *      <li>
 + *          {@link 
org.apache.ignite.IgniteEvents#localQuery(org.apache.ignite.lang.IgnitePredicate,
 int...)} -
 + *          querying only local events stored on this local node.
 + *      </li>
 + *      <li>
 + *          {@link 
org.apache.ignite.IgniteEvents#localListen(org.apache.ignite.lang.IgnitePredicate,
 int...)} -
 + *          listening to local grid events (events from remote nodes not 
included).
 + *      </li>
 + * </ul>
 + * User can also wait for events using method {@link 
org.apache.ignite.IgniteEvents#waitForLocal(org.apache.ignite.lang.IgnitePredicate,
 int...)}.
 + * <h1 class="header">Events and Performance</h1>
 + * It is <b>highly recommended</b> to enable only those events that your 
application logic requires
-  * by using {@link 
org.apache.ignite.configuration.IgniteConfiguration#getIncludeEventTypes()} 
method in GridGain configuration. Note that certain
-  * events are required for GridGain's internal operations and such events 
will still be generated but not stored by
++ * by using {@link 
org.apache.ignite.configuration.IgniteConfiguration#getIncludeEventTypes()} 
method in Ignite configuration. Note that certain
++ * events are required for Ignite's internal operations and such events will 
still be generated but not stored by
 + * event storage SPI if they are disabled in Ignite configuration.
 + * @see EventType#EVT_AUTHORIZATION_FAILED
 + * @see EventType#EVT_AUTHORIZATION_SUCCEEDED
 + */
 +public class AuthorizationEvent extends EventAdapter {
 +    /** */
 +    private static final long serialVersionUID = 0L;
 +
 +    /** Requested operation. */
 +    private GridSecurityPermission op;
 +
 +    /** Authenticated subject authorized to perform operation. */
 +    private GridSecuritySubject subj;
 +
 +    /** {@inheritDoc} */
 +    @Override public String shortDisplay() {
 +        return name() + ": op=" + op;
 +    }
 +
 +    /**
 +     * No-arg constructor.
 +     */
 +    public AuthorizationEvent() {
 +        // No-op.
 +    }
 +
 +    /**
 +     * Creates authorization event with given parameters.
 +     *
 +     * @param msg Optional message.
 +     * @param type Event type.
 +     */
 +    public AuthorizationEvent(ClusterNode node, String msg, int type) {
 +        super(node, msg, type);
 +    }
 +
 +    /**
 +     * Creates authorization event with given parameters.
 +     *
 +     * @param node Node.
 +     * @param msg Optional message.
 +     * @param type Event type.
 +     * @param op Requested operation.
 +     * @param subj Authenticated subject.
 +     */
 +    public AuthorizationEvent(ClusterNode node, String msg, int type, 
GridSecurityPermission op,
 +        GridSecuritySubject subj) {
 +        super(node, msg, type);
 +
 +        this.op = op;
 +        this.subj = subj;
 +    }
 +
 +    /**
 +     * Gets requested operation.
 +     *
 +     * @return Requested operation.
 +     */
 +    public GridSecurityPermission operation() {
 +        return op;
 +    }
 +
 +    /**
 +     * Sets requested operation.
 +     *
 +     * @param op Requested operation.
 +     */
 +    public void operation(GridSecurityPermission op) {
 +        this.op = op;
 +    }
 +
 +    /**
 +     * Gets authenticated subject.
 +     *
 +     * @return Authenticated subject.
 +     */
 +    public GridSecuritySubject subject() {
 +        return subj;
 +    }
 +
 +    /**
 +     * Sets authenticated subject.
 +     *
 +     * @param subj Authenticated subject.
 +     */
 +    public void subject(GridSecuritySubject subj) {
 +        this.subj = subj;
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override public String toString() {
 +        return S.toString(AuthorizationEvent.class, this,
 +            "nodeId8", U.id8(node().id()),
 +            "msg", message(),
 +            "type", name(),
 +            "tstamp", timestamp());
 +    }
 +}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/b08492a5/modules/core/src/main/java/org/apache/ignite/events/CacheEvent.java
----------------------------------------------------------------------
diff --cc modules/core/src/main/java/org/apache/ignite/events/CacheEvent.java
index 33e934a,0000000..a380c97
mode 100644,000000..100644
--- a/modules/core/src/main/java/org/apache/ignite/events/CacheEvent.java
+++ b/modules/core/src/main/java/org/apache/ignite/events/CacheEvent.java
@@@ -1,331 -1,0 +1,331 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *      http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.ignite.events;
 +
 +import org.apache.ignite.cluster.*;
 +import org.apache.ignite.internal.util.tostring.*;
 +import org.apache.ignite.internal.util.typedef.internal.*;
 +import org.apache.ignite.lang.*;
 +import org.jetbrains.annotations.*;
 +
 +import java.util.*;
 +
 +/**
 + * In-memory database (cache) event.
 + * <p>
 + * Grid events are used for notification about what happens within the grid. 
Note that by
-  * design GridGain keeps all events generated on the local node locally and 
it provides
++ * design Ignite keeps all events generated on the local node locally and it 
provides
 + * APIs for performing a distributed queries across multiple nodes:
 + * <ul>
 + *      <li>
 + *          {@link 
org.apache.ignite.IgniteEvents#remoteQuery(org.apache.ignite.lang.IgnitePredicate,
 long, int...)} -
 + *          asynchronously querying events occurred on the nodes specified, 
including remote nodes.
 + *      </li>
 + *      <li>
 + *          {@link 
org.apache.ignite.IgniteEvents#localQuery(org.apache.ignite.lang.IgnitePredicate,
 int...)} -
 + *          querying only local events stored on this local node.
 + *      </li>
 + *      <li>
 + *          {@link 
org.apache.ignite.IgniteEvents#localListen(org.apache.ignite.lang.IgnitePredicate,
 int...)} -
 + *          listening to local grid events (events from remote nodes not 
included).
 + *      </li>
 + * </ul>
 + * User can also wait for events using method {@link 
org.apache.ignite.IgniteEvents#waitForLocal(org.apache.ignite.lang.IgnitePredicate,
 int...)}.
 + * <h1 class="header">Events and Performance</h1>
-  * Note that by default all events in GridGain are enabled and therefore 
generated and stored
-  * by whatever event storage SPI is configured. GridGain can and often does 
generate thousands events per seconds
++ * Note that by default all events in Ignite are enabled and therefore 
generated and stored
++ * by whatever event storage SPI is configured. Ignite can and often does 
generate thousands events per seconds
 + * under the load and therefore it creates a significant additional load on 
the system. If these events are
 + * not needed by the application this load is unnecessary and leads to 
significant performance degradation.
 + * <p>
 + * It is <b>highly recommended</b> to enable only those events that your 
application logic requires
-  * by using {@link 
org.apache.ignite.configuration.IgniteConfiguration#getIncludeEventTypes()} 
method in GridGain configuration. Note that certain
-  * events are required for GridGain's internal operations and such events 
will still be generated but not stored by
++ * by using {@link 
org.apache.ignite.configuration.IgniteConfiguration#getIncludeEventTypes()} 
method in Ignite configuration. Note that certain
++ * events are required for Ignite's internal operations and such events will 
still be generated but not stored by
 + * event storage SPI if they are disabled in Ignite configuration.
 + * @see EventType#EVT_CACHE_ENTRY_CREATED
 + * @see EventType#EVT_CACHE_ENTRY_DESTROYED
 + * @see EventType#EVT_CACHE_ENTRY_EVICTED
 + * @see EventType#EVT_CACHE_OBJECT_PUT
 + * @see EventType#EVT_CACHE_OBJECT_READ
 + * @see EventType#EVT_CACHE_OBJECT_REMOVED
 + * @see EventType#EVT_CACHE_OBJECT_LOCKED
 + * @see EventType#EVT_CACHE_OBJECT_UNLOCKED
 + * @see EventType#EVT_CACHE_OBJECT_SWAPPED
 + * @see EventType#EVT_CACHE_OBJECT_UNSWAPPED
 + * @see EventType#EVT_CACHE_OBJECT_EXPIRED
 + */
 +public class CacheEvent extends EventAdapter {
 +    /** */
 +    private static final long serialVersionUID = 0L;
 +
 +    /** Cache name. */
 +    private String cacheName;
 +
 +    /** Partition for the event. */
 +    private int part;
 +
 +    /** Cache entry. */
 +    @GridToStringInclude
 +    private Object key;
 +
 +    /** Event ID. */
 +    @GridToStringInclude
 +    private final IgniteUuid xid;
 +
 +    /** Lock ID. */
 +    @GridToStringInclude
 +    private final Object lockId;
 +
 +    /** New value. */
 +    @GridToStringInclude
 +    private final Object newVal;
 +
 +    /** Old value. */
 +    @GridToStringInclude
 +    private final Object oldVal;
 +
 +    /**
 +     * Flag indicating whether old value is present in case if we
 +     * don't have it in deserialized form.
 +     */
 +    @GridToStringInclude
 +    private final boolean hasOldVal;
 +
 +    /**
 +     * Flag indicating whether new value is present in case if we
 +     * don't have it in deserialized form.
 +     */
 +    @GridToStringInclude
 +    private final boolean hasNewVal;
 +
 +    /** Event node. */
 +    @GridToStringExclude
 +    @Nullable private final ClusterNode evtNode;
 +
 +    /** Flag indicating whether event happened on {@code near} or {@code 
partitioned} cache. */
 +    @GridToStringInclude
 +    private boolean near;
 +
 +    /** Subject ID. */
 +    @GridToStringInclude
 +    private UUID subjId;
 +
 +    /** Closure class name. */
 +    @GridToStringInclude
 +    private String cloClsName;
 +
 +    /** Task name if update was initiated within task execution. */
 +    @GridToStringInclude
 +    private String taskName;
 +
 +    /**
 +     * Constructs cache event.
 +     *
 +     * @param cacheName Cache name.
 +     * @param node Local node.
 +     * @param evtNode Event node ID.
 +     * @param msg Event message.
 +     * @param type Event type.
 +     * @param part Partition for the event (usually the partition the key 
belongs to).
 +     * @param near Flag indicating whether event happened on {@code near} or 
{@code partitioned} cache.
 +     * @param key Cache key.
 +     * @param xid Transaction ID.
 +     * @param lockId Lock ID.
 +     * @param newVal New value.
 +     * @param hasNewVal Flag indicating whether new value is present in case 
if we
 +     *      don't have it in deserialized form.
 +     * @param oldVal Old value.
 +     * @param hasOldVal Flag indicating whether old value is present in case 
if we
 +     *      don't have it in deserialized form.
 +     * @param subjId Subject ID.
 +     * @param cloClsName Closure class name.
 +     */
 +    public CacheEvent(String cacheName, ClusterNode node, @Nullable 
ClusterNode evtNode, String msg, int type, int part,
 +        boolean near, Object key, IgniteUuid xid, Object lockId, Object 
newVal, boolean hasNewVal,
 +        Object oldVal, boolean hasOldVal, UUID subjId, String cloClsName, 
String taskName) {
 +        super(node, msg, type);
 +        this.cacheName = cacheName;
 +        this.evtNode = evtNode;
 +        this.part = part;
 +        this.near = near;
 +        this.key = key;
 +        this.xid = xid;
 +        this.lockId = lockId;
 +        this.newVal = newVal;
 +        this.hasNewVal = hasNewVal;
 +        this.oldVal = oldVal;
 +        this.hasOldVal = hasOldVal;
 +        this.subjId = subjId;
 +        this.cloClsName = cloClsName;
 +        this.taskName = taskName;
 +    }
 +
 +    /**
 +     * Gets cache name.
 +     *
 +     * @return Cache name.
 +     */
 +    @Nullable public String cacheName() {
 +        return cacheName;
 +    }
 +
 +    /**
 +     * Gets partition for the event which is the partition the key belongs to.
 +     *
 +     * @return Partition for the event.
 +     */
 +    public int partition() {
 +        return part;
 +    }
 +
 +    /**
 +     * Gets flag indicating whether event happened on {@code near} or {@code 
partitioned} cache.
 +     *
 +     * @return Flag indicating whether event happened on {@code near} or 
{@code partitioned} cache.
 +     */
 +    public boolean isNear() {
 +        return near;
 +    }
 +
 +    /**
 +     * Gets node which initiated cache operation or {@code null} if that node 
is not available.
 +     *
 +     * @return Node which initiated cache operation or {@code null} if that 
node is not available.
 +     */
 +    @Nullable public ClusterNode eventNode() {
 +        return evtNode;
 +    }
 +
 +    /**
 +     * Gets cache entry associated with event.
 +     *
 +     * @return Cache entry associated with event.
 +     */
 +    @SuppressWarnings({"unchecked"})
 +    @Nullable public <K> K key() {
 +        return (K)key;
 +    }
 +
 +    /**
 +     * ID of surrounding cache transaction or <tt>null</tt> if there is
 +     * no surrounding transaction.
 +     *
 +     * @return ID of surrounding cache transaction.
 +     */
 +    @Nullable public IgniteUuid xid() {
 +        return xid;
 +    }
 +
 +    /**
 +     * ID of the lock if held or <tt>null</tt> if no lock held.
 +     *
 +     * @return ID of the lock if held.
 +     */
 +    @Nullable public Object lockId() {
 +        return lockId;
 +    }
 +
 +    /**
 +     * Gets new value for this event.
 +     *
 +     * @return New value associated with event (<tt>null</tt> if event is
 +     *      {@link EventType#EVT_CACHE_OBJECT_REMOVED}).
 +     */
 +    @Nullable public Object newValue() {
 +        return newVal;
 +    }
 +
 +    /**
 +     * Gets old value associated with this event.
 +     *
 +     * @return Old value associated with event.
 +     */
 +    @Nullable public Object oldValue() {
 +        return oldVal;
 +    }
 +
 +    /**
 +     * Gets flag indicating whether cache entry has old value in case if
 +     * we only have old value in serialized form in which case {@link 
#oldValue()}
 +     * will return {@code null}.
 +     *
 +     * @return Flag indicating whether there is old value associated with 
this event.
 +     */
 +    public boolean hasOldValue() {
 +        return hasOldVal;
 +    }
 +
 +    /**
 +     * Gets flag indicating whether cache entry has new value in case if
 +     * we only have new value in serialized form in which case {@link 
#newValue()}
 +     * will return {@code null}.
 +     *
 +     * @return Flag indicating whether there is new value associated with 
this event.
 +     */
 +    public boolean hasNewValue() {
 +        return hasNewVal;
 +    }
 +
 +    /**
 +     * Gets security subject ID that initiated this cache event, if available. 
This property is available only for
 +     * {@link EventType#EVT_CACHE_OBJECT_PUT}, {@link 
EventType#EVT_CACHE_OBJECT_REMOVED} and
 +     * {@link EventType#EVT_CACHE_OBJECT_READ} cache events.
 +     * <p>
 +     * Subject ID will be set either to nodeId initiated cache update or read 
or client ID initiated
 +     * cache update or read.
 +     *
 +     * @return Subject ID.
 +     */
 +    @Nullable public UUID subjectId() {
 +        return subjId;
 +    }
 +
 +    /**
 +     * Gets closure class name (applicable only for TRANSFORM operations).
 +     *
 +     * @return Closure class name.
 +     */
 +    @Nullable public String closureClassName() {
 +        return cloClsName;
 +    }
 +
 +    /**
 +     * Gets task name if cache event was caused by an operation initiated 
within task execution.
 +     *
 +     * @return Task name.
 +     */
 +    @Nullable public String taskName() {
 +        return taskName;
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override public String shortDisplay() {
 +        return name() + ": near=" + near + ", key=" + key + ", hasNewVal=" + 
hasNewVal + ", hasOldVal=" + hasOldVal +
 +            ", nodeId8=" + U.id8(node().id());
 +    }
 +
 +    /** {@inheritDoc} */
 +    @SuppressWarnings("ConstantConditions")
 +    @Override public String toString() {
 +        return S.toString(CacheEvent.class, this,
 +            "nodeId8", U.id8(node().id()),
 +            "evtNodeId8", U.id8(evtNode.id()),
 +            "msg", message(),
 +            "type", name(),
 +            "tstamp", timestamp());
 +    }
 +}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/b08492a5/modules/core/src/main/java/org/apache/ignite/events/CachePreloadingEvent.java
----------------------------------------------------------------------
diff --cc 
modules/core/src/main/java/org/apache/ignite/events/CachePreloadingEvent.java
index 3261dca,0000000..25ec68a
mode 100644,000000..100644
--- 
a/modules/core/src/main/java/org/apache/ignite/events/CachePreloadingEvent.java
+++ 
b/modules/core/src/main/java/org/apache/ignite/events/CachePreloadingEvent.java
@@@ -1,172 -1,0 +1,172 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *      http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.ignite.events;
 +
 +import org.apache.ignite.cluster.*;
 +import org.apache.ignite.internal.util.typedef.internal.*;
 +
 +/**
 + * In-memory database (cache) preloading event. Preload event happens every 
time there is a change
 + * in grid topology, which means that a node has either joined or left the 
grid.
 + * <p>
 + * Grid events are used for notification about what happens within the grid. 
Note that by
-  * design GridGain keeps all events generated on the local node locally and 
it provides
++ * design Ignite keeps all events generated on the local node locally and it 
provides
 + * APIs for performing a distributed queries across multiple nodes:
 + * <ul>
 + *      <li>
 + *          {@link 
org.apache.ignite.IgniteEvents#remoteQuery(org.apache.ignite.lang.IgnitePredicate,
 long, int...)} -
 + *          asynchronously querying events occurred on the nodes specified, 
including remote nodes.
 + *      </li>
 + *      <li>
 + *          {@link 
org.apache.ignite.IgniteEvents#localQuery(org.apache.ignite.lang.IgnitePredicate,
 int...)} -
 + *          querying only local events stored on this local node.
 + *      </li>
 + *      <li>
 + *          {@link 
org.apache.ignite.IgniteEvents#localListen(org.apache.ignite.lang.IgnitePredicate,
 int...)} -
 + *          listening to local grid events (events from remote nodes not 
included).
 + *      </li>
 + * </ul>
 + * User can also wait for events using method {@link 
org.apache.ignite.IgniteEvents#waitForLocal(org.apache.ignite.lang.IgnitePredicate,
 int...)}.
 + * <h1 class="header">Events and Performance</h1>
-  * Note that by default all events in GridGain are enabled and therefore 
generated and stored
-  * by whatever event storage SPI is configured. GridGain can and often does 
generate thousands events per seconds
++ * Note that by default all events in Ignite are enabled and therefore 
generated and stored
++ * by whatever event storage SPI is configured. Ignite can and often does 
generate thousands of events per second
 + * under the load and therefore it creates a significant additional load on 
the system. If these events are
 + * not needed by the application this load is unnecessary and leads to 
significant performance degradation.
 + * <p>
 + * It is <b>highly recommended</b> to enable only those events that your 
application logic requires
-  * by using {@link 
org.apache.ignite.configuration.IgniteConfiguration#getIncludeEventTypes()} 
method in GridGain configuration. Note that certain
-  * events are required for GridGain's internal operations and such events 
will still be generated but not stored by
-  * event storage SPI if they are disabled in GridGain configuration.
++ * by using {@link 
org.apache.ignite.configuration.IgniteConfiguration#getIncludeEventTypes()} 
method in Ignite configuration. Note that certain
++ * events are required for Ignite's internal operations and such events will 
still be generated but not stored by
++ * event storage SPI if they are disabled in Ignite configuration.
 + * @see EventType#EVT_CACHE_PRELOAD_PART_LOADED
 + * @see EventType#EVT_CACHE_PRELOAD_PART_UNLOADED
 + * @see EventType#EVT_CACHE_PRELOAD_STARTED
 + * @see EventType#EVT_CACHE_PRELOAD_STOPPED
 + */
 +public class CachePreloadingEvent extends EventAdapter {
 +    /** */
 +    private static final long serialVersionUID = 0L;
 +
 +    /** Cache name. */
 +    private String cacheName;
 +
 +    /** Partition for the event. */
 +    private int part;
 +
 +    /** Discovery node. */
 +    private ClusterNode discoNode;
 +
 +    /** Discovery event type. */
 +    private int discoEvtType;
 +
 +    /** Discovery event time. */
 +    private long discoTs;
 +
 +    /**
 +     * Constructs cache event.
 +     *
 +     * @param cacheName Cache name.
 +     * @param node Event node.
 +     * @param msg Event message.
 +     * @param type Event type.
 +     * @param part Partition for the event (usually the partition the key 
belongs to).
 +     * @param discoNode Node that triggered this preloading event.
 +     * @param discoEvtType Discovery event type that triggered this 
preloading event.
 +     * @param discoTs Timestamp of discovery event that triggered this 
preloading event.
 +     */
 +    public CachePreloadingEvent(String cacheName, ClusterNode node, String 
msg, int type, int part,
 +        ClusterNode discoNode, int discoEvtType, long discoTs) {
 +        super(node, msg, type);
 +        this.cacheName = cacheName;
 +        this.part = part;
 +        this.discoNode = discoNode;
 +        this.discoEvtType = discoEvtType;
 +        this.discoTs = discoTs;
 +    }
 +
 +    /**
 +     * Gets cache name.
 +     *
 +     * @return Cache name.
 +     */
 +    public String cacheName() {
 +        return cacheName;
 +    }
 +
 +    /**
 +     * Gets partition for the event.
 +     *
 +     * @return Partition for the event.
 +     */
 +    public int partition() {
 +        return part;
 +    }
 +
 +    /**
 +     * Gets shadow of the node that triggered this preloading event.
 +     *
 +     * @return Shadow of the node that triggered this preloading event.
 +     */
 +    public ClusterNode discoveryNode() {
 +        return discoNode;
 +    }
 +
 +    /**
 +     * Gets type of discovery event that triggered this preloading event.
 +     *
 +     * @return Type of discovery event that triggered this preloading event.
 +     * @see DiscoveryEvent#type()
 +     */
 +    public int discoveryEventType() {
 +        return discoEvtType;
 +    }
 +
 +    /**
 +     * Gets name of discovery event that triggered this preloading event.
 +     *
 +     * @return Name of discovery event that triggered this preloading event.
 +     * @see DiscoveryEvent#name()
 +     */
 +    public String discoveryEventName() {
 +        return U.gridEventName(discoEvtType);
 +    }
 +
 +    /**
 +     * Gets timestamp of discovery event that caused this preloading event.
 +     *
 +     * @return Timestamp of discovery event that caused this preloading event.
 +     */
 +    public long discoveryTimestamp() {
 +        return discoTs;
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override public String shortDisplay() {
 +        return name() + ": cache=" + CU.mask(cacheName) + ", cause=" +
 +            discoveryEventName();
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override public String toString() {
 +        return S.toString(CachePreloadingEvent.class, this,
 +            "discoEvtName", discoveryEventName(),
 +            "nodeId8", U.id8(node().id()),
 +            "msg", message(),
 +            "type", name(),
 +            "tstamp", timestamp());
 +    }
 +}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/b08492a5/modules/core/src/main/java/org/apache/ignite/events/CacheQueryExecutedEvent.java
----------------------------------------------------------------------
diff --cc 
modules/core/src/main/java/org/apache/ignite/events/CacheQueryExecutedEvent.java
index 0ff4633,0000000..d44ebd8
mode 100644,000000..100644
--- 
a/modules/core/src/main/java/org/apache/ignite/events/CacheQueryExecutedEvent.java
+++ 
b/modules/core/src/main/java/org/apache/ignite/events/CacheQueryExecutedEvent.java
@@@ -1,237 -1,0 +1,237 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *      http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.ignite.events;
 +
 +import org.apache.ignite.cache.query.*;
 +import org.apache.ignite.cluster.*;
 +import org.apache.ignite.internal.util.tostring.*;
 +import org.apache.ignite.internal.util.typedef.internal.*;
 +import org.apache.ignite.lang.*;
 +import org.jetbrains.annotations.*;
 +
 +import java.util.*;
 +
 +/**
 + * Cache query execution event.
 + * <p>
 + * Grid events are used for notification about what happens within the grid. 
Note that by
-  * design GridGain keeps all events generated on the local node locally and 
it provides
++ * design Ignite keeps all events generated on the local node locally and it 
provides
 + * APIs for performing a distributed queries across multiple nodes:
 + * <ul>
 + *      <li>
 + *          {@link 
org.apache.ignite.IgniteEvents#remoteQuery(org.apache.ignite.lang.IgnitePredicate,
 long, int...)} -
 + *          asynchronously querying events occurred on the nodes specified, 
including remote nodes.
 + *      </li>
 + *      <li>
 + *          {@link 
org.apache.ignite.IgniteEvents#localQuery(org.apache.ignite.lang.IgnitePredicate,
 int...)} -
 + *          querying only local events stored on this local node.
 + *      </li>
 + *      <li>
 + *          {@link 
org.apache.ignite.IgniteEvents#localListen(org.apache.ignite.lang.IgnitePredicate,
 int...)} -
 + *          listening to local grid events (events from remote nodes not 
included).
 + *      </li>
 + * </ul>
 + * User can also wait for events using method {@link 
org.apache.ignite.IgniteEvents#waitForLocal(org.apache.ignite.lang.IgnitePredicate,
 int...)}.
 + * <h1 class="header">Events and Performance</h1>
-  * Note that by default all events in GridGain are enabled and therefore 
generated and stored
-  * by whatever event storage SPI is configured. GridGain can and often does 
generate thousands events per seconds
++ * Note that by default all events in Ignite are enabled and therefore 
generated and stored
++ * by whatever event storage SPI is configured. Ignite can and often does 
generate thousands of events per second
 + * under the load and therefore it creates a significant additional load on 
the system. If these events are
 + * not needed by the application this load is unnecessary and leads to 
significant performance degradation.
 + * <p>
 + * It is <b>highly recommended</b> to enable only those events that your 
application logic requires
-  * by using {@link 
org.apache.ignite.configuration.IgniteConfiguration#getIncludeEventTypes()} 
method in GridGain configuration. Note that certain
-  * events are required for GridGain's internal operations and such events 
will still be generated but not stored by
-  * event storage SPI if they are disabled in GridGain configuration.
++ * by using {@link 
org.apache.ignite.configuration.IgniteConfiguration#getIncludeEventTypes()} 
method in Ignite configuration. Note that certain
++ * events are required for Ignite's internal operations and such events will 
still be generated but not stored by
++ * event storage SPI if they are disabled in Ignite configuration.
 + *
 + * @see EventType#EVT_CACHE_QUERY_EXECUTED
 + * @see EventType#EVTS_CACHE_QUERY
 + */
 +public class CacheQueryExecutedEvent<K, V> extends EventAdapter {
 +    /** */
 +    private static final long serialVersionUID = 3738753361235304496L;
 +
 +    /** Query type. */
 +    private final CacheQueryType qryType;
 +
 +    /** Cache name. */
 +    private final String cacheName;
 +
 +    /** Class name. */
 +    private final String clsName;
 +
 +    /** Clause. */
 +    private final String clause;
 +
 +    /** Scan query filter. */
 +    @GridToStringInclude
 +    private final IgniteBiPredicate<K, V> scanQryFilter;
 +
 +    /** Continuous query filter. */
 +    @GridToStringInclude
 +    private final IgnitePredicate<CacheContinuousQueryEntry<K, V>> 
contQryFilter;
 +
 +    /** Query arguments. */
 +    @GridToStringInclude
 +    private final Object[] args;
 +
 +    /** Security subject ID. */
 +    private final UUID subjId;
 +
 +    /** Task name. */
 +    private final String taskName;
 +
 +    /**
 +     * @param node Node where event was fired.
 +     * @param msg Event message.
 +     * @param type Event type.
 +     * @param qryType Query type.
 +     * @param cacheName Cache name.
 +     * @param clsName Class name.
 +     * @param clause Clause.
 +     * @param scanQryFilter Scan query filter.
 +     * @param contQryFilter Continuous query filter.
 +     * @param args Query arguments.
 +     * @param subjId Security subject ID.
 +     * @param taskName Task name.
 +     */
 +    public CacheQueryExecutedEvent(
 +        ClusterNode node,
 +        String msg,
 +        int type,
 +        CacheQueryType qryType,
 +        @Nullable String cacheName,
 +        @Nullable String clsName,
 +        @Nullable String clause,
 +        @Nullable IgniteBiPredicate<K, V> scanQryFilter,
 +        @Nullable IgnitePredicate<CacheContinuousQueryEntry<K, V>> 
contQryFilter,
 +        @Nullable Object[] args,
 +        @Nullable UUID subjId,
 +        @Nullable String taskName) {
 +        super(node, msg, type);
 +
 +        assert qryType != null;
 +
 +        this.qryType = qryType;
 +        this.cacheName = cacheName;
 +        this.clsName = clsName;
 +        this.clause = clause;
 +        this.scanQryFilter = scanQryFilter;
 +        this.contQryFilter = contQryFilter;
 +        this.args = args;
 +        this.subjId = subjId;
 +        this.taskName = taskName;
 +    }
 +
 +    /**
 +     * Gets query type.
 +     *
 +     * @return Query type.
 +     */
 +    public CacheQueryType queryType() {
 +        return qryType;
 +    }
 +
 +    /**
 +     * Gets cache name on which query was executed.
 +     *
 +     * @return Cache name.
 +     */
 +    @Nullable public String cacheName() {
 +        return cacheName;
 +    }
 +
 +    /**
 +     * Gets queried class name.
 +     * <p>
 +     * Applicable for {@code SQL} and {@code full text} queries.
 +     *
 +     * @return Queried class name.
 +     */
 +    @Nullable public String className() {
 +        return clsName;
 +    }
 +
 +    /**
 +     * Gets query clause.
 +     * <p>
 +     * Applicable for {@code SQL}, {@code SQL fields} and {@code full text} 
queries.
 +     *
 +     * @return Query clause.
 +     */
 +    @Nullable public String clause() {
 +        return clause;
 +    }
 +
 +    /**
 +     * Gets scan query filter.
 +     * <p>
 +     * Applicable for {@code scan} queries.
 +     *
 +     * @return Scan query filter.
 +     */
 +    @Nullable public IgniteBiPredicate<K, V> scanQueryFilter() {
 +        return scanQryFilter;
 +    }
 +
 +    /**
 +     * Gets continuous query filter.
 +     * <p>
 +     * Applicable for {@code continuous} queries.
 +     *
 +     * @return Continuous query filter.
 +     */
 +    @Nullable public IgnitePredicate<CacheContinuousQueryEntry<K, V>> 
continuousQueryFilter() {
 +        return contQryFilter;
 +    }
 +
 +    /**
 +     * Gets query arguments.
 +     * <p>
 +     * Applicable for {@code SQL} and {@code SQL fields} queries.
 +     *
 +     * @return Query arguments.
 +     */
 +    @Nullable public Object[] arguments() {
 +        return args;
 +    }
 +
 +    /**
 +     * Gets security subject ID.
 +     *
 +     * @return Security subject ID.
 +     */
 +    @Nullable public UUID subjectId() {
 +        return subjId;
 +    }
 +
 +    /**
 +     * Gets the name of the task that executed the query (if any).
 +     *
 +     * @return Task name.
 +     */
 +    @Nullable public String taskName() {
 +        return taskName;
 +    }
 +
 +    /** {@inheritDoc} */
 +    @Override public String toString() {
 +        return S.toString(CacheQueryExecutedEvent.class, this,
 +            "nodeId8", U.id8(node().id()),
 +            "msg", message(),
 +            "type", name(),
 +            "tstamp", timestamp());
 +    }
 +}

Reply via email to