http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/e1d8f69b/modules/hadoop/src/test/java/org/apache/ignite/loadtests/ggfs/GridGgfsPerformanceBenchmark.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/loadtests/ggfs/GridGgfsPerformanceBenchmark.java b/modules/hadoop/src/test/java/org/apache/ignite/loadtests/ggfs/GridGgfsPerformanceBenchmark.java
new file mode 100644
index 0000000..3eea433
--- /dev/null
+++ b/modules/hadoop/src/test/java/org/apache/ignite/loadtests/ggfs/GridGgfsPerformanceBenchmark.java
@@ -0,0 +1,273 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.loadtests.ggfs;
+
+import org.apache.hadoop.conf.*;
+import org.apache.hadoop.fs.*;
+import org.apache.ignite.lang.*;
+import org.apache.ignite.internal.util.typedef.internal.*;
+import org.gridgain.testframework.*;
+import org.jetbrains.annotations.*;
+
+import java.io.*;
+import java.util.*;
+import java.util.concurrent.atomic.*;
+
+/**
+ * Tests write throughput.
+ */
+public class GridGgfsPerformanceBenchmark {
+    /** Path to test hadoop configuration. */
+    private static final String HADOOP_FS_CFG = "modules/core/src/test/config/hadoop/core-site.xml";
+
+    /** FS prefix. */
+    private static final String FS_PREFIX = "ggfs:///";
+
+    /** Test writes. */
+    private static final int OP_WRITE = 0;
+
+    /** Test reads. */
+    private static final int OP_READ = 1;
+
+    /**
+     * Starts benchmark.
+     *
+     * @param args Program arguments.
+     *      [0] - number of threads, default 1.
+     *      [1] - operation: 0 - write (default), 1 - read.
+     *      [2] - file length, default is 256MB.
+     *      [3] - stream buffer size, default is 128KB.
+     *      [4] - fs config path, default is modules/core/src/test/config/hadoop/core-site.xml.
+     *      [5] - fs prefix, default is "ggfs:///".
+     *      [6] - replication factor, default is 3.
+     * @throws Exception If failed.
+     */
+    public static void main(String[] args) throws Exception {
+        final int threadNum = intArgument(args, 0, 1);
+        final int op = intArgument(args, 1, OP_WRITE);
+        final long fileLen = longArgument(args, 2, 256 * 1024 * 1024);
+        final int bufSize = intArgument(args, 3, 128 * 1024);
+        final String cfgPath = argument(args, 4, HADOOP_FS_CFG);
+        final String fsPrefix = argument(args, 5, FS_PREFIX);
+        final short replication = (short)intArgument(args, 6, 3);
+
+        final Path ggfsHome = new Path(fsPrefix);
+
+        final FileSystem fs = ggfs(ggfsHome, cfgPath);
+
+        final AtomicLong progress = new AtomicLong();
+
+        final AtomicInteger idx = new AtomicInteger();
+
+        System.out.println("Warming up...");
+
+//        warmUp(fs, ggfsHome, op, fileLen);
+
+        System.out.println("Finished warm up.");
+
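+        // For the read benchmark, pre-create one input file per thread so readers have data to consume.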
+        if (op == OP_READ) {
+            for (int i = 0; i < threadNum; i++)
+                benchmarkWrite(fs, new Path(ggfsHome, "in-" + i), fileLen, bufSize, replication, null);
+        }
+
+        long total = 0;
+
+        long start = System.currentTimeMillis();
+
+        IgniteFuture<Long> fut = GridTestUtils.runMultiThreadedAsync(new Runnable() {
+            @Override public void run() {
+                String fileIdx = op == OP_READ ? String.valueOf(idx.getAndIncrement()) : UUID.randomUUID().toString();
+
+                try {
+                    for (int i = 0; i < 200; i++) {
+                        if (op == OP_WRITE)
+                            benchmarkWrite(fs, new Path(ggfsHome, "out-" + fileIdx), fileLen, bufSize, replication,
+                                progress);
+                        else
+                            benchmarkRead(fs, new Path(ggfsHome, "in-" + fileIdx), bufSize, progress);
+                    }
+
+                    System.out.println("Finished " + (op == OP_WRITE ? 
"writing" : "reading") + " data.");
+                }
+                catch (Exception e) {
+                    System.out.println("Failed to process stream: " + e);
+
+                    e.printStackTrace();
+                }
+            }
+        }, threadNum, "test-runner");
+
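+        // Poll the shared progress counter once per second and print the throughput observed in that interval.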
+        while (!fut.isDone()) {
+            U.sleep(1000);
+
+            long written = progress.getAndSet(0);
+
+            total += written;
+
+            int mbytesPerSec = (int)(written / (1024 * 1024));
+
+            System.out.println((op == OP_WRITE ? "Write" : "Read") + " rate [threads=" + threadNum +
+                ", bufSize=" + bufSize + ", MBytes/s=" + mbytesPerSec + ']');
+        }
+
+        long now = System.currentTimeMillis();
+
+        System.out.println((op == OP_WRITE ? "Written" : "Read") + " " + total + " bytes in " + (now - start) +
+            "ms, avg write rate is " + (total * 1000 / ((now - start) * 1024 * 1024)) + "MBytes/s");
+
+        fs.close();
+    }
+
+    /**
+     * Warms up server side.
+     *
+     * @param fs File system.
+     * @param ggfsHome GGFS home.
+     * @param op Operation to warm up: {@code OP_WRITE} or {@code OP_READ}.
+     * @param fileLen File length.
+     * @throws Exception If failed.
+     */
+    private static void warmUp(FileSystem fs, Path ggfsHome, int op, long fileLen) throws Exception {
+        Path file = new Path(ggfsHome, "out-0");
+
+        benchmarkWrite(fs, file, fileLen, 1024 * 1024, (short)1, null);
+
+        for (int i = 0; i < 5; i++) {
+            if (op == OP_WRITE)
+                benchmarkWrite(fs, file, fileLen, 1024 * 1024, (short)1, null);
+            else
+                benchmarkRead(fs, file, 1024 * 1024, null);
+        }
+
+        fs.delete(file, true);
+    }
+
+    /**
+     * @param args Arguments.
+     * @param idx Index.
+     * @param dflt Default value.
+     * @return Argument value.
+     */
+    private static int intArgument(String[] args, int idx, int dflt) {
+        if (args.length <= idx)
+            return dflt;
+
+        try {
+            return Integer.parseInt(args[idx]);
+        }
+        catch (NumberFormatException ignored) {
+            return dflt;
+        }
+    }
+
+    /**
+     * @param args Arguments.
+     * @param idx Index.
+     * @param dflt Default value.
+     * @return Argument value.
+     */
+    private static long longArgument(String[] args, int idx, long dflt) {
+        if (args.length <= idx)
+            return dflt;
+
+        try {
+            return Long.parseLong(args[idx]);
+        }
+        catch (NumberFormatException ignored) {
+            return dflt;
+        }
+    }
+
+    /**
+     * @param args Arguments.
+     * @param idx Index.
+     * @param dflt Default value.
+     * @return Argument value.
+     */
+    private static String argument(String[] args, int idx, String dflt) {
+        if (args.length <= idx)
+            return dflt;
+
+        return args[idx];
+    }
+
+    /**
+     * @param home GGFS home path.
+     * @param cfgPath Hadoop configuration path.
+     * @return File system configured from the given resource.
+     * @throws IOException If failed.
+     */
+    private static FileSystem ggfs(Path home, String cfgPath) throws IOException {
+        Configuration cfg = new Configuration();
+
+        cfg.addResource(U.resolveGridGainUrl(cfgPath));
+
+        return FileSystem.get(home.toUri(), cfg);
+    }
+
+    /**
+     * Tests stream write to specified file.
+     *
+     * @param fs File system.
+     * @param file File to write to.
+     * @param len Length to write.
+     * @param bufSize Buffer size.
+     * @param replication Replication factor.
+     * @param progress Progress that will be incremented on each written chunk.
+     * @throws Exception If failed.
+     */
+    private static void benchmarkWrite(FileSystem fs, Path file, long len, int bufSize, short replication,
+        @Nullable AtomicLong progress) throws Exception {
+
+        try (FSDataOutputStream out = fs.create(file, true, bufSize, replication, fs.getDefaultBlockSize())) {
+            long written = 0;
+
+            byte[] data = new byte[bufSize];
+
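+            // Repeatedly write the zero-filled buffer until the requested length is reached.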
+            while (written < len) {
+                int chunk = (int) Math.min(len - written, bufSize);
+
+                out.write(data, 0, chunk);
+
+                written += chunk;
+
+                if (progress != null)
+                    progress.addAndGet(chunk);
+            }
+
+            out.flush();
+        }
+        catch (Exception e) {
+            e.printStackTrace();
+            throw e;
+        }
+    }
+
+    /**
+     * Tests stream read from specified file.
+     *
+     * @param fs File system.
+     * @param file File to read from.
+     * @param bufSize Buffer size.
+     * @param progress Progress that will be incremented on each read chunk.
+     * @throws Exception If failed.
+     */
+    private static void benchmarkRead(FileSystem fs, Path file, int bufSize, @Nullable AtomicLong progress)
+        throws Exception {
+
+        try (FSDataInputStream in = fs.open(file, bufSize)) {
+            byte[] data = new byte[32 * bufSize];
+
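+            // Read until end of stream, crediting each chunk to the progress counter.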
+            while (true) {
+                int read = in.read(data);
+
+                if (read < 0)
+                    return;
+
+                if (progress != null)
+                    progress.addAndGet(read);
+            }
+        }
+    }
+}
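
For anyone trying the new benchmark, here is a minimal launcher sketch. The class name GgfsBenchmarkLauncher and the argument values are illustrative only (they are not part of this commit), and it assumes the benchmark class is on the test classpath; the argument order matches main() above.

    package org.apache.ignite.loadtests.ggfs;

    /** Hypothetical launcher; argument order matches GridGgfsPerformanceBenchmark.main(). */
    public class GgfsBenchmarkLauncher {
        public static void main(String[] args) throws Exception {
            GridGgfsPerformanceBenchmark.main(new String[] {
                "4",                                                 // Number of threads.
                "0",                                                 // Operation: 0 - write, 1 - read.
                String.valueOf(256L * 1024 * 1024),                  // File length, 256MB.
                String.valueOf(128 * 1024),                          // Stream buffer size, 128KB.
                "modules/core/src/test/config/hadoop/core-site.xml", // Hadoop config path.
                "ggfs:///",                                          // FS prefix.
                "1"                                                  // Replication factor.
            });
        }
    }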

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/e1d8f69b/modules/hadoop/src/test/java/org/gridgain/loadtests/ggfs/GridGgfsNodeStartup.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/gridgain/loadtests/ggfs/GridGgfsNodeStartup.java b/modules/hadoop/src/test/java/org/gridgain/loadtests/ggfs/GridGgfsNodeStartup.java
deleted file mode 100644
index 8777555..0000000
--- a/modules/hadoop/src/test/java/org/gridgain/loadtests/ggfs/GridGgfsNodeStartup.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.gridgain.loadtests.ggfs;
-
-import org.apache.ignite.*;
-import org.apache.ignite.internal.util.typedef.*;
-
-import javax.swing.*;
-
-/**
- * Node startup for GGFS performance benchmark.
- */
-public class GridGgfsNodeStartup {
-    /**
-     * Start up an empty node with specified cache configuration.
-     *
-     * @param args Command line arguments, none required.
-     * @throws IgniteCheckedException If example execution failed.
-     */
-    public static void main(String[] args) throws IgniteCheckedException {
-        try (Ignite ignored = G.start("config/hadoop/default-config.xml")) {
-            // Wait until Ok is pressed.
-            JOptionPane.showMessageDialog(
-                null,
-                new JComponent[] {
-                    new JLabel("GridGain started."),
-                    new JLabel("Press OK to stop GridGain.")
-                },
-                "GridGain",
-                JOptionPane.INFORMATION_MESSAGE
-            );
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/e1d8f69b/modules/hadoop/src/test/java/org/gridgain/loadtests/ggfs/GridGgfsPerformanceBenchmark.java
----------------------------------------------------------------------
diff --git a/modules/hadoop/src/test/java/org/gridgain/loadtests/ggfs/GridGgfsPerformanceBenchmark.java b/modules/hadoop/src/test/java/org/gridgain/loadtests/ggfs/GridGgfsPerformanceBenchmark.java
deleted file mode 100644
index b3ca2e0..0000000
--- a/modules/hadoop/src/test/java/org/gridgain/loadtests/ggfs/GridGgfsPerformanceBenchmark.java
+++ /dev/null
@@ -1,273 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.gridgain.loadtests.ggfs;
-
-import org.apache.hadoop.conf.*;
-import org.apache.hadoop.fs.*;
-import org.apache.ignite.lang.*;
-import org.apache.ignite.internal.util.typedef.internal.*;
-import org.gridgain.testframework.*;
-import org.jetbrains.annotations.*;
-
-import java.io.*;
-import java.util.*;
-import java.util.concurrent.atomic.*;
-
-/**
- * Tests write throughput.
- */
-public class GridGgfsPerformanceBenchmark {
-    /** Path to test hadoop configuration. */
-    private static final String HADOOP_FS_CFG = "modules/core/src/test/config/hadoop/core-site.xml";
-
-    /** FS prefix. */
-    private static final String FS_PREFIX = "ggfs:///";
-
-    /** Test writes. */
-    private static final int OP_WRITE = 0;
-
-    /** Test reads. */
-    private static final int OP_READ = 1;
-
-    /**
-     * Starts benchmark.
-     *
-     * @param args Program arguments.
-     *      [0] - number of threads, default 1.
-     *      [1] - file length, default is 1GB.
-     *      [2] - stream buffer size, default is 1M.
-     *      [3] - fs config path.
-     * @throws Exception If failed.
-     */
-    public static void main(String[] args) throws Exception {
-        final int threadNum = intArgument(args, 0, 1);
-        final int op = intArgument(args, 1, OP_WRITE);
-        final long fileLen = longArgument(args, 2, 256 * 1024 * 1024);
-        final int bufSize = intArgument(args, 3, 128 * 1024);
-        final String cfgPath = argument(args, 4, HADOOP_FS_CFG);
-        final String fsPrefix = argument(args, 5, FS_PREFIX);
-        final short replication = (short)intArgument(args, 6, 3);
-
-        final Path ggfsHome = new Path(fsPrefix);
-
-        final FileSystem fs = ggfs(ggfsHome, cfgPath);
-
-        final AtomicLong progress = new AtomicLong();
-
-        final AtomicInteger idx = new AtomicInteger();
-
-        System.out.println("Warming up...");
-
-//        warmUp(fs, ggfsHome, op, fileLen);
-
-        System.out.println("Finished warm up.");
-
-        if (op == OP_READ) {
-            for (int i = 0; i < threadNum; i++)
-                benchmarkWrite(fs, new Path(ggfsHome, "in-" + i), fileLen, bufSize, replication, null);
-        }
-
-        long total = 0;
-
-        long start = System.currentTimeMillis();
-
-        IgniteFuture<Long> fut = GridTestUtils.runMultiThreadedAsync(new Runnable() {
-            @Override public void run() {
-                String fileIdx = op == OP_READ ? String.valueOf(idx.getAndIncrement()) : UUID.randomUUID().toString();
-
-                try {
-                    for (int i = 0; i < 200; i++) {
-                        if (op == OP_WRITE)
-                            benchmarkWrite(fs, new Path(ggfsHome, "out-" + fileIdx), fileLen, bufSize, replication,
-                                progress);
-                        else
-                            benchmarkRead(fs, new Path(ggfsHome, "in-" + fileIdx), bufSize, progress);
-                    }
-
-                    System.out.println("Finished " + (op == OP_WRITE ? 
"writing" : "reading") + " data.");
-                }
-                catch (Exception e) {
-                    System.out.println("Failed to process stream: " + e);
-
-                    e.printStackTrace();
-                }
-            }
-        }, threadNum, "test-runner");
-
-        while (!fut.isDone()) {
-            U.sleep(1000);
-
-            long written = progress.getAndSet(0);
-
-            total += written;
-
-            int mbytesPerSec = (int)(written / (1024 * 1024));
-
-            System.out.println((op == OP_WRITE ? "Write" : "Read") + " rate [threads=" + threadNum +
-                ", bufSize=" + bufSize + ", MBytes/s=" + mbytesPerSec + ']');
-        }
-
-        long now = System.currentTimeMillis();
-
-        System.out.println((op == OP_WRITE ? "Written" : "Read") + " " + total + " bytes in " + (now - start) +
-            "ms, avg write rate is " + (total * 1000 / ((now - start) * 1024 * 1024)) + "MBytes/s");
-
-        fs.close();
-    }
-
-    /**
-     * Warms up server side.
-     *
-     * @param fs File system.
-     * @param ggfsHome GGFS home.
-     * @throws Exception If failed.
-     */
-    private static void warmUp(FileSystem fs, Path ggfsHome, int op, long fileLen) throws Exception {
-        Path file = new Path(ggfsHome, "out-0");
-
-        benchmarkWrite(fs, file, fileLen, 1024 * 1024, (short)1, null);
-
-        for (int i = 0; i < 5; i++) {
-            if (op == OP_WRITE)
-                benchmarkWrite(fs, file, fileLen, 1024 * 1024, (short)1, null);
-            else
-                benchmarkRead(fs, file, 1024 * 1024, null);
-        }
-
-        fs.delete(file, true);
-    }
-
-    /**
-     * @param args Arguments.
-     * @param idx Index.
-     * @param dflt Default value.
-     * @return Argument value.
-     */
-    private static int intArgument(String[] args, int idx, int dflt) {
-        if (args.length <= idx)
-            return dflt;
-
-        try {
-            return Integer.parseInt(args[idx]);
-        }
-        catch (NumberFormatException ignored) {
-            return dflt;
-        }
-    }
-
-    /**
-     * @param args Arguments.
-     * @param idx Index.
-     * @param dflt Default value.
-     * @return Argument value.
-     */
-    private static long longArgument(String[] args, int idx, long dflt) {
-        if (args.length <= idx)
-            return dflt;
-
-        try {
-            return Long.parseLong(args[idx]);
-        }
-        catch (NumberFormatException ignored) {
-            return dflt;
-        }
-    }
-
-    /**
-     * @param args Arguments.
-     * @param idx Index.
-     * @param dflt Default value.
-     * @return Argument value.
-     */
-    private static String argument(String[] args, int idx, String dflt) {
-        if (args.length <= idx)
-            return dflt;
-
-        return args[idx];
-    }
-
-    /** {@inheritDoc} */
-    private static FileSystem ggfs(Path home, String cfgPath) throws IOException {
-        Configuration cfg = new Configuration();
-
-        cfg.addResource(U.resolveGridGainUrl(cfgPath));
-
-        return FileSystem.get(home.toUri(), cfg);
-    }
-
-    /**
-     * Tests stream write to specified file.
-     *
-     * @param file File to write to.
-     * @param len Length to write.
-     * @param bufSize Buffer size.
-     * @param replication Replication factor.
-     * @param progress Progress that will be incremented on each written chunk.
-     */
-    private static void benchmarkWrite(FileSystem fs, Path file, long len, int bufSize, short replication,
-        @Nullable AtomicLong progress) throws Exception {
-
-        try (FSDataOutputStream out = fs.create(file, true, bufSize, replication, fs.getDefaultBlockSize())) {
-            long written = 0;
-
-            byte[] data = new byte[bufSize];
-
-            while (written < len) {
-                int chunk = (int) Math.min(len - written, bufSize);
-
-                out.write(data, 0, chunk);
-
-                written += chunk;
-
-                if (progress != null)
-                    progress.addAndGet(chunk);
-            }
-
-            out.flush();
-        }
-        catch (Exception e) {
-            e.printStackTrace();
-            throw e;
-        }
-    }
-
-    /**
-     * Tests stream read from specified file.
-     *
-     * @param file File to write from
-     * @param bufSize Buffer size.
-     * @param progress Progress that will be incremented on each written chunk.
-     */
-    private static void benchmarkRead(FileSystem fs, Path file, int bufSize, @Nullable AtomicLong progress)
-        throws Exception {
-
-        try (FSDataInputStream in = fs.open(file, bufSize)) {
-            byte[] data = new byte[32 * bufSize];
-
-            while (true) {
-                int read = in.read(data);
-
-                if (read < 0)
-                    return;
-
-                if (progress != null)
-                    progress.addAndGet(read);
-            }
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/e1d8f69b/modules/indexing/src/test/java/org/apache/ignite/loadtests/h2indexing/GridTestEntity.java
----------------------------------------------------------------------
diff --git a/modules/indexing/src/test/java/org/apache/ignite/loadtests/h2indexing/GridTestEntity.java b/modules/indexing/src/test/java/org/apache/ignite/loadtests/h2indexing/GridTestEntity.java
new file mode 100644
index 0000000..75a5a8f
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/loadtests/h2indexing/GridTestEntity.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.loadtests.h2indexing;
+
+import org.apache.ignite.cache.query.*;
+
+import java.util.*;
+
+/**
+ * Test entity.
+ */
+public class GridTestEntity {
+    /** */
+    @GridCacheQuerySqlField(index = true)
+    private final String name;
+
+    /** */
+    @GridCacheQuerySqlField(index = false)
+    private final Date date;
+
+    /**
+     * Constructor.
+     *
+     * @param name Name.
+     * @param date Date.
+     */
+    @SuppressWarnings("AssignmentToDateFieldFromParameter")
+    public GridTestEntity(String name, Date date) {
+        this.name = name;
+        this.date = date;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+
+        GridTestEntity that = (GridTestEntity) o;
+
+        return !(date != null ? !date.equals(that.date) : that.date != null) &&
+            !(name != null ? !name.equals(that.name) : that.name != null);
+
+    }
+
+    /** {@inheritDoc} */
+    @Override public int hashCode() {
+        int res = name != null ? name.hashCode() : 0;
+
+        res = 31 * res + (date != null ? date.hashCode() : 0);
+
+        return res;
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/e1d8f69b/modules/indexing/src/test/java/org/apache/ignite/loadtests/h2indexing/GridTreeBenchmark.java
----------------------------------------------------------------------
diff --git a/modules/indexing/src/test/java/org/apache/ignite/loadtests/h2indexing/GridTreeBenchmark.java b/modules/indexing/src/test/java/org/apache/ignite/loadtests/h2indexing/GridTreeBenchmark.java
new file mode 100644
index 0000000..7e0b6e8
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/loadtests/h2indexing/GridTreeBenchmark.java
@@ -0,0 +1,276 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.loadtests.h2indexing;
+
+import org.apache.ignite.internal.util.snaptree.*;
+
+import java.util.*;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.*;
+
+/**
+ * NavigableMaps PUT benchmark.
+ */
+public class GridTreeBenchmark {
+    /** */
+    private static final int PUTS = 8000000;
+
+    /** */
+    private static final int THREADS = 8;
+
+    /** */
+    private static final int ITERATIONS = PUTS / THREADS;
+
+    /**
+     * Main method.
+     *
+     * @param args Command line args (not used).
+     * @throws BrokenBarrierException If failed.
+     * @throws InterruptedException If failed.
+     */
+    public static void main(String... args) throws BrokenBarrierException, InterruptedException {
+        doTestMaps();
+    }
+
+    /**
+     * @throws BrokenBarrierException If failed.
+     * @throws InterruptedException If failed.
+     */
+    private static void doTestAtomicInt() throws BrokenBarrierException, InterruptedException {
+        final AtomicInteger[] cnts = new AtomicInteger[8];
+
+        for (int i = 0; i < cnts.length; i++)
+            cnts[i] = new AtomicInteger();
+
+        final Thread[] ths = new Thread[THREADS];
+
+        final CyclicBarrier barrier = new CyclicBarrier(THREADS + 1);
+
+        final AtomicInteger cnt = new AtomicInteger();
+
+        for (int i = 0; i < ths.length; i++) {
+            ths[i] = new Thread(new Runnable() {
+                @Override public void run() {
+                    int idx = cnt.getAndIncrement();
+
+                    AtomicInteger x = cnts[idx % cnts.length];
+
+                    try {
+                        barrier.await();
+                    }
+                    catch (Exception e) {
+                        throw new IllegalStateException(e);
+                    }
+
+                    for (int i = 0; i < ITERATIONS; i++)
+                        x.incrementAndGet();
+                }
+            });
+
+            ths[i].start();
+        }
+
+        barrier.await();
+
+        long start = System.currentTimeMillis();
+
+        for (Thread t : ths)
+            t.join();
+
+        long time = System.currentTimeMillis() - start;
+
+        System.out.println(time);
+
+    }
+
+    /**
+     * @throws BrokenBarrierException If failed.
+     * @throws InterruptedException If failed.
+     */
+    private static void doTestMaps() throws BrokenBarrierException, InterruptedException {
+        final UUID[] data = generate();
+
+        @SuppressWarnings("unchecked")
+        final Map<UUID, UUID>[] maps = new Map[4];
+
+        for (int i = 0; i < maps.length; i++)
+            maps[i] =
+                new SnapTreeMap<>();
+
+
+        final Thread[] ths = new Thread[THREADS];
+
+        final CyclicBarrier barrier = new CyclicBarrier(THREADS + 1);
+
+        final AtomicInteger cnt = new AtomicInteger();
+
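+        // Threads are striped across the maps by index; each thread inserts its own ITERATIONS-sized slice of UUIDs.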
+        for (int i = 0; i < ths.length; i++) {
+            ths[i] = new Thread(new Runnable() {
+                @Override public void run() {
+                    int idx = cnt.getAndIncrement();
+
+                    int off = idx * ITERATIONS;
+
+                    Map<UUID, UUID> map = maps[idx % maps.length];
+
+                    try {
+                        barrier.await();
+                    }
+                    catch (Exception e) {
+                        throw new IllegalStateException(e);
+                    }
+
+                    for (int i = 0; i < ITERATIONS; i++) {
+                        UUID id = data[off + i];
+
+                        id = map.put(id, id);
+
+                        assert id == null;
+                    }
+                }
+            });
+
+            ths[i].start();
+        }
+
+        System.out.println("Sleep");
+        Thread.sleep(10000);
+
+        System.out.println("Go");
+        barrier.await();
+
+        long start = System.currentTimeMillis();
+
+        for (Thread t : ths)
+            t.join();
+
+        long time = System.currentTimeMillis() - start;
+
+        System.out.println(time);
+    }
+
+    /**
+     * @throws BrokenBarrierException If failed.
+     * @throws InterruptedException If failed.
+     */
+    private static void doBenchmark() throws BrokenBarrierException, InterruptedException {
+        int attemts = 20;
+        int warmups = 10;
+
+        long snapTreeTime = 0;
+        long skipListTime = 0;
+
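+        // Run the same UUID data set against both map implementations; warmup rounds do not count toward the averages.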
+        for (int i = 0; i < attemts; i++) {
+            ConcurrentNavigableMap<UUID, UUID> skipList = new ConcurrentSkipListMap<>();
+            ConcurrentNavigableMap<UUID, UUID> snapTree = new SnapTreeMap<>();
+
+            UUID[] ids = generate();
+
+            boolean warmup = i < warmups;
+
+            snapTreeTime += doTest(snapTree, ids, warmup);
+            skipListTime += doTest(skipList, ids, warmup);
+
+            assert skipList.size() == snapTree.size();
+
+            Iterator<UUID> snapIt = snapTree.keySet().iterator();
+            Iterator<UUID> listIt = skipList.keySet().iterator();
+
+            for (int x = 0, len = skipList.size(); x < len; x++)
+                assert snapIt.next() == listIt.next();
+
+            System.out.println(i + " ==================");
+        }
+
+        attemts -= warmups;
+
+        System.out.println("Avg for GridSnapTreeMap: " + (snapTreeTime / 
attemts) + " ms");
+        System.out.println("Avg for ConcurrentSkipListMap: " + (skipListTime / 
attemts) + " ms");
+     }
+
+    /**
+     * @return UUIDs.
+     */
+    private static UUID[] generate() {
+        UUID[] ids = new UUID[ITERATIONS * THREADS];
+
+        for (int i = 0; i < ids.length; i++)
+            ids[i] = UUID.randomUUID();
+
+        return ids;
+    }
+
+    /**
+     * @param tree Tree.
+     * @param data Data.
+     * @param warmup Warmup.
+     * @return Time.
+     * @throws BrokenBarrierException If failed.
+     * @throws InterruptedException If failed.
+     */
+    private static long doTest(final ConcurrentNavigableMap<UUID, UUID> tree, final UUID[] data, boolean warmup)
+        throws BrokenBarrierException, InterruptedException {
+        Thread[] ths = new Thread[THREADS];
+
+        final CyclicBarrier barrier = new CyclicBarrier(THREADS + 1);
+
+        final AtomicInteger cnt = new AtomicInteger();
+
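+        // The barrier has one extra party for this (main) thread: timing starts only after all workers are ready.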
+        for (int i = 0; i < ths.length; i++) {
+            ths[i] = new Thread(new Runnable() {
+                @Override public void run() {
+                    int off = cnt.getAndIncrement() * ITERATIONS;
+
+                    try {
+                        barrier.await();
+                    }
+                    catch (Exception e) {
+                        throw new IllegalStateException(e);
+                    }
+
+                    for (int i = 0; i < ITERATIONS; i++) {
+                        UUID id = data[off + i];
+
+                        id = tree.put(id, id);
+
+                        assert id == null;
+                    }
+                }
+            });
+
+            ths[i].start();
+        }
+
+        barrier.await();
+
+        long start = System.currentTimeMillis();
+
+        for (Thread t : ths)
+            t.join();
+
+        long time = System.currentTimeMillis() - start;
+
+        if (!warmup) {
+            System.out.println(tree.getClass().getSimpleName() + "  " + time + " ms");
+
+            return time;
+        }
+
+        return 0;
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/e1d8f69b/modules/indexing/src/test/java/org/gridgain/loadtests/h2indexing/GridTestEntity.java
----------------------------------------------------------------------
diff --git a/modules/indexing/src/test/java/org/gridgain/loadtests/h2indexing/GridTestEntity.java b/modules/indexing/src/test/java/org/gridgain/loadtests/h2indexing/GridTestEntity.java
deleted file mode 100644
index 2e826ef..0000000
--- a/modules/indexing/src/test/java/org/gridgain/loadtests/h2indexing/GridTestEntity.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.gridgain.loadtests.h2indexing;
-
-import org.apache.ignite.cache.query.*;
-
-import java.util.*;
-
-/**
- * Test entity.
- */
-public class GridTestEntity {
-    /** */
-    @GridCacheQuerySqlField(index = true)
-    private final String name;
-
-    /** */
-    @GridCacheQuerySqlField(index = false)
-    private final Date date;
-
-    /**
-     * Constructor.
-     *
-     * @param name Name.
-     * @param date Date.
-     */
-    @SuppressWarnings("AssignmentToDateFieldFromParameter")
-    public GridTestEntity(String name, Date date) {
-        this.name = name;
-        this.date = date;
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean equals(Object o) {
-        if (this == o) return true;
-        if (o == null || getClass() != o.getClass()) return false;
-
-        GridTestEntity that = (GridTestEntity) o;
-
-        return !(date != null ? !date.equals(that.date) : that.date != null) &&
-            !(name != null ? !name.equals(that.name) : that.name != null);
-
-    }
-
-    /** {@inheritDoc} */
-    @Override public int hashCode() {
-        int res = name != null ? name.hashCode() : 0;
-
-        res = 31 * res + (date != null ? date.hashCode() : 0);
-
-        return res;
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/e1d8f69b/modules/indexing/src/test/java/org/gridgain/loadtests/h2indexing/GridTreeBenchmark.java
----------------------------------------------------------------------
diff --git a/modules/indexing/src/test/java/org/gridgain/loadtests/h2indexing/GridTreeBenchmark.java b/modules/indexing/src/test/java/org/gridgain/loadtests/h2indexing/GridTreeBenchmark.java
deleted file mode 100644
index 1a603db..0000000
--- a/modules/indexing/src/test/java/org/gridgain/loadtests/h2indexing/GridTreeBenchmark.java
+++ /dev/null
@@ -1,276 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.gridgain.loadtests.h2indexing;
-
-import org.apache.ignite.internal.util.snaptree.*;
-
-import java.util.*;
-import java.util.concurrent.*;
-import java.util.concurrent.atomic.*;
-
-/**
- * NavigableMaps PUT benchmark.
- */
-public class GridTreeBenchmark {
-    /** */
-    private static final int PUTS = 8000000;
-
-    /** */
-    private static final int THREADS = 8;
-
-    /** */
-    private static final int ITERATIONS = PUTS / THREADS;
-
-    /**
-     * Main method.
-     *
-     * @param args Command line args (not used).
-     * @throws BrokenBarrierException If failed.
-     * @throws InterruptedException If failed.
-     */
-    public static void main(String... args) throws BrokenBarrierException, InterruptedException {
-        doTestMaps();
-    }
-
-    /**
-     * @throws BrokenBarrierException If failed.
-     * @throws InterruptedException If failed.
-     */
-    private static void doTestAtomicInt() throws BrokenBarrierException, InterruptedException {
-        final AtomicInteger[] cnts = new AtomicInteger[8];
-
-        for (int i = 0; i < cnts.length; i++)
-            cnts[i] = new AtomicInteger();
-
-        final Thread[] ths = new Thread[THREADS];
-
-        final CyclicBarrier barrier = new CyclicBarrier(THREADS + 1);
-
-        final AtomicInteger cnt = new AtomicInteger();
-
-        for (int i = 0; i < ths.length; i++) {
-            ths[i] = new Thread(new Runnable() {
-                @Override public void run() {
-                    int idx = cnt.getAndIncrement();
-
-                    AtomicInteger x = cnts[idx % cnts.length];
-
-                    try {
-                        barrier.await();
-                    }
-                    catch (Exception e) {
-                        throw new IllegalStateException(e);
-                    }
-
-                    for (int i = 0; i < ITERATIONS; i++)
-                        x.incrementAndGet();
-                }
-            });
-
-            ths[i].start();
-        }
-
-        barrier.await();
-
-        long start = System.currentTimeMillis();
-
-        for (Thread t : ths)
-            t.join();
-
-        long time = System.currentTimeMillis() - start;
-
-        System.out.println(time);
-
-    }
-
-    /**
-     * @throws BrokenBarrierException If failed.
-     * @throws InterruptedException If failed.
-     */
-    private static void doTestMaps() throws BrokenBarrierException, InterruptedException {
-        final UUID[] data = generate();
-
-        @SuppressWarnings("unchecked")
-        final Map<UUID, UUID>[] maps = new Map[4];
-
-        for (int i = 0; i < maps.length; i++)
-            maps[i] =
-                new SnapTreeMap<>();
-
-
-        final Thread[] ths = new Thread[THREADS];
-
-        final CyclicBarrier barrier = new CyclicBarrier(THREADS + 1);
-
-        final AtomicInteger cnt = new AtomicInteger();
-
-        for (int i = 0; i < ths.length; i++) {
-            ths[i] = new Thread(new Runnable() {
-                @Override public void run() {
-                    int idx = cnt.getAndIncrement();
-
-                    int off = idx * ITERATIONS;
-
-                    Map<UUID, UUID> map = maps[idx % maps.length];
-
-                    try {
-                        barrier.await();
-                    }
-                    catch (Exception e) {
-                        throw new IllegalStateException(e);
-                    }
-
-                    for (int i = 0; i < ITERATIONS; i++) {
-                        UUID id = data[off + i];
-
-                        id = map.put(id, id);
-
-                        assert id == null;
-                    }
-                }
-            });
-
-            ths[i].start();
-        }
-
-        System.out.println("Sleep");
-        Thread.sleep(10000);
-
-        System.out.println("Go");
-        barrier.await();
-
-        long start = System.currentTimeMillis();
-
-        for (Thread t : ths)
-            t.join();
-
-        long time = System.currentTimeMillis() - start;
-
-        System.out.println(time);
-    }
-
-    /**
-     * @throws BrokenBarrierException If failed.
-     * @throws InterruptedException If failed.
-     */
-    private static void doBenchmark() throws BrokenBarrierException, InterruptedException {
-        int attemts = 20;
-        int warmups = 10;
-
-        long snapTreeTime = 0;
-        long skipListTime = 0;
-
-        for (int i = 0; i < attemts; i++) {
-            ConcurrentNavigableMap<UUID, UUID> skipList = new ConcurrentSkipListMap<>();
-            ConcurrentNavigableMap<UUID, UUID> snapTree = new SnapTreeMap<>();
-
-            UUID[] ids = generate();
-
-            boolean warmup = i < warmups;
-
-            snapTreeTime += doTest(snapTree, ids, warmup);
-            skipListTime += doTest(skipList, ids, warmup);
-
-            assert skipList.size() == snapTree.size();
-
-            Iterator<UUID> snapIt = snapTree.keySet().iterator();
-            Iterator<UUID> listIt = skipList.keySet().iterator();
-
-            for (int x = 0, len = skipList.size(); x < len; x++)
-                assert snapIt.next() == listIt.next();
-
-            System.out.println(i + " ==================");
-        }
-
-        attemts -= warmups;
-
-        System.out.println("Avg for GridSnapTreeMap: " + (snapTreeTime / 
attemts) + " ms");
-        System.out.println("Avg for ConcurrentSkipListMap: " + (skipListTime / 
attemts) + " ms");
-     }
-
-    /**
-     * @return UUIDs.
-     */
-    private static UUID[] generate() {
-        UUID[] ids = new UUID[ITERATIONS * THREADS];
-
-        for (int i = 0; i < ids.length; i++)
-            ids[i] = UUID.randomUUID();
-
-        return ids;
-    }
-
-    /**
-     * @param tree Tree.
-     * @param data Data.
-     * @param warmup Warmup.
-     * @return Time.
-     * @throws BrokenBarrierException If failed.
-     * @throws InterruptedException If failed.
-     */
-    private static long doTest(final ConcurrentNavigableMap<UUID, UUID> tree, final UUID[] data, boolean warmup)
-        throws BrokenBarrierException, InterruptedException {
-        Thread[] ths = new Thread[THREADS];
-
-        final CyclicBarrier barrier = new CyclicBarrier(THREADS + 1);
-
-        final AtomicInteger cnt = new AtomicInteger();
-
-        for (int i = 0; i < ths.length; i++) {
-            ths[i] = new Thread(new Runnable() {
-                @Override public void run() {
-                    int off = cnt.getAndIncrement() * ITERATIONS;
-
-                    try {
-                        barrier.await();
-                    }
-                    catch (Exception e) {
-                        throw new IllegalStateException(e);
-                    }
-
-                    for (int i = 0; i < ITERATIONS; i++) {
-                        UUID id = data[off + i];
-
-                        id = tree.put(id, id);
-
-                        assert id == null;
-                    }
-                }
-            });
-
-            ths[i].start();
-        }
-
-        barrier.await();
-
-        long start = System.currentTimeMillis();
-
-        for (Thread t : ths)
-            t.join();
-
-        long time = System.currentTimeMillis() - start;
-
-        if (!warmup) {
-            System.out.println(tree.getClass().getSimpleName() + "  " + time + " ms");
-
-            return time;
-        }
-
-        return 0;
-    }
-}
