# ignite-596 Add missed scala examples and remove unnecessary scala examples.


Project: http://git-wip-us.apache.org/repos/asf/incubator-ignite/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-ignite/commit/41a5bf67
Tree: http://git-wip-us.apache.org/repos/asf/incubator-ignite/tree/41a5bf67
Diff: http://git-wip-us.apache.org/repos/asf/incubator-ignite/diff/41a5bf67

Branch: refs/heads/ignite-596
Commit: 41a5bf67b519b6fc2eb7a7d09428a9e593096021
Parents: f9a4dd7
Author: Andrey <anovi...@gridgain.com>
Authored: Wed May 20 16:05:56 2015 +0700
Committer: Andrey <anovi...@gridgain.com>
Committed: Wed May 20 16:05:56 2015 +0700

----------------------------------------------------------------------
 .../example-scalar-hibernate-L2-cache.xml       |  66 ++++
 .../computegrid/ComputeTaskMapExample.java      |   2 +-
 .../examples/datagrid/CacheQueryExample.java    |   3 +-
 .../starschema/CacheStarSchemaExample.java      |   1 -
 .../datagrid/store/auto/DbH2ServerStartup.java  |  28 +-
 .../misc/deployment/DeploymentExample.java      |   2 +-
 .../examples/ScalarCacheAffinityExample.scala   | 115 -------
 .../scalar/examples/ScalarCacheExample.scala    | 117 -------
 .../ScalarCachePopularNumbersExample.scala      | 151 ---------
 .../examples/ScalarCacheQueryExample.scala      | 192 ------------
 .../scalar/examples/ScalarClosureExample.scala  | 100 ------
 .../examples/ScalarContinuationExample.scala    | 175 -----------
 .../examples/ScalarCreditRiskExample.scala      | 249 ---------------
 .../scalar/examples/ScalarJvmCloudExample.scala |  95 ------
 .../scalar/examples/ScalarPingPongExample.scala | 160 ----------
 .../scalar/examples/ScalarPrimeExample.scala    | 134 --------
 .../scalar/examples/ScalarScheduleExample.scala |  66 ----
 .../examples/ScalarSnowflakeSchemaExample.scala | 312 ------------------
 .../scalar/examples/ScalarTaskExample.scala     |  55 ----
 .../examples/ScalarWorldShortestMapReduce.scala |  42 ---
 .../computegrid/ScalarComputeAsyncExample.scala |  59 ++++
 .../ScalarComputeBroadcastExample.scala         | 107 +++++++
 .../ScalarComputeCallableExample.scala          |  73 +++++
 .../ScalarComputeClosureExample.scala           |  64 ++++
 .../ScalarComputeContinuousMapperExample.scala  | 144 +++++++++
 ...larComputeFibonacciContinuationExample.scala | 159 ++++++++++
 .../ScalarComputeReduceExample.scala            |  83 +++++
 .../ScalarComputeRunnableExample.scala          |  58 ++++
 .../ScalarComputeTaskMapExample.scala           | 108 +++++++
 .../ScalarComputeTaskSplitExample.scala         |  97 ++++++
 .../ScalarComputeClusterGroupExample.scala      |  88 ++++++
 .../failover/ScalarComputeFailoverExample.scala | 124 ++++++++
 .../ScalarComputeCreditRiskExample.scala        | 231 ++++++++++++++
 .../datagrid/ScalarCacheAffinityExample.scala   | 117 +++++++
 .../datagrid/ScalarCacheApiExample.scala        | 101 ++++++
 .../datagrid/ScalarCacheAsyncApiExample.scala   |  75 +++++
 .../ScalarCacheContinuousQueryExample.scala     | 108 +++++++
 .../ScalarCacheDataStreamerExample.scala        |  88 ++++++
 .../datagrid/ScalarCacheEventsExample.scala     |  89 ++++++
 .../datagrid/ScalarCachePutGetExample.scala     | 106 +++++++
 .../datagrid/ScalarCacheQueryExample.scala      | 305 ++++++++++++++++++
 .../ScalarCacheTransactionExample.scala         | 127 ++++++++
 .../examples/datagrid/hibernate/Post.scala      | 115 +++++++
 .../ScalarHibernateL2CacheExample.scala         | 242 ++++++++++++++
 .../examples/datagrid/hibernate/User.scala      | 136 ++++++++
 .../starschema/ScalarStarSchemaExample.scala    | 314 +++++++++++++++++++
 .../scalar/examples/datagrid/store/Person.scala |  93 ++++++
 .../datagrid/store/auto/CacheConfig.scala       |  77 +++++
 .../auto/ScalarCacheAutoStoreExample.scala      |  94 ++++++
 .../ScalarCacheAutoStoreLoadDataExample.scala   |  84 +++++
 .../dummy/ScalarCacheDummyPersonStore.scala     | 101 ++++++
 .../dummy/ScalarCacheDummyStoreExample.scala    | 133 ++++++++
 .../datagrid/store/hibernate/Person.hbm.xml     |  36 +++
 .../ScalarCacheHibernatePersonStore.scala       | 268 ++++++++++++++++
 .../ScalarCacheHibernateStoreExample.scala      | 133 ++++++++
 .../datagrid/store/hibernate/hibernate.cfg.xml  |  43 +++
 .../store/jdbc/ScalarCacheJdbcPersonStore.scala | 311 ++++++++++++++++++
 .../jdbc/ScalarCacheJdbcStoreExample.scala      | 159 ++++++++++
 .../ScalarExecutorServiceExample.scala          |  64 ++++
 .../ScalarIgniteAtomicLongExample.scala         |  74 +++++
 .../ScalarIgniteAtomicSequenceExample.scala     |  69 ++++
 .../ScalarIgniteAtomicStampedExample.scala      |  96 ++++++
 .../ScalarIgniteCountDownLatchExample.scala     |  75 +++++
 .../ScalarIgniteQueueExample.scala              | 180 +++++++++++
 .../datastructures/ScalarIgniteSetExample.scala | 154 +++++++++
 .../examples/events/ScalarEventsExample.scala   | 123 ++++++++
 .../examples/igfs/ScalarIgfsExample.scala       | 285 +++++++++++++++++
 .../igfs/ScalarIgfsMapReduceExample.scala       | 231 ++++++++++++++
 .../messaging/ScalarMessagingExample.scala      | 166 ++++++++++
 .../ScalarMessagingPingPongExample.scala        | 179 +++++++++++
 ...larMessagingPingPongListenActorExample.scala | 161 ++++++++++
 .../memcache/ScalarMemcacheRestExample.scala    | 120 +++++++
 .../deployment/ScalarDeploymentExample.scala    | 126 ++++++++
 .../misc/lifecycle/ScalarLifecycleExample.scala |  90 ++++++
 .../schedule/ScalarComputeScheduleExample.scala |  73 +++++
 .../springbean/ScalarSpringBeanExample.scala    |  80 +++++
 .../examples/misc/springbean/spring-bean.xml    |  72 +++++
 .../servicegrid/ScalarServicesExample.scala     | 147 +++++++++
 .../ScalarStreamTransformerExample.scala        | 104 ++++++
 .../streaming/ScalarStreamVisitorExample.scala  | 160 ++++++++++
 .../tests/examples/ScalarExamplesSelfTest.scala | 280 +++++++++++++++--
 .../scala/org/apache/ignite/scalar/scalar.scala | 125 +++++++-
 82 files changed, 8140 insertions(+), 2009 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/41a5bf67/examples/config/hibernate/example-scalar-hibernate-L2-cache.xml
----------------------------------------------------------------------
diff --git a/examples/config/hibernate/example-scalar-hibernate-L2-cache.xml 
b/examples/config/hibernate/example-scalar-hibernate-L2-cache.xml
new file mode 100644
index 0000000..80bb653
--- /dev/null
+++ b/examples/config/hibernate/example-scalar-hibernate-L2-cache.xml
@@ -0,0 +1,66 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+  ~ /*
+  ~  * Licensed to the Apache Software Foundation (ASF) under one or more
+  ~  * contributor license agreements.  See the NOTICE file distributed with
+  ~  * this work for additional information regarding copyright ownership.
+  ~  * The ASF licenses this file to You under the Apache License, Version 2.0
+  ~  * (the "License"); you may not use this file except in compliance with
+  ~  * the License.  You may obtain a copy of the License at
+  ~  *
+  ~  *      http://www.apache.org/licenses/LICENSE-2.0
+  ~  *
+  ~  * Unless required by applicable law or agreed to in writing, software
+  ~  * distributed under the License is distributed on an "AS IS" BASIS,
+  ~  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~  * See the License for the specific language governing permissions and
+  ~  * limitations under the License.
+  ~  */
+  -->
+
+
+<!DOCTYPE hibernate-configuration PUBLIC
+    "-//Hibernate/Hibernate Configuration DTD 3.0//EN"
+    "http://www.hibernate.org/dtd/hibernate-configuration-3.0.dtd">
+
+<!--
+    Configuration file for HibernateL2CacheExample.
+-->
+
+<hibernate-configuration>
+    <session-factory>
+        <!-- Database connection settings -->
+        <property 
name="connection.url">jdbc:h2:mem:example;DB_CLOSE_DELAY=-1</property>
+
+        <!-- Drop and re-create the database schema on startup. -->
+        <property name="hbm2ddl.auto">create</property>
+
+        <!-- Enable L2 cache. -->
+        <property name="cache.use_second_level_cache">true</property>
+
+        <!-- Enable query cache. -->
+        <property name="cache.use_query_cache">true</property>
+
+        <!-- Generate L2 cache statistics. -->
+        <property name="generate_statistics">true</property>
+
+        <!-- Specify Ignite as L2 cache provider. -->
+        <property 
name="cache.region.factory_class">org.apache.ignite.cache.hibernate.HibernateRegionFactory</property>
+
+        <!-- Specify connection release mode. -->
+        <property name="connection.release_mode">on_close</property>
+
+        <!-- Set default L2 cache access type. -->
+        <property 
name="org.apache.ignite.hibernate.default_access_type">READ_ONLY</property>
+
+        <!-- Specify the entity classes for mapping. -->
+        <mapping 
class="org.apache.ignite.scalar.examples.datagrid.hibernate.User"/>
+        <mapping 
class="org.apache.ignite.scalar.examples.datagrid.hibernate.Post"/>
+
+        <!-- Per-class L2 cache settings. -->
+        <class-cache 
class="org.apache.ignite.scalar.examples.datagrid.hibernate.User" 
usage="read-only"/>
+        <class-cache 
class="org.apache.ignite.scalar.examples.datagrid.hibernate.Post" 
usage="read-only"/>
+        <collection-cache 
collection="org.apache.ignite.scalar.examples.datagrid.hibernate.User.posts" 
usage="read-only"/>
+    </session-factory>
+</hibernate-configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/41a5bf67/examples/src/main/java/org/apache/ignite/examples/computegrid/ComputeTaskMapExample.java
----------------------------------------------------------------------
diff --git 
a/examples/src/main/java/org/apache/ignite/examples/computegrid/ComputeTaskMapExample.java
 
b/examples/src/main/java/org/apache/ignite/examples/computegrid/ComputeTaskMapExample.java
index a06299f..9ca51f7 100644
--- 
a/examples/src/main/java/org/apache/ignite/examples/computegrid/ComputeTaskMapExample.java
+++ 
b/examples/src/main/java/org/apache/ignite/examples/computegrid/ComputeTaskMapExample.java
@@ -79,7 +79,7 @@ public class ComputeTaskMapExample {
 
             Iterator<ClusterNode> it = nodes.iterator();
 
-            for (final String word : arg.split(" ")) {
+            for (final String word : words) {
                 // If we used all nodes, restart the iterator.
                 if (!it.hasNext())
                     it = nodes.iterator();

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/41a5bf67/examples/src/main/java/org/apache/ignite/examples/datagrid/CacheQueryExample.java
----------------------------------------------------------------------
diff --git 
a/examples/src/main/java/org/apache/ignite/examples/datagrid/CacheQueryExample.java
 
b/examples/src/main/java/org/apache/ignite/examples/datagrid/CacheQueryExample.java
index 6d6126d..cca767e 100644
--- 
a/examples/src/main/java/org/apache/ignite/examples/datagrid/CacheQueryExample.java
+++ 
b/examples/src/main/java/org/apache/ignite/examples/datagrid/CacheQueryExample.java
@@ -70,8 +70,7 @@ public class CacheQueryExample {
      */
     public static void main(String[] args) throws Exception {
         try (Ignite ignite = 
Ignition.start("examples/config/example-ignite.xml")) {
-            System.out.println();
-            System.out.println(">>> Cache query example started.");
+            print("Cache query example started.");
 
             CacheConfiguration<UUID, Organization> orgCacheCfg = new 
CacheConfiguration<>(ORG_CACHE);
 

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/41a5bf67/examples/src/main/java/org/apache/ignite/examples/datagrid/starschema/CacheStarSchemaExample.java
----------------------------------------------------------------------
diff --git 
a/examples/src/main/java/org/apache/ignite/examples/datagrid/starschema/CacheStarSchemaExample.java
 
b/examples/src/main/java/org/apache/ignite/examples/datagrid/starschema/CacheStarSchemaExample.java
index bef2457..8c2f7c8 100644
--- 
a/examples/src/main/java/org/apache/ignite/examples/datagrid/starschema/CacheStarSchemaExample.java
+++ 
b/examples/src/main/java/org/apache/ignite/examples/datagrid/starschema/CacheStarSchemaExample.java
@@ -69,7 +69,6 @@ public class CacheStarSchemaExample {
      */
     public static void main(String[] args) {
         try (Ignite ignite = 
Ignition.start("examples/config/example-ignite.xml")) {
-
             System.out.println();
             System.out.println(">>> Cache star schema example started.");
 

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/41a5bf67/examples/src/main/java/org/apache/ignite/examples/datagrid/store/auto/DbH2ServerStartup.java
----------------------------------------------------------------------
diff --git 
a/examples/src/main/java/org/apache/ignite/examples/datagrid/store/auto/DbH2ServerStartup.java
 
b/examples/src/main/java/org/apache/ignite/examples/datagrid/store/auto/DbH2ServerStartup.java
index 96e4622..908aad3 100644
--- 
a/examples/src/main/java/org/apache/ignite/examples/datagrid/store/auto/DbH2ServerStartup.java
+++ 
b/examples/src/main/java/org/apache/ignite/examples/datagrid/store/auto/DbH2ServerStartup.java
@@ -48,9 +48,25 @@ public class DbH2ServerStartup {
      * @throws IgniteException If start H2 database TCP server failed.
      */
     public static void main(String[] args) throws IgniteException {
+        if (startServer() != null) {
+            try {
+                do {
+                    System.out.println("Type 'q' and press 'Enter' to stop H2 
TCP server...");
+                }
+                while ('q' != System.in.read());
+            }
+            catch (IOException ignored) {
+                // No-op.
+            }
+        }
+    }
+
+    public static Server startServer() {
+        Server srv = null;
+
         try {
             // Start H2 database TCP server in order to access sample 
in-memory database from other processes.
-            Server.createTcpServer("-tcpDaemon").start();
+            srv = Server.createTcpServer("-tcpDaemon").start();
 
             // Try to connect to database TCP server.
             JdbcConnectionPool dataSrc = 
JdbcConnectionPool.create("jdbc:h2:tcp://localhost/mem:ExampleDb", "sa", "");
@@ -65,14 +81,6 @@ public class DbH2ServerStartup {
             throw new IgniteException("Failed to start database TCP server", 
e);
         }
 
-        try {
-            do {
-                System.out.println("Type 'q' and press 'Enter' to stop H2 TCP 
server...");
-            }
-            while ('q' != System.in.read());
-        }
-        catch (IOException ignored) {
-            // No-op.
-        }
+        return srv;
     }
 }

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/41a5bf67/examples/src/main/java/org/apache/ignite/examples/misc/deployment/DeploymentExample.java
----------------------------------------------------------------------
diff --git 
a/examples/src/main/java/org/apache/ignite/examples/misc/deployment/DeploymentExample.java
 
b/examples/src/main/java/org/apache/ignite/examples/misc/deployment/DeploymentExample.java
index 5336620..f7779ca 100644
--- 
a/examples/src/main/java/org/apache/ignite/examples/misc/deployment/DeploymentExample.java
+++ 
b/examples/src/main/java/org/apache/ignite/examples/misc/deployment/DeploymentExample.java
@@ -102,7 +102,7 @@ public final class DeploymentExample {
      * default to the task class name.
      */
     @ComputeTaskName(TASK_NAME)
-    public static class ExampleTask extends ComputeTaskSplitAdapter<String, 
Object> {
+    static class ExampleTask extends ComputeTaskSplitAdapter<String, Object> {
         /** {@inheritDoc} */
         @Override protected Collection<? extends ComputeJob> split(int 
clusterSize, String arg) {
             Collection<ComputeJob> jobs = new ArrayList<>(clusterSize);

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/41a5bf67/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheAffinityExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheAffinityExample.scala
 
b/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheAffinityExample.scala
deleted file mode 100644
index fbf66bc..0000000
--- 
a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheAffinityExample.scala
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.scalar.examples
-
-import org.apache.ignite.IgniteCache
-import org.apache.ignite.scalar.scalar
-import org.apache.ignite.scalar.scalar._
-
-import scala.collection.JavaConversions._
-
-/**
- * This example demonstrates the simplest code that populates the distributed 
cache
- * and co-locates simple closure execution with each key. The goal of this 
particular
- * example is to provide the simplest code example of this logic.
- * <p/>
- * Remote nodes should always be started with special configuration file which
- * enables P2P class loading: `'ignite.{sh|bat} 
examples/config/example-ignite.xml'`.
- * <p/>
- * Alternatively you can run `ExampleNodeStartup` in another JVM which will
- * start node with `examples/config/example-ignite.xml` configuration.
- */
-object ScalarCacheAffinityExample extends App {
-    /** Configuration file name. */
-    private val CONFIG = "examples/config/example-ignite.xml"
-
-    /** Name of cache. */
-    private val NAME = ScalarCacheAffinityExample.getClass.getSimpleName
-
-    /** Number of keys. */
-    private val KEY_CNT = 20
-
-    /** Type alias. */
-    type Cache = IgniteCache[Int, String]
-
-    /*
-     * Note that in case of `LOCAL` configuration,
-     * since there is no distribution, values may come back as `nulls`.
-     */
-    scalar(CONFIG) {
-        val cache = createCache$[Int, String](NAME)
-
-        try {
-            populate (cache)
-
-            visitUsingAffinityRun(cache)
-
-            visitUsingMapKeysToNodes(cache)
-        }
-        finally {
-            cache.close()
-        }
-    }
-
-    /**
-     * Visits every in-memory data ignite entry on the remote node it resides 
by co-locating visiting
-     * closure with the cache key.
-     *
-     * @param c Cache to use.
-     */
-    private def visitUsingAffinityRun(c: IgniteCache[Int, String]) {
-        (0 until KEY_CNT).foreach (i =>
-            ignite$.compute ().affinityRun (NAME, i,
-                () => println ("Co-located using affinityRun [key= " + i + ", 
value=" + c.localPeek (i) + ']') )
-        )
-    }
-
-    /**
-     * Collocates jobs with keys they need to work.
-     *
-     * @param c Cache to use.
-     */
-    private def visitUsingMapKeysToNodes(c: IgniteCache[Int, String]) {
-        val keys = (0 until KEY_CNT).toSeq
-
-        // Map all keys to nodes.
-        val mappings = ignite$.cluster().mapKeysToNodes(NAME, keys)
-
-        mappings.foreach(mapping => {
-            val node = mapping._1
-            val mappedKeys = mapping._2
-
-            if (node != null) {
-                ignite$.cluster().forNode(node) *< (() => {
-                    // Check cache without loading the value.
-                    mappedKeys.foreach(key => println("Co-located using 
mapKeysToNodes [key= " + key +
-                        ", value=" + c.localPeek(key) + ']'))
-                }, null)
-            }
-        })
-    }
-
-    /**
-     * Populates given cache.
-     *
-     * @param c Cache to populate.
-     */
-    private def populate(c: Cache) {
-        (0 until KEY_CNT).foreach(i => c += (i -> i.toString))
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/41a5bf67/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheExample.scala
 
b/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheExample.scala
deleted file mode 100644
index 42e8ca4..0000000
--- 
a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheExample.scala
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.scalar.examples
-
-import org.apache.ignite.events.Event
-import org.apache.ignite.events.EventType._
-import org.apache.ignite.lang.IgnitePredicate
-import org.apache.ignite.scalar.scalar
-import org.apache.ignite.scalar.scalar._
-
-import scala.collection.JavaConversions._
-
-/**
- * Demonstrates basic In-Memory Data Ignite Cluster operations with Scalar.
- * <p/>
- * Remote nodes should always be started with special configuration file which
- * enables P2P class loading: `'ignite.{sh|bat} 
examples/config/example-ignite.xml'`.
- * <p/>
- * Alternatively you can run `ExampleNodeStartup` in another JVM which will
- * start node with `examples/config/example-ignite.xml` configuration.
- */
-object ScalarCacheExample extends App {
-    /** Configuration file name. */
-    private val CONFIG = "examples/config/example-ignite.xml"
-
-    /** Name of cache specified in spring configuration. */
-    private val NAME = ScalarCacheExample.getClass.getSimpleName
-
-    scalar(CONFIG) {
-        val cache = createCache$[String, Int](NAME)
-
-        try {
-            registerListener()
-
-            basicOperations()
-        }
-        finally {
-            cache.close()
-        }
-    }
-
-    /**
-     * Demos basic cache operations.
-     */
-    def basicOperations() {
-        val c = cache$[String, Int](NAME).get
-
-        // Add few values.
-        c += (1.toString -> 1)
-        c += (2.toString -> 2)
-
-        // Update values.
-        c += (1.toString -> 11)
-        c += (2.toString -> 22)
-
-        c += (1.toString -> 31)
-        c += (2.toString -> 32)
-        c += ((2.toString, 32))
-
-        // Remove couple of keys (if any).
-        c -= (11.toString, 22.toString)
-
-        // Put one more value.
-        c += (3.toString -> 11)
-
-        // Get with option...
-        c.opt(44.toString) match {
-            case Some(v) => sys.error("Should never happen.")
-            case None => println("Correct")
-        }
-
-        // Print all values.
-        c.iterator() foreach println
-    }
-
-    /**
-     * This method will register listener for cache events on all nodes,
-     * so we can actually see what happens underneath locally and remotely.
-     */
-    def registerListener() {
-        val g = ignite$
-
-        g *< (() => {
-            val lsnr = new IgnitePredicate[Event] {
-                override def apply(e: Event): Boolean = {
-                    println(e.shortDisplay)
-
-                    true
-                }
-            }
-
-            if (g.cluster().nodeLocalMap[String, AnyRef].putIfAbsent("lsnr", 
lsnr) == null) {
-                g.events().localListen(lsnr,
-                    EVT_CACHE_OBJECT_PUT,
-                    EVT_CACHE_OBJECT_READ,
-                    EVT_CACHE_OBJECT_REMOVED)
-
-                println("Listener is registered.")
-            }
-        }, null)
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/41a5bf67/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCachePopularNumbersExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCachePopularNumbersExample.scala
 
b/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCachePopularNumbersExample.scala
deleted file mode 100644
index 828c5a3..0000000
--- 
a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCachePopularNumbersExample.scala
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.scalar.examples
-
-import java.lang.{Integer => JavaInt, Long => JavaLong}
-import java.util
-import java.util.Map.Entry
-import java.util.Timer
-import javax.cache.processor.{EntryProcessor, MutableEntry}
-
-import org.apache.ignite.cache.query.SqlFieldsQuery
-import org.apache.ignite.internal.util.scala.impl
-import org.apache.ignite.scalar.scalar
-import org.apache.ignite.scalar.scalar._
-import org.apache.ignite.stream.StreamReceiver
-import org.apache.ignite.{IgniteCache, IgniteException}
-
-import scala.collection.JavaConversions._
-import scala.util.Random
-
-/**
- * Real time popular number counter.
- * <p/>
- * Remote nodes should always be started with special configuration file which
- * enables P2P class loading: `'ignite.{sh|bat} 
examples/config/example-ignite.xml'`.
- * <p/>
- * Alternatively you can run `ExampleNodeStartup` in another JVM which will
- * start node with `examples/config/example-ignite.xml` configuration.
- * <p/>
- * The counts are kept in cache on all remote nodes. Top `10` counts from each 
node are then grabbed to produce
- * an overall top `10` list within the ignite.
- */
-object ScalarCachePopularNumbersExample extends App {
-    /** Configuration file name. */
-    private val CONFIG = "examples/config/example-ignite.xml"
-
-    /** Cache name. */
-    private final val NAME = 
ScalarCachePopularNumbersExample.getClass.getSimpleName
-
-    /** Count of most popular numbers to retrieve from cluster. */
-    private final val POPULAR_NUMBERS_CNT = 10
-
-    /** Random number generator. */
-    private final val RAND = new Random()
-
-    /** Range within which to generate numbers. */
-    private final val RANGE = 1000
-
-    /** Count of total numbers to generate. */
-    private final val CNT = 1000000
-
-    scalar(CONFIG) {
-        val cache = createCache$[JavaInt, JavaLong](NAME, indexedTypes = 
Seq(classOf[JavaInt], classOf[JavaLong]))
-
-        println()
-        println(">>> Cache popular numbers example started.")
-
-        try {
-            val prj = ignite$.cluster().forCacheNodes(NAME)
-
-            if (prj.nodes().isEmpty)
-                println("Ignite does not have cache configured: " + NAME)
-            else {
-                val popularNumbersQryTimer = new Timer("numbers-query-worker")
-
-                try {
-                    // Schedule queries to run every 3 seconds during 
populates cache phase.
-                    
popularNumbersQryTimer.schedule(timerTask(query(POPULAR_NUMBERS_CNT)), 3000, 
3000)
-
-                    streamData()
-
-                    // Force one more run to get final counts.
-                    query(POPULAR_NUMBERS_CNT)
-                }
-                finally {
-                    popularNumbersQryTimer.cancel()
-                }
-            }
-        }
-        finally {
-            cache.close()
-        }
-    }
-
-    /**
-     * Populates cache in real time with numbers and keeps count for every 
number.
-     * @throws IgniteException If failed.
-     */
-    @throws[IgniteException]
-    def streamData() {
-        // Set larger per-node buffer size since our state is relatively small.
-        // Reduce parallel operations since we running the whole ignite 
cluster locally under heavy load.
-        val smtr = dataStreamer$[JavaInt, JavaLong](NAME, 2048)
-
-        smtr.receiver(new IncrementingUpdater())
-
-        (0 until CNT) foreach (_ => smtr.addData(RAND.nextInt(RANGE), 1L))
-
-        smtr.close(false)
-    }
-
-    /**
-     * Queries a subset of most popular numbers from in-memory data ignite 
cluster.
-     *
-     * @param cnt Number of most popular numbers to return.
-     */
-    def query(cnt: Int) {
-        val results = cache$[JavaInt, JavaLong](NAME).get
-            .query(new SqlFieldsQuery("select _key, _val from Long order by 
_val desc, _key limit " + cnt))
-            .getAll
-
-        results.foreach(res => println(res.get(0) + "=" + res.get(1)))
-
-        println("------------------")
-    }
-
-    /**
-     * Increments value for key.
-     */
-    private class IncrementingUpdater extends StreamReceiver[JavaInt, 
JavaLong] {
-        private[this] final val INC = new EntryProcessor[JavaInt, JavaLong, 
Object]() {
-            /** Process entries to increase value by entry key. */
-            override def process(e: MutableEntry[JavaInt, JavaLong], args: 
AnyRef*): Object = {
-                e.setValue(Option(e.getValue)
-                    .map(l => JavaLong.valueOf(l + 1))
-                    .getOrElse(JavaLong.valueOf(1L)))
-
-                null
-            }
-        }
-
-        @impl def receive(cache: IgniteCache[JavaInt, JavaLong], entries: 
util.Collection[Entry[JavaInt, JavaLong]]) {
-            entries.foreach(entry => cache.invoke(entry.getKey, INC))
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/41a5bf67/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheQueryExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheQueryExample.scala
 
b/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheQueryExample.scala
deleted file mode 100644
index b8054eb..0000000
--- 
a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCacheQueryExample.scala
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.scalar.examples
-
-import java.util._
-
-import org.apache.ignite.cache.CacheMode._
-import org.apache.ignite.cache.affinity.AffinityKey
-import org.apache.ignite.configuration.CacheConfiguration
-import org.apache.ignite.scalar.scalar
-import org.apache.ignite.scalar.scalar._
-import org.apache.ignite.{Ignite, IgniteCache}
-
-import scala.collection.JavaConversions._
-
-/**
- * Demonstrates cache ad-hoc queries with Scalar.
- * <p/>
- * Remote nodes should be started using `ExampleNodeStartup` which will
- * start node with `examples/config/example-ignite.xml` configuration.
- */
-object ScalarCacheQueryExample {
-    /** Configuration file name. */
-    private val CONFIG = "examples/config/example-ignite.xml"
-
-    /** Cache name. */
-    private val NAME = ScalarCacheQueryExample.getClass.getSimpleName
-
-    /**
-     * Example entry point. No arguments required.
-     *
-     * @param args Command line arguments. None required.
-     */
-    def main(args: Array[String]) {
-        scalar(CONFIG) {
-            val cache = createCache$(NAME, indexedTypes = Seq(classOf[UUID], 
classOf[Organization],
-                classOf[AffinityKey[_]], classOf[Person]))
-
-            try {
-                example(ignite$)
-            }
-            finally {
-                cache.close()
-            }
-        }
-    }
-
-    /**
-     * Runs the example.
-     *
-     * @param ignite Ignite instance to use.
-     */
-    private def example(ignite: Ignite) {
-        // Populate cache.
-        initialize()
-
-        // Cache instance shortcut.
-        val cache = mkCache[AffinityKey[UUID], Person]
-
-        // Using distributed queries for partitioned cache and local queries 
for replicated cache.
-        // Since in replicated caches data is available on all nodes, 
including local one,
-        // it is enough to just query the local node.
-        val prj = if 
(cache.getConfiguration(classOf[CacheConfiguration[AffinityKey[UUID], 
Person]]).getCacheMode == PARTITIONED)
-            ignite.cluster().forRemotes()
-        else
-            ignite.cluster().forLocal()
-
-        // Example for SQL-based querying employees based on salary ranges.
-        // Gets all persons with 'salary > 1000'.
-        print("People with salary more than 1000: ", cache.sql("salary > 
1000").getAll.map(e => e.getValue))
-
-        // Example for TEXT-based querying for a given string in people 
resumes.
-        // Gets all persons with 'Bachelor' degree.
-        print("People with Bachelor degree: ", 
cache.text("Bachelor").getAll.map(e => e.getValue))
-    }
-
-    /**
-     * Gets instance of typed cache view to use.
-     *
-     * @return Cache to use.
-     */
-    private def mkCache[K, V]: IgniteCache[K, V] = cache$[K, V](NAME).get
-
-    /**
-     * Populates cache with test data.
-     */
-    private def initialize() {
-        // Clean up caches on all nodes before run.
-        cache$(NAME).get.clear()
-
-        // Organization cache projection.
-        val orgCache = mkCache[UUID, Organization]
-
-        // Organizations.
-        val org1 = Organization("Ignite")
-        val org2 = Organization("Other")
-
-        orgCache += (org1.id -> org1)
-        orgCache += (org2.id -> org2)
-
-        // Person cache projection.
-        val prnCache = mkCache[AffinityKey[UUID], Person]
-
-        // People.
-        val p1 = Person(org1, "John", "Doe", 2000, "John Doe has Master 
Degree.")
-        val p2 = Person(org1, "Jane", "Doe", 1000, "Jane Doe has Bachelor 
Degree.")
-        val p3 = Person(org2, "John", "Smith", 1500, "John Smith has Bachelor 
Degree.")
-        val p4 = Person(org2, "Jane", "Smith", 2500, "Jane Smith has Master 
Degree.")
-
-        // Note that in this example we use custom affinity key for Person 
objects
-        // to ensure that all persons are collocated with their organizations.
-        prnCache += (p1.key -> p1)
-        prnCache += (p2.key -> p2)
-        prnCache += (p3.key -> p3)
-        prnCache += (p4.key -> p4)
-    }
-
-    /**
-     * Prints object or collection of objects to standard out.
-     *
-     * @param msg Message to print before object is printed.
-     * @param o Object to print, can be `Iterable`.
-     */
-    private def print(msg: String, o: Any) {
-        assert(msg != null)
-        assert(o != null)
-
-        println(">>> " + msg)
-
-        o match {
-            case it: Iterable[Any] => it.foreach(e => println(">>>     " + 
e.toString))
-            case _ => println(">>>     " + o.toString)
-        }
-    }
-}
-
-/**
- * Organization class.
- */
-private case class Organization(
-    @ScalarCacheQuerySqlField
-    name: String
-) {
-    /** Organization ID. */
-    @ScalarCacheQuerySqlField
-    val id = UUID.randomUUID
-}
-
-/**
- * Person class.
- */
-private case class Person(
-    org: Organization,
-    firstName: String,
-    lastName: String,
-    @ScalarCacheQuerySqlField
-    salary: Double,
-    @ScalarCacheQueryTextField
-    resume: String
-) {
-    /** Person ID. */
-    val id = UUID.randomUUID
-
-    /** Organization ID. */
-    @ScalarCacheQuerySqlField
-    val orgId = org.id
-
-    /** Affinity key for this person. */
-    val key = new AffinityKey[UUID](id, org.id)
-
-    /**
-     * `toString` implementation.
-     */
-    override def toString: String = {
-        firstName + " " + lastName + " [salary: " + salary + ", resume: " + 
resume + "]"
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/41a5bf67/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarClosureExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarClosureExample.scala
 
b/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarClosureExample.scala
deleted file mode 100644
index 719f216..0000000
--- 
a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarClosureExample.scala
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.scalar.examples
-
-import org.apache.ignite.cluster.ClusterNode
-import org.apache.ignite.scalar.scalar
-import org.apache.ignite.scalar.scalar._
-
-/**
- * Demonstrates various closure executions on the cloud using Scalar.
- * <p/>
- * Remote nodes should always be started with special configuration file which
- * enables P2P class loading: `'ignite.{sh|bat} 
examples/config/example-ignite.xml'`.
- * <p/>
- * Alternatively you can run `ExampleNodeStartup` in another JVM which will
- * start node with `examples/config/example-ignite.xml` configuration.
- */
-object ScalarClosureExample extends App {
-    scalar("examples/config/example-ignite.xml") {
-        topology()
-        helloWorld()
-        helloWorld2()
-        broadcast()
-        greetRemotes()
-        greetRemotesAgain()
-    }
-
-    /**
-     * Prints ignite topology.
-     */
-    def topology() {
-        ignite$ foreach (n => println("Node: " + nid8$(n)))
-    }
-
-    /**
-     * Obligatory example (2) - cloud enabled Hello World!
-     */
-    def helloWorld2() {
-        // Notice the example usage of Java-side closure 'F.println(...)' and 
method 'scala'
-        // that explicitly converts Java side object to a proper Scala 
counterpart.
-        // This method is required since implicit conversion won't be applied 
here.
-        ignite$.run$(for (w <- "Hello World!".split(" ")) yield () => 
println(w), null)
-    }
-
-    /**
-     * Obligatory example - cloud enabled Hello World!
-     */
-    def helloWorld() {
-        ignite$.run$("HELLO WORLD!".split(" ") map (w => () => println(w)), 
null)
-    }
-
-    /**
-     * One way to execute closures on the ignite cluster.
-     */
-    def broadcast() {
-        ignite$.bcastRun(() => println("Broadcasting!!!"), null)
-    }
-
-    /**
-     *  Greats all remote nodes only.
-     */
-    def greetRemotes() {
-        val me = ignite$.cluster().localNode.id
-
-        // Note that usage Java-based closure.
-        ignite$.cluster().forRemotes() match {
-            case p if p.isEmpty => println("No remote nodes!")
-            case p => p.bcastRun(() => println("Greetings from: " + me), null)
-        }
-    }
-
-    /**
-     * Same as previous greetings for all remote nodes but remote cluster 
group is filtered manually.
-     */
-    def greetRemotesAgain() {
-        val me = ignite$.cluster().localNode.id
-
-        // Just show that we can create any groups we like...
-        // Note that usage of Java-based closure via 'F' typedef.
-        ignite$.cluster().forPredicate((n: ClusterNode) => n.id != me) match {
-            case p if p.isEmpty => println("No remote nodes!")
-            case p => p.bcastRun(() => println("Greetings again from: " + me), 
null)
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/41a5bf67/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarContinuationExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarContinuationExample.scala
 
b/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarContinuationExample.scala
deleted file mode 100644
index 203f0b7..0000000
--- 
a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarContinuationExample.scala
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.scalar.examples
-
-import org.apache.ignite.compute.ComputeJobContext
-import org.apache.ignite.lang.{IgniteClosure, IgniteFuture}
-import org.apache.ignite.resources.JobContextResource
-import org.apache.ignite.scalar.scalar
-import org.apache.ignite.scalar.scalar._
-import org.jetbrains.annotations.Nullable
-
-import java.math._
-import java.util
-
-/**
- * This example recursively calculates `Fibonacci` numbers on the ignite 
cluster. This is
- * a powerful design pattern which allows for creation of fully distributively 
recursive
- * (a.k.a. nested) tasks or closures with continuations. This example also 
shows
- * usage of `continuations`, which allows us to wait for results from remote 
nodes
- * without blocking threads.
- * <p/>
- * Note that because this example utilizes local node storage via `NodeLocal`,
- * it gets faster if you execute it multiple times, as the more you execute it,
- * the more values it will be cached on remote nodes.
- * <p/>
- * Remote nodes should always be started with special configuration file which
- * enables P2P class loading: `'ignite.{sh|bat} 
examples/config/example-ignite.xml'`.
- * <p/>
- * Alternatively you can run `ExampleNodeStartup` in another JVM which will
- * start node with `examples/config/example-ignite.xml` configuration.
- */
-object ScalarContinuationExample {
-    def main(args: Array[String]) {
-        scalar("examples/config/example-ignite.xml") {
-            // Calculate fibonacci for N.
-            val N: Long = 100
-
-            val thisNode = ignite$.cluster().localNode
-
-            val start = System.currentTimeMillis
-
-            // Group that excludes this node if others exists.
-            val prj = if (ignite$.cluster().nodes().size() > 1) 
ignite$.cluster().forOthers(thisNode) else ignite$.cluster().forNode(thisNode)
-
-            val fib = ignite$.compute(prj).apply(new 
FibonacciClosure(thisNode.id()), N)
-
-            val duration = System.currentTimeMillis - start
-
-            println(">>>")
-            println(">>> Finished executing Fibonacci for '" + N + "' in " + 
duration + " ms.")
-            println(">>> Fibonacci sequence for input number '" + N + "' is '" 
+ fib + "'.")
-            println(">>> You should see prints out every recursive Fibonacci 
execution on cluster nodes.")
-            println(">>> Check remote nodes for output.")
-            println(">>>")
-        }
-    }
-}
-
-/**
- * Closure to execute.
- *
- * @param excludeNodeId Node to exclude from execution if there are more then 
1 node in cluster.
- */
-class FibonacciClosure (
-    private[this] val excludeNodeId: util.UUID
-) extends IgniteClosure[Long, BigInteger] {
-    // These fields must be *transient* so they do not get
-    // serialized and sent to remote nodes.
-    // However, these fields will be preserved locally while
-    // this closure is being "held", i.e. while it is suspended
-    // and is waiting to be continued.
-    @transient private var fut1, fut2: IgniteFuture[BigInteger] = null
-
-    // Auto-inject job context.
-    @JobContextResource
-    private val jobCtx: ComputeJobContext = null
-
-    @Nullable override def apply(num: Long): BigInteger = {
-        if (fut1 == null || fut2 == null) {
-            println(">>> Starting fibonacci execution for number: " + num)
-
-            // Make sure n is not negative.
-            val n = math.abs(num)
-
-            val g = ignite$
-
-            if (n <= 2)
-                return if (n == 0)
-                    BigInteger.ZERO
-                else
-                    BigInteger.ONE
-
-            // Get properly typed node-local storage.
-            val store = g.cluster().nodeLocalMap[Long, 
IgniteFuture[BigInteger]]()
-
-            // Check if value is cached in node-local store first.
-            fut1 = store.get(n - 1)
-            fut2 = store.get(n - 2)
-
-            val excludeNode = ignite$.cluster().node(excludeNodeId)
-
-            // Group that excludes node with id passed in constructor if 
others exists.
-            val prj = if (ignite$.cluster().nodes().size() > 1) 
ignite$.cluster().forOthers(excludeNode) else 
ignite$.cluster().forNode(excludeNode)
-
-            val comp = ignite$.compute(prj).withAsync()
-
-            // If future is not cached in node-local store, cache it.
-            // Note recursive execution!
-            if (fut1 == null) {
-                comp.apply(new FibonacciClosure(excludeNodeId), n - 1)
-
-                val futVal = comp.future[BigInteger]()
-
-                fut1 = store.putIfAbsent(n - 1, futVal)
-
-                if (fut1 == null)
-                    fut1 = futVal
-            }
-
-            // If future is not cached in node-local store, cache it.
-            if (fut2 == null) {
-                comp.apply(new FibonacciClosure(excludeNodeId), n - 2)
-
-                val futVal = comp.future[BigInteger]()
-
-                fut2 = store.putIfAbsent(n - 2, futVal)
-
-                if (fut2 == null)
-                    fut2 = futVal
-            }
-
-            // If futures are not done, then wait asynchronously for the result
-            if (!fut1.isDone || !fut2.isDone) {
-                val lsnr = (fut: IgniteFuture[BigInteger]) => {
-                    // This method will be called twice, once for each future.
-                    // On the second call - we have to have both futures to be 
done
-                    // - therefore we can call the continuation.
-                    if (fut1.isDone && fut2.isDone)
-                        jobCtx.callcc() // Resume job execution.
-                }
-
-                // Hold (suspend) job execution.
-                // It will be resumed in listener above via 'callcc()' call
-                // once both futures are done.
-                jobCtx.holdcc()
-
-                // Attach the same listener to both futures.
-                fut1.listen(lsnr)
-                fut2.listen(lsnr)
-
-                return null
-            }
-        }
-
-        assert(fut1.isDone && fut2.isDone)
-
-        // Return cached results.
-        fut1.get.add(fut2.get)
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/41a5bf67/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCreditRiskExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCreditRiskExample.scala
 
b/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCreditRiskExample.scala
deleted file mode 100644
index 1b0d767..0000000
--- 
a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCreditRiskExample.scala
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.scalar.examples
-
-import org.apache.ignite.scalar.scalar
-import org.apache.ignite.scalar.scalar._
-
-import scala.util.Random
-import scala.util.control.Breaks._
-
-/**
- * Scalar-based Monte-Carlo example.
- * <p/>
- * Remote nodes should always be started with special configuration file which
- * enables P2P class loading: `'ignite.{sh|bat} 
examples/config/example-ignite.xml'`.
- * <p/>
- * Alternatively you can run `ExampleNodeStartup` in another JVM which will
- * start node with `examples/config/example-ignite.xml` configuration.
- */
-object ScalarCreditRiskExample {
-    def main(args: Array[String]) {
-        scalar("examples/config/example-ignite.xml") {
-            // Create portfolio.
-            var portfolio = Seq.empty[Credit]
-
-            val rnd = new Random
-
-            // Generate some test portfolio items.
-            (0 until 5000).foreach(i =>
-                portfolio +:= Credit(
-                    50000 * rnd.nextDouble,
-                    rnd.nextInt(1000),
-                    rnd.nextDouble / 10,
-                    rnd.nextDouble / 20 + 0.02
-                )
-            )
-
-            // Forecast horizon in days.
-            val horizon = 365
-
-            // Number of Monte-Carlo iterations.
-            val iter = 10000
-
-            // Percentile.
-            val percentile = 0.95
-
-            // Mark the stopwatch.
-            val start = System.currentTimeMillis
-
-            // Calculate credit risk and print it out.
-            // As you can see the ignite cluster enabling is completely hidden 
from the caller
-            // and it is fully transparent to him. In fact, the caller is 
never directly
-            // aware if method was executed just locally or on the 100s of 
cluster nodes.
-            // Credit risk crdRisk is the minimal amount that creditor has to 
have
-            // available to cover possible defaults.
-            val crdRisk = ignite$ @< 
(closures(ignite$.cluster().nodes().size(), portfolio, horizon, iter, 
percentile),
-                (s: Seq[Double]) => s.sum / s.size, null)
-
-            println("Credit risk [crdRisk=" + crdRisk + ", duration=" +
-                (System.currentTimeMillis - start) + "ms]")
-        }
-    }
-
-    /**
-     * Creates closures for calculating credit risks.
-     *
-     * @param clusterSize Size of the cluster.
-     * @param portfolio Portfolio.
-     * @param horizon Forecast horizon in days.
-     * @param iter Number of Monte-Carlo iterations.
-     * @param percentile Percentile.
-     * @return Collection of closures.
-     */
-    private def closures(clusterSize: Int, portfolio: Seq[Credit], horizon: 
Int, iter: Int,
-        percentile: Double): Seq[() => Double] = {
-        val iterPerNode: Int = math.round(iter / 
clusterSize.asInstanceOf[Float])
-        val lastNodeIter: Int = iter - (clusterSize - 1) * iterPerNode
-
-        var cls = Seq.empty[() => Double]
-
-        (0 until clusterSize).foreach(i => {
-            val nodeIter = if (i == clusterSize - 1) lastNodeIter else 
iterPerNode
-
-            cls +:= (() => new 
CreditRiskManager().calculateCreditRiskMonteCarlo(
-                portfolio, horizon, nodeIter, percentile))
-        })
-
-        cls
-    }
-}
-
-/**
- * This class provides a simple model for a credit contract (or a loan). It is 
basically
- * defines as remaining crediting amount to date, credit remaining term, APR 
and annual
- * probability on default. Although this model is simplified for the purpose
- * of this example, it is close enough to emulate the real-life credit
- * risk assessment application.
- */
-private case class Credit(
-    remAmnt: Double, // Remaining crediting amount.
-    remTerm: Int,    // Remaining crediting remTerm.
-    apr: Double,     // Annual percentage rate (APR).
-    edf: Double      // Expected annual probability of default (EaDF).
-) {
-    /**
-     * Gets either credit probability of default for the given period of time
-     * if remaining term is less than crediting time or probability of default
-     * for whole remained crediting time.
-     *
-     * @param term Default term.
-     * @return Credit probability of default in relative percents
-     *     (percentage / 100).
-     */
-    def getDefaultProbability(term: Int): Double = {
-        (1 - math.exp(math.log(1 - edf) * math.min(remTerm, term) / 365.0))
-    }
-}
-
-/**
- * This class abstracts out the calculation of risk for a credit portfolio.
- */
-private class CreditRiskManager {
-    /**
-     * Default randomizer with normal distribution.
-     * Note that since every JVM on the ignite cluster will have its own random
-     * generator (independently initialized) the Monte-Carlo simulation
-     * will be slightly skewed when performed on the ignite cluster due to 
skewed
-     * normal distribution of the sub-jobs comparing to execution on the
-     * local node only with single random generator. Real-life applications
-     * may want to provide its own implementation of distributed random
-     * generator.
-     */
-    private val rndGen = new Random
-
-    /**
-     * Calculates credit risk for a given credit portfolio. This calculation 
uses
-     * Monte-Carlo Simulation to produce risk value.
-     *
-     * @param portfolio Credit portfolio.
-     * @param horizon Forecast horizon (in days).
-     * @param num Number of Monte-Carlo iterations.
-     * @param percentile Cutoff level.
-     * @return Credit risk value, i.e. the minimal amount that creditor has to
-     *      have available to cover possible defaults.
-     */
-    def calculateCreditRiskMonteCarlo(portfolio: Seq[Credit], horizon: Int, 
num:
-        Int, percentile: Double): Double = {
-        println(">>> Calculating credit risk for portfolio [size=" + 
portfolio.length + ", horizon=" +
-            horizon + ", percentile=" + percentile + ", iterations=" + num + 
"] <<<")
-
-        val start = System.currentTimeMillis
-
-        val losses = calculateLosses(portfolio, horizon, num).sorted
-        val lossProbs = new Array[Double](losses.size)
-
-        (0 until losses.size).foreach(i => {
-            if (i == 0)
-                lossProbs(i) = getLossProbability(losses, 0)
-            else if (losses(i) != losses(i - 1))
-                lossProbs(i) = getLossProbability(losses, i) + lossProbs(i - 1)
-            else
-                lossProbs(i) = lossProbs(i - 1)
-        })
-
-        var crdRisk = 0.0
-
-        breakable {
-            (0 until lossProbs.size).foreach(i => {
-                if (lossProbs(i) > percentile) {
-                    crdRisk = losses(i - 1)
-
-                    break()
-                }
-            })
-        }
-
-        println(">>> Finished calculating portfolio risk [risk=" + crdRisk +
-            ", time=" + (System.currentTimeMillis - start) + "ms]")
-
-        crdRisk
-    }
-
-    /**
-     * Calculates losses for the given credit portfolio using Monte-Carlo 
Simulation.
-     * Simulates probability of default only.
-     *
-     * @param portfolio Credit portfolio.
-     * @param horizon Forecast horizon.
-     * @param num Number of Monte-Carlo iterations.
-     * @return Losses array simulated by Monte Carlo method.
-     */
-    private def calculateLosses(portfolio: Seq[Credit], horizon: Int, num: 
Int): Array[Double] = {
-        val losses = new Array[Double](num)
-
-        // Count losses using Monte-Carlo method. We generate random 
probability of default,
-        // if it exceeds certain credit default value we count losses - 
otherwise count income.
-        (0 until num).foreach(i => {
-            portfolio.foreach(crd => {
-                val remDays = math.min(crd.remTerm, horizon)
-
-                if (rndGen.nextDouble >= 1 - 
crd.getDefaultProbability(remDays))
-                    // (1 + 'r' * min(H, W) / 365) * S.
-                    // Where W is a horizon, H is a remaining crediting term, 
'r' is an annual credit rate,
-                    // S is a remaining credit amount.
-                    losses(i) += (1 + crd.apr * math.min(horizon, crd.remTerm) 
/ 365) * crd.remAmnt
-                else
-                    // - 'r' * min(H,W) / 365 * S
-                    // Where W is a horizon, H is a remaining crediting term, 
'r' is a annual credit rate,
-                    // S is a remaining credit amount.
-                    losses(i) -= crd.apr * math.min(horizon, crd.remTerm) / 
365 * crd.remAmnt
-            })
-        })
-
-        losses
-    }
-
-    /**
-     * Calculates probability of certain loss in array of losses.
-     *
-     * @param losses Array of losses.
-     * @param i Index of certain loss in array.
-     * @return Probability of loss with given index.
-     */
-    private def getLossProbability(losses: Array[Double], i: Int): Double = {
-        var count = 0.0
-
-        losses.foreach(tmp => {
-            if (tmp == losses(i))
-                count += 1
-        })
-
-        count / losses.size
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/41a5bf67/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarJvmCloudExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarJvmCloudExample.scala
 
b/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarJvmCloudExample.scala
deleted file mode 100644
index 1014726..0000000
--- 
a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarJvmCloudExample.scala
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.scalar.examples
-
-import java.util.concurrent.Executors
-import java.util.concurrent.TimeUnit._
-import javax.swing.{JComponent, JLabel, JOptionPane}
-
-import org.apache.ignite.configuration.IgniteConfiguration
-import org.apache.ignite.internal.util.scala.impl
-import org.apache.ignite.scalar.scalar
-import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi
-import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder
-
-/**
- * This example demonstrates how you can easily startup multiple nodes
- * in the same JVM with Scala. All started nodes use default configuration
- * with only difference of the ignite cluster name which has to be different 
for
- * every node so they can be differentiated within JVM.
- * <p/>
- * Starting multiple nodes in the same JVM is especially useful during
- * testing and debugging as it allows you to create a full ignite cluster 
within
- * a test case, simulate various scenarios, and watch how jobs and data
- * behave within a ignite cluster.
- */
-object ScalarJvmCloudExample {
-    /** Names of nodes to start. */
-    val NODES = List("scalar-node-0", "scalar-node-1", "scalar-node-2", 
"scalar-node-3", "scalar-node-4")
-
-    def main(args: Array[String]) {
-        try {
-            // Shared IP finder for in-VM node discovery.
-            val ipFinder = new TcpDiscoveryVmIpFinder(true)
-
-            val pool = Executors.newFixedThreadPool(NODES.size)
-
-            // Concurrently startup all nodes.
-            NODES.foreach(name => pool.submit(new Runnable {
-                @impl def run() {
-                    // All defaults.
-                    val cfg = new IgniteConfiguration
-
-                    cfg.setGridName(name)
-
-                    // Configure in-VM TCP discovery so we don't
-                    // interfere with other ignites running on the same 
network.
-                    val discoSpi = new TcpDiscoverySpi
-
-                    discoSpi.setIpFinder(ipFinder)
-
-                    cfg.setDiscoverySpi(discoSpi)
-
-                    // Start node
-                    scalar.start(cfg)
-
-                    ()
-                }
-            }))
-
-            pool.shutdown()
-
-            pool.awaitTermination(Long.MaxValue, MILLISECONDS)
-
-            // Wait until Ok is pressed.
-            JOptionPane.showMessageDialog(
-                null,
-                Array[JComponent](
-                    new JLabel("Ignite JVM cloud started."),
-                    new JLabel("Number of nodes in the cluster: " + 
scalar.ignite$(NODES(1)).get.cluster().nodes().size()),
-                    new JLabel("Click OK to stop.")
-                ),
-                "Ignite",
-                JOptionPane.INFORMATION_MESSAGE)
-
-        }
-        // Stop all nodes
-        finally
-            NODES.foreach(node => scalar.stop(node, true))
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/41a5bf67/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarPingPongExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarPingPongExample.scala
 
b/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarPingPongExample.scala
deleted file mode 100644
index 75784cf..0000000
--- 
a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarPingPongExample.scala
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.scalar.examples
-
-import java.util.UUID
-import java.util.concurrent.CountDownLatch
-
-import org.apache.ignite.messaging.MessagingListenActor
-import org.apache.ignite.scalar.scalar
-import org.apache.ignite.scalar.scalar._
-
-/**
- * Demonstrates simple protocol-based exchange in playing a ping-pong between
- * two nodes. It is analogous to `MessagingPingPongExample` on Java side.
- * <p/>
- * Remote nodes should always be started with special configuration file which
- * enables P2P class loading: `'ignite.{sh|bat} 
examples/config/example-ignite.xml'`.
- * <p/>
- * Alternatively you can run `ExampleNodeStartup` in another JVM which will
- * start node with `examples/config/example-ignite.xml` configuration.
- */
-object ScalarPingPongExample extends App {
-    scalar("examples/config/example-ignite.xml") {
-        pingPong()
-        //pingPong2()
-    }
-
-    /**
-     * Implements Ping Pong example between local and remote node.
-     */
-    def pingPong() {
-        val g = ignite$
-
-        if (g.cluster().nodes().size < 2) {
-            println(">>>")
-            println(">>> I need a partner to play a ping pong!")
-            println(">>>")
-
-            return
-        }
-        else {
-            // Pick first remote node as a partner.
-            val nodeB = g.cluster().forNode(g.remoteNodes$().head)
-
-            // Set up remote player: configure remote node 'rmt' to listen
-            // for messages from local node 'loc'.
-            g.message(nodeB).remoteListen(null, new 
MessagingListenActor[String]() {
-                def receive(nodeId: UUID, msg: String) {
-                    println(msg)
-
-                    msg match {
-                        case "PING" => respond("PONG")
-                        case "STOP" => stop()
-                    }
-                }
-            })
-
-            val latch = new CountDownLatch(10)
-
-            // Set up local player: configure local node 'loc'
-            // to listen for messages from remote node 'rmt'.
-            ignite$.message().localListen(null, new 
MessagingListenActor[String]() {
-                def receive(nodeId: UUID, msg: String) {
-                    println(msg)
-
-                    if (latch.getCount == 1)
-                        stop("STOP")
-                    else // We know it's 'PONG'.
-                        respond("PING")
-
-                    latch.countDown()
-                }
-            })
-
-            // Serve!
-            nodeB.send$("PING", null)
-
-            // Wait til the match is over.
-            latch.await()
-        }
-    }
-
-    /**
-     * Implements Ping Pong example between two remote nodes.
-     */
-    def pingPong2() {
-        val g = ignite$
-
-        if (g.cluster().forRemotes().nodes().size() < 2) {
-            println(">>>")
-            println(">>> I need at least two remote nodes!")
-            println(">>>")
-        }
-        else {
-            // Pick two remote nodes.
-            val n1 = g.cluster().forRemotes().head
-            val n2 = g.cluster().forRemotes().tail.head
-
-            val n1p = g.cluster().forNode(n1)
-            val n2p = g.cluster().forNode(n2)
-
-            // Configure remote node 'n1' to receive messages from 'n2'.
-            g.message(n1p).remoteListen(null, new MessagingListenActor[String] 
{
-                def receive(nid: UUID, msg: String) {
-                    println(msg)
-
-                    msg match {
-                        case "PING" => respond("PONG")
-                        case "STOP" => stop()
-                    }
-                }
-            })
-
-            // Configure remote node 'n2' to receive messages from 'n1'.
-            g.message(n2p).remoteListen(null, new MessagingListenActor[String] 
{
-                // Get local count down latch.
-                private lazy val latch: CountDownLatch = 
g.cluster().nodeLocalMap().get("latch")
-
-                def receive(nid: UUID, msg: String) {
-                    println(msg)
-
-                    latch.getCount match {
-                        case 1 => stop("STOP")
-                        case _ => respond("PING")
-                    }
-
-                    latch.countDown()
-                }
-            })
-
-            // 1. Sets latch into node local storage so that local actor could 
use it.
-            // 2. Sends first 'PING' to 'n1'.
-            // 3. Waits until all messages are exchanged between two remote 
nodes.
-            n2p.run$(() => {
-                val latch = new CountDownLatch(10)
-
-                g.cluster().nodeLocalMap[String, CountDownLatch].put("latch", 
latch)
-
-                n1p.send$("PING", null)
-
-                latch.await()
-            }, null)
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/41a5bf67/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarPrimeExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarPrimeExample.scala
 
b/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarPrimeExample.scala
deleted file mode 100644
index 867783b..0000000
--- 
a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarPrimeExample.scala
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.scalar.examples
-
-import java.util
-
-import org.apache.ignite.scalar.scalar
-import org.apache.ignite.scalar.scalar._
-
-import scala.util.control.Breaks._
-
-/**
- * Prime Number calculation example based on Scalar.
- *
- * ==Starting Remote Nodes==
- * To try this example you should (but don't have to) start remote ignite 
instances.
- * You can start as many as you like by executing the following script:
- * `{IGNITE_HOME}/bin/ignite.{bat|sh} examples/config/example-ignite.xml`
- * <p/>
- * Once remote instances are started, you can execute this example from
- * Eclipse, IntelliJ IDEA, or NetBeans (and any other Java IDE) by simply 
hitting run
- * button. You will see that all nodes discover each other and
- * all of the nodes will participate in task execution (check node
- * output).
- * <p/>
- * Note that when running this example on a multi-core box, simply
- * starting additional cluster node on the same box will speed up
- * prime number calculation by a factor of 2.
- */
-object ScalarPrimeExample {
-    /**
-     * Main entry point to application. No arguments required.
-     *
-     * @param args Command like argument (not used).
-     */
-    def main(args: Array[String]){
-        scalar("examples/config/example-ignite.xml") {
-            val start = System.currentTimeMillis
-
-            // Values we want to check for prime.
-            val checkVals = Array(32452841L, 32452843L, 32452847L, 32452849L, 
236887699L, 217645199L)
-
-            println(">>>")
-            println(">>> Starting to check the following numbers for primes: " 
+ util.Arrays.toString(checkVals))
-
-            val g = ignite$
-
-            checkVals.foreach(checkVal => {
-                val divisor = g.reduce$[Option[Long], Option[Option[Long]]](
-                    closures(g.cluster().nodes().size(), checkVal), 
_.find(_.isDefined), null)
-
-                if (!divisor.isDefined)
-                    println(">>> Value '" + checkVal + "' is a prime number")
-                else
-                    println(">>> Value '" + checkVal + "' is divisible by '" + 
divisor.get.get + '\'')
-            })
-
-            val totalTime = System.currentTimeMillis - start
-
-            println(">>> Total time to calculate all primes (milliseconds): " 
+ totalTime)
-            println(">>>")
-        }
-    }
-
-    /**
-     * Creates closures for checking passed in value for prime.
-     *
-     * Every closure gets a range of divisors to check. The lower and
-     * upper boundaries of this range are passed into closure.
-     * Closures checks if the value passed in is divisible by any of
-     * the divisors in the range.
-     *
-     * @param clusterSize Size of the cluster.
-     * @param checkVal Value to check.
-     * @return Collection of closures.
-     */
-    private def closures(clusterSize: Int, checkVal: Long): Seq[() => 
Option[Long]] = {
-        var cls = Seq.empty[() => Option[Long]]
-
-        val taskMinRange = 2L
-        val numbersPerTask = if (checkVal / clusterSize < 10) 10L else 
checkVal / clusterSize
-
-        var minRange = 0L
-        var maxRange = 0L
-
-        var i = 0
-
-        while (maxRange < checkVal) {
-            minRange = i * numbersPerTask + taskMinRange
-            maxRange = (i + 1) * numbersPerTask + taskMinRange - 1
-
-            if (maxRange > checkVal)
-                maxRange = checkVal
-
-            val min = minRange
-            val max = maxRange
-
-            cls +:= (() => {
-                var divisor: Option[Long] = None
-
-                breakable {
-                    (min to max).foreach(d => {
-                        if (d != 1 && d != checkVal && checkVal % d == 0) {
-                             divisor = Some(d)
-
-                             break()
-                        }
-                    })
-                }
-
-                divisor
-            })
-
-            i += 1
-        }
-
-        cls
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/41a5bf67/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarScheduleExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarScheduleExample.scala
 
b/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarScheduleExample.scala
deleted file mode 100644
index 8734a23..0000000
--- 
a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarScheduleExample.scala
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.scalar.examples
-
-import org.apache.ignite.scalar.scalar
-import org.apache.ignite.scalar.scalar._
-
-/**
- * Demonstrates a cron-based `Runnable` execution scheduling.
- * Test runnable object broadcasts a phrase to all cluster nodes every minute
- * three times with initial scheduling delay equal to five seconds.
- * <p/>
- * Remote nodes should always be started with special configuration file which
- * enables P2P class loading: `'ignite.{sh|bat} 
examples/config/example-ignite.xml'`.
- * <p/>
- * Alternatively you can run `ExampleNodeStartup` in another JVM which will
- * start node with `examples/config/example-ignite.xml` configuration.
- */
-object ScalarScheduleExample extends App {
-    scalar("examples/config/example-ignite.xml") {
-        println()
-        println("Compute schedule example started.")
-
-        val g = ignite$
-
-        var invocations = 0
-
-        // Schedule callable that returns incremented value each time.
-        val fut = ignite$.scheduleLocalCall(
-            () => {
-                invocations += 1
-
-                g.bcastRun(() => {
-                    println()
-                    println("Howdy! :)")
-                }, null)
-
-                invocations
-            },
-            "{5, 3} * * * * *" // Cron expression.
-        )
-
-        while (!fut.isDone)
-            println(">>> Invocation #: " + fut.get)
-
-        // Prints.
-        println()
-        println(">>> Schedule future is done and has been unscheduled.")
-        println(">>> Check all nodes for hello message output.")
-    }
-}

Reply via email to