http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/c7a9a1db/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/RestBinaryProtocolSelfTest.java ---------------------------------------------------------------------- diff --cc modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/RestBinaryProtocolSelfTest.java index 0000000,b82b2eb..f14a4ad mode 000000,100644..100644 --- a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/RestBinaryProtocolSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/RestBinaryProtocolSelfTest.java @@@ -1,0 -1,633 +1,634 @@@ + /* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + package org.apache.ignite.internal.processors.rest; + + import org.apache.ignite.*; + import org.apache.ignite.cache.*; + import org.apache.ignite.compute.*; + import org.apache.ignite.configuration.*; + import org.apache.ignite.internal.*; + import org.apache.ignite.spi.discovery.tcp.*; + import org.apache.ignite.spi.discovery.tcp.ipfinder.*; + import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.*; + import org.apache.ignite.internal.processors.rest.client.message.*; + import org.apache.ignite.internal.util.typedef.*; + import org.apache.ignite.internal.util.typedef.internal.*; + import org.apache.ignite.testframework.*; + import org.apache.ignite.testframework.junits.common.*; + import org.jetbrains.annotations.*; + + import java.io.*; + import java.lang.reflect.*; + import java.util.*; + import java.util.concurrent.*; + + import static org.apache.ignite.cache.CacheMode.*; + import static org.apache.ignite.cache.CacheWriteSynchronizationMode.*; + + /** + * TCP protocol test. 
+ */ + @SuppressWarnings("unchecked") + public class RestBinaryProtocolSelfTest extends GridCommonAbstractTest { + /** */ + private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true); + + /** */ + private static final String CACHE_NAME = "cache"; + + /** */ + private static final String HOST = "127.0.0.1"; + + /** */ + private static final int PORT = 11212; + + /** */ + private TestBinaryClient client; + + /** {@inheritDoc} */ + @Override protected void beforeTestsStarted() throws Exception { + startGrid(); + } + + /** {@inheritDoc} */ + @Override protected void afterTestsStopped() throws Exception { + stopAllGrids(); + } + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + client = client(); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + client.shutdown(); + + grid().cache(null).clearAll(); + grid().cache(CACHE_NAME).clearAll(); + } + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(gridName); + + cfg.setLocalHost(HOST); + + assert cfg.getClientConnectionConfiguration() == null; + + ClientConnectionConfiguration clientCfg = new ClientConnectionConfiguration(); + + clientCfg.setRestTcpPort(PORT); + + cfg.setClientConnectionConfiguration(clientCfg); + + TcpDiscoverySpi disco = new TcpDiscoverySpi(); + + disco.setIpFinder(IP_FINDER); + + cfg.setDiscoverySpi(disco); + + cfg.setCacheConfiguration(cacheConfiguration(null), cacheConfiguration(CACHE_NAME)); + + return cfg; + } + + /** + * @param cacheName Cache name. + * @return Cache configuration. + * @throws Exception In case of error. + */ + private CacheConfiguration cacheConfiguration(@Nullable String cacheName) throws Exception { + CacheConfiguration cfg = defaultCacheConfiguration(); + + cfg.setCacheMode(LOCAL); + cfg.setName(cacheName); + cfg.setWriteSynchronizationMode(FULL_SYNC); ++ cfg.setStatisticsEnabled(true); + + return cfg; + } + + /** + * @return Client. + * @throws IgniteCheckedException In case of error. + */ + private TestBinaryClient client() throws IgniteCheckedException { + return new TestBinaryClient(HOST, PORT); + } + + /** + * @throws Exception If failed. + */ + public void testPut() throws Exception { + assertTrue(client.cachePut(null, "key1", "val1")); + assertEquals("val1", grid().cache(null).get("key1")); + + assertTrue(client.cachePut(CACHE_NAME, "key1", "val1")); + assertEquals("val1", grid().cache(CACHE_NAME).get("key1")); + } + + /** + * @throws Exception If failed. + */ + public void testPutAll() throws Exception { + client.cachePutAll(null, F.asMap("key1", "val1", "key2", "val2")); + + Map<String, String> map = grid().<String, String>cache(null).getAll(Arrays.asList("key1", "key2")); + + assertEquals(2, map.size()); + assertEquals("val1", map.get("key1")); + assertEquals("val2", map.get("key2")); + + client.cachePutAll(CACHE_NAME, F.asMap("key1", "val1", "key2", "val2")); + + map = grid().<String, String>cache(CACHE_NAME).getAll(Arrays.asList("key1", "key2")); + + assertEquals(2, map.size()); + assertEquals("val1", map.get("key1")); + assertEquals("val2", map.get("key2")); + } + + /** + * @throws Exception If failed. 
+ */ + public void testGet() throws Exception { + assertTrue(grid().cache(null).putx("key", "val")); + + assertEquals("val", client.cacheGet(null, "key")); + + assertTrue(grid().cache(CACHE_NAME).putx("key", "val")); + + assertEquals("val", client.cacheGet(CACHE_NAME, "key")); + } + + /** + * @throws Exception If failed. + */ + public void testFailure() throws Exception { + GridKernal kernal = ((GridKernal)grid()); + + GridRestProcessor proc = kernal.context().rest(); + + // Clearing handlers collection to force failure. + Field hndField = proc.getClass().getDeclaredField("handlers"); + + hndField.setAccessible(true); + + Map handlers = (Map)hndField.get(proc); + + Map cp = new HashMap(handlers); + + handlers.clear(); + + try { + GridTestUtils.assertThrows( + log, + new Callable<Object>() { + @Override public Object call() throws Exception { + return client.cacheGet(null, "key"); + } + }, + IgniteCheckedException.class, + "Failed to process client request: Failed to find registered handler for command: CACHE_GET"); + } + finally { + handlers.putAll(cp); + } + } + + /** + * @throws Exception If failed. + */ + public void testGetAll() throws Exception { + assertTrue(grid().cache(null).putx("key1", "val1")); + assertTrue(grid().cache(null).putx("key2", "val2")); + + Map<String, String> map = client.cacheGetAll(null, "key1", "key2"); + + assertEquals(2, map.size()); + assertEquals("val1", map.get("key1")); + assertEquals("val2", map.get("key2")); + + assertTrue(grid().cache(null).putx("key3", "val3")); + assertTrue(grid().cache(null).putx("key4", "val4")); + + map = client.cacheGetAll(null, "key3", "key4"); + + assertEquals(2, map.size()); + assertEquals("val3", map.get("key3")); + assertEquals("val4", map.get("key4")); + + assertTrue(grid().cache(CACHE_NAME).putx("key1", "val1")); + assertTrue(grid().cache(CACHE_NAME).putx("key2", "val2")); + + map = client.cacheGetAll(CACHE_NAME, "key1", "key2"); + + assertEquals(2, map.size()); + assertEquals("val1", map.get("key1")); + assertEquals("val2", map.get("key2")); + + assertTrue(grid().cache(CACHE_NAME).putx("key3", "val3")); + assertTrue(grid().cache(CACHE_NAME).putx("key4", "val4")); + + map = client.cacheGetAll(CACHE_NAME, "key3", "key4"); + + assertEquals(2, map.size()); + assertEquals("val3", map.get("key3")); + assertEquals("val4", map.get("key4")); + } + + /** + * @throws Exception If failed. + */ + public void testRemove() throws Exception { + assertTrue(grid().cache(null).putx("key", "val")); + + assertTrue(client.cacheRemove(null, "key")); + assertFalse(client.cacheRemove(null, "wrongKey")); + + assertNull(grid().cache(null).get("key")); + + assertTrue(grid().cache(CACHE_NAME).putx("key", "val")); + + assertTrue(client.cacheRemove(CACHE_NAME, "key")); + assertFalse(client.cacheRemove(CACHE_NAME, "wrongKey")); + + assertNull(grid().cache(CACHE_NAME).get("key")); + } + + /** + * @throws Exception If failed. 
+ */ + public void testRemoveAll() throws Exception { + assertTrue(grid().cache(null).putx("key1", "val1")); + assertTrue(grid().cache(null).putx("key2", "val2")); + assertTrue(grid().cache(null).putx("key3", "val3")); + assertTrue(grid().cache(null).putx("key4", "val4")); + + client.cacheRemoveAll(null, "key1", "key2"); + + assertNull(grid().cache(null).get("key1")); + assertNull(grid().cache(null).get("key2")); + assertNotNull(grid().cache(null).get("key3")); + assertNotNull(grid().cache(null).get("key4")); + + assertTrue(grid().cache(CACHE_NAME).putx("key1", "val1")); + assertTrue(grid().cache(CACHE_NAME).putx("key2", "val2")); + assertTrue(grid().cache(CACHE_NAME).putx("key3", "val3")); + assertTrue(grid().cache(CACHE_NAME).putx("key4", "val4")); + + client.cacheRemoveAll(CACHE_NAME, "key1", "key2"); + + assertNull(grid().cache(CACHE_NAME).get("key1")); + assertNull(grid().cache(CACHE_NAME).get("key2")); + assertNotNull(grid().cache(CACHE_NAME).get("key3")); + assertNotNull(grid().cache(CACHE_NAME).get("key4")); + } + + /** + * @throws Exception If failed. + */ + public void testReplace() throws Exception { + assertFalse(client.cacheReplace(null, "key1", "val1")); + assertTrue(grid().cache(null).putx("key1", "val1")); + assertTrue(client.cacheReplace(null, "key1", "val2")); + + assertFalse(client.cacheReplace(null, "key2", "val1")); + assertTrue(grid().cache(null).putx("key2", "val1")); + assertTrue(client.cacheReplace(null, "key2", "val2")); + + grid().cache(null).clearAll(); + + assertFalse(client.cacheReplace(CACHE_NAME, "key1", "val1")); + assertTrue(grid().cache(CACHE_NAME).putx("key1", "val1")); + assertTrue(client.cacheReplace(CACHE_NAME, "key1", "val2")); + } + + /** + * @throws Exception If failed. + */ + public void testCompareAndSet() throws Exception { + assertFalse(client.cacheCompareAndSet(null, "key", null, null)); + assertTrue(grid().cache(null).putx("key", "val")); + assertTrue(client.cacheCompareAndSet(null, "key", null, null)); + assertNull(grid().cache(null).get("key")); + + assertFalse(client.cacheCompareAndSet(null, "key", null, "val")); + assertTrue(grid().cache(null).putx("key", "val")); + assertFalse(client.cacheCompareAndSet(null, "key", null, "wrongVal")); + assertEquals("val", grid().cache(null).get("key")); + assertTrue(client.cacheCompareAndSet(null, "key", null, "val")); + assertNull(grid().cache(null).get("key")); + + assertTrue(client.cacheCompareAndSet(null, "key", "val", null)); + assertEquals("val", grid().cache(null).get("key")); + assertFalse(client.cacheCompareAndSet(null, "key", "newVal", null)); + assertEquals("val", grid().cache(null).get("key")); + assertTrue(grid().cache(null).removex("key")); + + assertFalse(client.cacheCompareAndSet(null, "key", "val1", "val2")); + assertTrue(grid().cache(null).putx("key", "val2")); + assertFalse(client.cacheCompareAndSet(null, "key", "val1", "wrongVal")); + assertEquals("val2", grid().cache(null).get("key")); + assertTrue(client.cacheCompareAndSet(null, "key", "val1", "val2")); + assertEquals("val1", grid().cache(null).get("key")); + assertTrue(grid().cache(null).removex("key")); + + assertFalse(client.cacheCompareAndSet(CACHE_NAME, "key", null, null)); + assertTrue(grid().cache(CACHE_NAME).putx("key", "val")); + assertTrue(client.cacheCompareAndSet(CACHE_NAME, "key", null, null)); + assertNull(grid().cache(CACHE_NAME).get("key")); + + assertFalse(client.cacheCompareAndSet(CACHE_NAME, "key", null, "val")); + assertTrue(grid().cache(CACHE_NAME).putx("key", "val")); + 
assertFalse(client.cacheCompareAndSet(CACHE_NAME, "key", null, "wrongVal")); + assertEquals("val", grid().cache(CACHE_NAME).get("key")); + assertTrue(client.cacheCompareAndSet(CACHE_NAME, "key", null, "val")); + assertNull(grid().cache(CACHE_NAME).get("key")); + + assertTrue(client.cacheCompareAndSet(CACHE_NAME, "key", "val", null)); + assertEquals("val", grid().cache(CACHE_NAME).get("key")); + assertFalse(client.cacheCompareAndSet(CACHE_NAME, "key", "newVal", null)); + assertEquals("val", grid().cache(CACHE_NAME).get("key")); + assertTrue(grid().cache(CACHE_NAME).removex("key")); + + assertFalse(client.cacheCompareAndSet(CACHE_NAME, "key", "val1", "val2")); + assertTrue(grid().cache(CACHE_NAME).putx("key", "val2")); + assertFalse(client.cacheCompareAndSet(CACHE_NAME, "key", "val1", "wrongVal")); + assertEquals("val2", grid().cache(CACHE_NAME).get("key")); + assertTrue(client.cacheCompareAndSet(CACHE_NAME, "key", "val1", "val2")); + assertEquals("val1", grid().cache(CACHE_NAME).get("key")); + assertTrue(grid().cache(CACHE_NAME).removex("key")); + } + + /** + * @throws Exception If failed. + */ + public void testMetrics() throws Exception { - grid().cache(null).resetMetrics(); - grid().cache(CACHE_NAME).resetMetrics(); ++ grid().cache(null).mxBean().clear(); ++ grid().cache(CACHE_NAME).mxBean().clear(); + + grid().cache(null).putx("key1", "val"); + grid().cache(null).putx("key2", "val"); + grid().cache(null).putx("key2", "val"); + + grid().cache(null).get("key1"); + grid().cache(null).get("key2"); + grid().cache(null).get("key2"); + + grid().cache(CACHE_NAME).putx("key1", "val"); + grid().cache(CACHE_NAME).putx("key2", "val"); + grid().cache(CACHE_NAME).putx("key2", "val"); + + grid().cache(CACHE_NAME).get("key1"); + grid().cache(CACHE_NAME).get("key2"); + grid().cache(CACHE_NAME).get("key2"); + + Map<String, Long> m = client.cacheMetrics(null); + + assertNotNull(m); - assertEquals(7, m.size()); ++ assertEquals(4, m.size()); + assertEquals(3, m.get("reads").longValue()); + assertEquals(3, m.get("writes").longValue()); + + m = client.cacheMetrics(CACHE_NAME); + + assertNotNull(m); - assertEquals(7, m.size()); ++ assertEquals(4, m.size()); + assertEquals(3, m.get("reads").longValue()); + assertEquals(3, m.get("writes").longValue()); + } + + /** + * @throws Exception If failed. + */ + public void testAppend() throws Exception { + grid().cache(null).remove("key"); + grid().cache(CACHE_NAME).remove("key"); + + assertFalse(client.cacheAppend(null, "key", ".val")); + assertFalse(client.cacheAppend(CACHE_NAME, "key", ".val")); + + grid().cache(null).put("key", "orig"); + grid().cache(CACHE_NAME).put("key", "orig"); + + assertTrue(client.cacheAppend(null, "key", ".val")); + assertEquals("orig.val", grid().cache(null).get("key")); + assertTrue(client.cacheAppend(null, "key", ".newVal")); + assertEquals("orig.val.newVal", grid().cache(null).get("key")); + + assertTrue(client.cacheAppend(CACHE_NAME, "key", ".val")); + assertEquals("orig.val", grid().cache(CACHE_NAME).get("key")); + assertTrue(client.cacheAppend(CACHE_NAME, "key", ".newVal")); + assertEquals("orig.val.newVal", grid().cache(CACHE_NAME).get("key")); + } + + /** + * @throws Exception If failed. 
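[Editor's note, for context on the testMetrics() changes above: the expected metrics map shrinks from seven to four entries once the cache switches to the new CacheMetrics counters. Below is a minimal sketch of how such a four-entry map could be assembled from the revised interface; the helper name and the key names beyond "reads" and "writes" are assumptions, not taken from this commit.]

    // Hypothetical helper (not from this commit): flattens the revised CacheMetrics
    // into the four-entry map that the updated testMetrics() asserts against.
    static Map<String, Long> cacheMetricsMap(CacheMetrics m) {
        Map<String, Long> map = new HashMap<>(4);

        map.put("reads", m.getCacheGets());   // asserted as 3 in the test
        map.put("writes", m.getCachePuts());  // asserted as 3 in the test
        map.put("hits", m.getCacheHits());    // assumed remaining keys
        map.put("misses", m.getCacheMisses());

        return map;
    }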
+ */ + public void testPrepend() throws Exception { + grid().cache(null).remove("key"); + grid().cache(CACHE_NAME).remove("key"); + + assertFalse(client.cachePrepend(null, "key", ".val")); + assertFalse(client.cachePrepend(CACHE_NAME, "key", ".val")); + + grid().cache(null).put("key", "orig"); + grid().cache(CACHE_NAME).put("key", "orig"); + + assertTrue(client.cachePrepend(null, "key", "val.")); + assertEquals("val.orig", grid().cache(null).get("key")); + assertTrue(client.cachePrepend(null, "key", "newVal.")); + assertEquals("newVal.val.orig", grid().cache(null).get("key")); + + assertTrue(client.cachePrepend(CACHE_NAME, "key", "val.")); + assertEquals("val.orig", grid().cache(CACHE_NAME).get("key")); + assertTrue(client.cachePrepend(CACHE_NAME, "key", "newVal.")); + assertEquals("newVal.val.orig", grid().cache(CACHE_NAME).get("key")); + } + + /** + * @throws Exception If failed. + */ + public void testExecute() throws Exception { + GridClientTaskResultBean res = client.execute(TestTask.class.getName(), + Arrays.asList("executing", 3, "test", 5, "task")); + + assertTrue(res.isFinished()); + assertEquals(25, res.getResult()); + } + + /** + * @throws Exception If failed. + */ + public void testNode() throws Exception { + assertNull(client.node(UUID.randomUUID(), false, false)); + assertNull(client.node("wrongHost", false, false)); + + GridClientNodeBean node = client.node(grid().localNode().id(), true, true); + + assertNotNull(node); + assertFalse(node.getAttributes().isEmpty()); + assertNotNull(node.getMetrics()); + assertNotNull(node.getTcpAddresses()); + assertEquals(PORT, node.getTcpPort()); + assertEquals(grid().localNode().id(), node.getNodeId()); + + node = client.node(grid().localNode().id(), false, false); + + assertNotNull(node); + assertNull(node.getAttributes()); + assertNull(node.getMetrics()); + assertNotNull(node.getTcpAddresses()); + assertEquals(PORT, node.getTcpPort()); + assertEquals(grid().localNode().id(), node.getNodeId()); + + node = client.node(HOST, true, true); + + assertNotNull(node); + assertFalse(node.getAttributes().isEmpty()); + assertNotNull(node.getMetrics()); + assertNotNull(node.getTcpAddresses()); + assertEquals(PORT, node.getTcpPort()); + assertEquals(grid().localNode().id(), node.getNodeId()); + + node = client.node(HOST, false, false); + + assertNotNull(node); + assertNull(node.getAttributes()); + assertNull(node.getMetrics()); + assertNotNull(node.getTcpAddresses()); + assertEquals(PORT, node.getTcpPort()); + assertEquals(grid().localNode().id(), node.getNodeId()); + } + + /** + * @throws Exception If failed. + */ + public void testTopology() throws Exception { + List<GridClientNodeBean> top = client.topology(true, true); + + assertNotNull(top); + assertEquals(1, top.size()); + + GridClientNodeBean node = F.first(top); + + assertNotNull(node); + assertFalse(node.getAttributes().isEmpty()); + assertNotNull(node.getMetrics()); + assertNotNull(node.getTcpAddresses()); + assertEquals(grid().localNode().id(), node.getNodeId()); + + top = client.topology(false, false); + + assertNotNull(top); + assertEquals(1, top.size()); + + node = F.first(top); + + assertNotNull(node); + assertNull(node.getAttributes()); + assertNull(node.getMetrics()); + assertNotNull(node.getTcpAddresses()); + assertEquals(grid().localNode().id(), node.getNodeId()); + } + + /** + * @throws Exception If failed. + */ + public void testLog() throws Exception { + String path = "work/log/gridgain.log." 
+ System.currentTimeMillis(); + + File file = new File(U.getGridGainHome(), path); + + assert !file.exists(); + + FileWriter writer = new FileWriter(file); + + String sep = System.getProperty("line.separator"); + + writer.write("Line 1" + sep); + writer.write(sep); + writer.write("Line 2" + sep); + writer.write("Line 3" + sep); + + writer.flush(); + writer.close(); + + List<String> log = client.log(path, 0, 10); + + assertNotNull(log); + assertEquals(4, log.size()); + + file.delete(); + + GridTestUtils.assertThrows( + log(), + new Callable<Object>() { + @Override + public Object call() throws Exception { + client.log("wrong/path", 0, 10); + + return null; + } + }, + IgniteCheckedException.class, + null + ); + } + + /** + * Test task. + */ + private static class TestTask extends ComputeTaskSplitAdapter<List<Object>, Integer> { + /** {@inheritDoc} */ + @Override protected Collection<? extends ComputeJob> split(int gridSize, List<Object> args) + throws IgniteCheckedException { + Collection<ComputeJobAdapter> jobs = new ArrayList<>(args.size()); + + for (final Object arg : args) { + jobs.add(new ComputeJobAdapter() { + @SuppressWarnings("OverlyStrongTypeCast") + @Override public Object execute() { + try { + return ((String)arg).length(); + } + catch (ClassCastException ignored) { + assert arg instanceof Integer; + + return arg; + } + } + }); + } + + return jobs; + } + + /** {@inheritDoc} */ + @Override public Integer reduce(List<ComputeJobResult> results) throws IgniteCheckedException { + int sum = 0; + + for (ComputeJobResult res : results) + sum += res.<Integer>getData(); + + return sum; + } + } + }
http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/c7a9a1db/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/RestMemcacheProtocolSelfTest.java ---------------------------------------------------------------------- diff --cc modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/RestMemcacheProtocolSelfTest.java index 0000000,a97e5f9..a996352 mode 000000,100644..100644 --- a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/RestMemcacheProtocolSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/RestMemcacheProtocolSelfTest.java @@@ -1,0 -1,331 +1,332 @@@ + /* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + package org.apache.ignite.internal.processors.rest; + + import junit.framework.*; + import org.apache.ignite.*; + import org.apache.ignite.cache.*; + import org.apache.ignite.configuration.*; + import org.apache.ignite.spi.discovery.tcp.*; + import org.apache.ignite.spi.discovery.tcp.ipfinder.*; + import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.*; + import org.apache.ignite.testframework.junits.common.*; + import org.jetbrains.annotations.*; + + import java.util.*; + + import static org.apache.ignite.cache.CacheMode.*; + import static org.apache.ignite.cache.CacheWriteSynchronizationMode.*; + + /** + * TCP protocol test. 
+ */ + @SuppressWarnings("unchecked") + public class RestMemcacheProtocolSelfTest extends GridCommonAbstractTest { + /** */ + private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true); + + /** */ + private static final String CACHE_NAME = "cache"; + + /** */ + private static final String HOST = "127.0.0.1"; + + /** */ + private static final int PORT = 11212; + + /** */ + private TestMemcacheClient client; + + /** {@inheritDoc} */ + @Override protected void beforeTestsStarted() throws Exception { + startGrid(); + } + + /** {@inheritDoc} */ + @Override protected void afterTestsStopped() throws Exception { + stopAllGrids(); + } + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + client = client(); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + client.shutdown(); + + grid().cache(null).clearAll(); + grid().cache(CACHE_NAME).clearAll(); + } + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(gridName); + + cfg.setLocalHost(HOST); + + assert cfg.getClientConnectionConfiguration() == null; + + ClientConnectionConfiguration clientCfg = new ClientConnectionConfiguration(); + + clientCfg.setRestTcpPort(PORT); + + cfg.setClientConnectionConfiguration(clientCfg); + + TcpDiscoverySpi disco = new TcpDiscoverySpi(); + + disco.setIpFinder(IP_FINDER); + + cfg.setDiscoverySpi(disco); + + cfg.setCacheConfiguration(cacheConfiguration(null), cacheConfiguration(CACHE_NAME)); + + return cfg; + } + + /** + * @param cacheName Cache name. + * @return Cache configuration. + * @throws Exception In case of error. + */ + private CacheConfiguration cacheConfiguration(@Nullable String cacheName) throws Exception { + CacheConfiguration cfg = defaultCacheConfiguration(); + + cfg.setCacheMode(LOCAL); + cfg.setName(cacheName); + cfg.setWriteSynchronizationMode(FULL_SYNC); ++ cfg.setStatisticsEnabled(true); + + return cfg; + } + + /** + * @return Client. + * @throws IgniteCheckedException In case of error. + */ + private TestMemcacheClient client() throws IgniteCheckedException { + return new TestMemcacheClient(HOST, PORT); + } + + /** + * @throws Exception If failed. + */ + public void testPut() throws Exception { + assertTrue(client.cachePut(null, "key1", "val1")); + assertEquals("val1", grid().cache(null).get("key1")); + + assertTrue(client.cachePut(CACHE_NAME, "key1", "val1")); + assertEquals("val1", grid().cache(CACHE_NAME).get("key1")); + } + + /** + * @throws Exception If failed. + */ + public void testGet() throws Exception { + assertTrue(grid().cache(null).putx("key", "val")); + + Assert.assertEquals("val", client.cacheGet(null, "key")); + + assertTrue(grid().cache(CACHE_NAME).putx("key", "val")); + + Assert.assertEquals("val", client.cacheGet(CACHE_NAME, "key")); + } + + /** + * @throws Exception If failed. + */ + public void testRemove() throws Exception { + assertTrue(grid().cache(null).putx("key", "val")); + + assertTrue(client.cacheRemove(null, "key")); + assertFalse(client.cacheRemove(null, "wrongKey")); + + assertNull(grid().cache(null).get("key")); + + assertTrue(grid().cache(CACHE_NAME).putx("key", "val")); + + assertTrue(client.cacheRemove(CACHE_NAME, "key")); + assertFalse(client.cacheRemove(CACHE_NAME, "wrongKey")); + + assertNull(grid().cache(CACHE_NAME).get("key")); + } + + /** + * @throws Exception If failed. 
+ */ + public void testAdd() throws Exception { + assertTrue(client.cacheAdd(null, "key", "val")); + assertEquals("val", grid().cache(null).get("key")); + assertFalse(client.cacheAdd(null, "key", "newVal")); + assertEquals("val", grid().cache(null).get("key")); + + assertTrue(client.cacheAdd(CACHE_NAME, "key", "val")); + assertEquals("val", grid().cache(CACHE_NAME).get("key")); + assertFalse(client.cacheAdd(CACHE_NAME, "key", "newVal")); + assertEquals("val", grid().cache(CACHE_NAME).get("key")); + } + + /** + * @throws Exception If failed. + */ + public void testReplace() throws Exception { + assertFalse(client.cacheReplace(null, "key1", "val1")); + assertTrue(grid().cache(null).putx("key1", "val1")); + assertTrue(client.cacheReplace(null, "key1", "val2")); + + assertFalse(client.cacheReplace(null, "key2", "val1")); + assertTrue(grid().cache(null).putx("key2", "val1")); + assertTrue(client.cacheReplace(null, "key2", "val2")); + + grid().cache(null).clearAll(); + + assertFalse(client.cacheReplace(CACHE_NAME, "key1", "val1")); + assertTrue(grid().cache(CACHE_NAME).putx("key1", "val1")); + assertTrue(client.cacheReplace(CACHE_NAME, "key1", "val2")); + } + + /** + * @throws Exception If failed. + */ + public void testMetrics() throws Exception { - grid().cache(null).resetMetrics(); - grid().cache(CACHE_NAME).resetMetrics(); ++ grid().cache(null).mxBean().clear(); ++ grid().cache(CACHE_NAME).mxBean().clear(); + + grid().cache(null).putx("key1", "val"); + grid().cache(null).putx("key2", "val"); + grid().cache(null).putx("key2", "val"); + + grid().cache(null).get("key1"); + grid().cache(null).get("key2"); + grid().cache(null).get("key2"); + + grid().cache(CACHE_NAME).putx("key1", "val"); + grid().cache(CACHE_NAME).putx("key2", "val"); + grid().cache(CACHE_NAME).putx("key2", "val"); + + grid().cache(CACHE_NAME).get("key1"); + grid().cache(CACHE_NAME).get("key2"); + grid().cache(CACHE_NAME).get("key2"); + + Map<String, Long> m = client.cacheMetrics(null); + + assertNotNull(m); - assertEquals(7, m.size()); ++ assertEquals(4, m.size()); + assertEquals(3, m.get("reads").longValue()); + assertEquals(3, m.get("writes").longValue()); + + m = client.cacheMetrics(CACHE_NAME); + + assertNotNull(m); - assertEquals(7, m.size()); ++ assertEquals(4, m.size()); + assertEquals(3, m.get("reads").longValue()); + assertEquals(3, m.get("writes").longValue()); + } + + /** + * @throws Exception If failed. + */ + public void testIncrement() throws Exception { + assertEquals(15L, client().cacheIncrement(null, "key", 10L, 5L)); + assertEquals(15L, grid().cache(null).dataStructures().atomicLong("key", 0, true).get()); + assertEquals(18L, client().cacheIncrement(null, "key", 20L, 3L)); + assertEquals(18L, grid().cache(null).dataStructures().atomicLong("key", 0, true).get()); + assertEquals(20L, client().cacheIncrement(null, "key", null, 2L)); + assertEquals(20L, grid().cache(null).dataStructures().atomicLong("key", 0, true).get()); + + assertEquals(15L, client().cacheIncrement(CACHE_NAME, "key", 10L, 5L)); + assertEquals(15L, grid().cache(CACHE_NAME).dataStructures().atomicLong("key", 0, true).get()); + assertEquals(18L, client().cacheIncrement(CACHE_NAME, "key", 20L, 3L)); + assertEquals(18L, grid().cache(CACHE_NAME).dataStructures().atomicLong("key", 0, true).get()); + assertEquals(20L, client().cacheIncrement(CACHE_NAME, "key", null, 2L)); + assertEquals(20L, grid().cache(CACHE_NAME).dataStructures().atomicLong("key", 0, true).get()); + } + + /** + * @throws Exception If failed. 
+ */ + public void testDecrement() throws Exception { + assertEquals(15L, client().cacheDecrement(null, "key", 20L, 5L)); + assertEquals(15L, grid().cache(null).dataStructures().atomicLong("key", 0, true).get()); + assertEquals(12L, client().cacheDecrement(null, "key", 20L, 3L)); + assertEquals(12L, grid().cache(null).dataStructures().atomicLong("key", 0, true).get()); + assertEquals(10L, client().cacheDecrement(null, "key", null, 2L)); + assertEquals(10L, grid().cache(null).dataStructures().atomicLong("key", 0, true).get()); + + assertEquals(15L, client().cacheDecrement(CACHE_NAME, "key", 20L, 5L)); + assertEquals(15L, grid().cache(CACHE_NAME).dataStructures().atomicLong("key", 0, true).get()); + assertEquals(12L, client().cacheDecrement(CACHE_NAME, "key", 20L, 3L)); + assertEquals(12L, grid().cache(CACHE_NAME).dataStructures().atomicLong("key", 0, true).get()); + assertEquals(10L, client().cacheDecrement(CACHE_NAME, "key", null, 2L)); + assertEquals(10L, grid().cache(CACHE_NAME).dataStructures().atomicLong("key", 0, true).get()); + } + + /** + * @throws Exception If failed. + */ + public void testAppend() throws Exception { + assertFalse(client.cacheAppend(null, "wrongKey", "_suffix")); + assertFalse(client.cacheAppend(CACHE_NAME, "wrongKey", "_suffix")); + + assertTrue(grid().cache(null).putx("key", "val")); + assertTrue(client.cacheAppend(null, "key", "_suffix")); + assertEquals("val_suffix", grid().cache(null).get("key")); + + assertTrue(grid().cache(CACHE_NAME).putx("key", "val")); + assertTrue(client.cacheAppend(CACHE_NAME, "key", "_suffix")); + assertEquals("val_suffix", grid().cache(CACHE_NAME).get("key")); + } + + /** + * @throws Exception If failed. + */ + public void testPrepend() throws Exception { + assertFalse(client.cachePrepend(null, "wrongKey", "prefix_")); + assertFalse(client.cachePrepend(CACHE_NAME, "wrongKey", "prefix_")); + + assertTrue(grid().cache(null).putx("key", "val")); + assertTrue(client.cachePrepend(null, "key", "prefix_")); + assertEquals("prefix_val", grid().cache(null).get("key")); + + assertTrue(grid().cache(CACHE_NAME).putx("key", "val")); + assertTrue(client.cachePrepend(CACHE_NAME, "key", "prefix_")); + assertEquals("prefix_val", grid().cache(CACHE_NAME).get("key")); + } + + /** + * @throws Exception If failed. + */ + public void testVersion() throws Exception { + assertNotNull(client.version()); + } + + /** + * @throws Exception If failed. + */ + public void testNoop() throws Exception { + client.noop(); + } + + /** + * @throws Exception If failed. 
+ */ + public void testQuit() throws Exception { + client.quit(); + } + } http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/c7a9a1db/modules/core/src/main/java/org/apache/ignite/IgniteCache.java ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/c7a9a1db/modules/core/src/main/java/org/apache/ignite/IgniteCacheManager.java ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/c7a9a1db/modules/core/src/main/java/org/apache/ignite/cache/CacheMetrics.java ---------------------------------------------------------------------- diff --cc modules/core/src/main/java/org/apache/ignite/cache/CacheMetrics.java index 0000000,c0dafdc..5fd2844 mode 000000,100644..100644 --- a/modules/core/src/main/java/org/apache/ignite/cache/CacheMetrics.java +++ b/modules/core/src/main/java/org/apache/ignite/cache/CacheMetrics.java @@@ -1,0 -1,103 +1,368 @@@ + /* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + package org.apache.ignite.cache; + + import java.io.*; + + /** + * Cache metrics used to obtain statistics on cache itself. - * Use {@link GridCache#metrics()} to obtain metrics for a cache. ++ * Use {@link org.apache.ignite.IgniteCache#metrics()} to obtain metrics for a cache. + */ -public interface CacheMetrics extends Serializable { ++public interface CacheMetrics { + /** - * Gets create time of the owning entity (either cache or entry). ++ * The number of get requests that were satisfied by the cache. + * - * @return Create time. ++ * @return the number of hits + */ - public long createTime(); ++ long getCacheHits(); + + /** - * Gets last write time of the owning entity (either cache or entry). ++ * This is a measure of cache efficiency. + * - * @return Last write time. ++ * @return the percentage of successful hits, as a decimal e.g 75. + */ - public long writeTime(); ++ float getCacheHitPercentage(); + + /** - * Gets last read time of the owning entity (either cache or entry). ++ * A miss is a get request that is not satisfied. + * - * @return Last read time. ++ * @return the number of misses + */ - public long readTime(); ++ long getCacheMisses(); + + /** - * Gets last time transaction was committed. ++ * Returns the percentage of cache accesses that did not find a requested entry ++ * in the cache. + * - * @return Last commit time. ++ * @return the percentage of accesses that failed to find anything + */ - public long commitTime(); ++ float getCacheMissPercentage(); + + /** - * Gets last time transaction was rollback. ++ * The total number of requests to the cache. This will be equal to the sum of ++ * the hits and misses. + * - * @return Last rollback time. 
++ * @return the number of gets + */ - public long rollbackTime(); ++ long getCacheGets(); + + /** - * Gets total number of reads of the owning entity (either cache or entry). ++ * The total number of puts to the cache. + * - * @return Total number of reads. ++ * @return the number of puts + */ - public int reads(); ++ long getCachePuts(); + + /** - * Gets total number of writes of the owning entity (either cache or entry). ++ * The total number of removals from the cache. This does not include evictions, ++ * where the cache itself initiates the removal to make space. + * - * @return Total number of writes. ++ * @return the number of removals + */ - public int writes(); ++ long getCacheRemovals(); + + /** - * Gets total number of hits for the owning entity (either cache or entry). ++ * The total number of evictions from the cache. An eviction is a removal ++ * initiated by the cache itself to free up space. An eviction is not treated as ++ * a removal and does not appear in the removal counts. + * - * @return Number of hits. ++ * @return the number of evictions + */ - public int hits(); ++ long getCacheEvictions(); + + /** - * Gets total number of misses for the owning entity (either cache or entry). ++ * The mean time to execute gets. + * - * @return Number of misses. ++ * @return the time in µs + */ - public int misses(); ++ float getAverageGetTime(); ++ ++ /** ++ * The mean time to execute puts. ++ * ++ * @return the time in µs ++ */ ++ float getAveragePutTime(); ++ ++ /** ++ * The mean time to execute removes. ++ * ++ * @return the time in µs ++ */ ++ float getAverageRemoveTime(); ++ ++ ++ /** ++ * The mean time to execute tx commit. ++ * ++ * @return the time in µs ++ */ ++ public float getAverageTxCommitTime(); ++ ++ /** ++ * The mean time to execute tx rollbacks. ++ * ++ * @return Number of transaction rollbacks. ++ */ ++ public float getAverageTxRollbackTime(); ++ + + /** + * Gets total number of transaction commits. + * + * @return Number of transaction commits. + */ - public int txCommits(); ++ public long getCacheTxCommits(); + + /** + * Gets total number of transaction rollbacks. + * + * @return Number of transaction rollbacks. + */ - public int txRollbacks(); ++ public long getCacheTxRollbacks(); ++ ++ /** ++ * Gets name of this cache. ++ * ++ * @return Cache name. ++ */ ++ public String name(); ++ ++ /** ++ * Gets number of entries that was swapped to disk. ++ * ++ * @return Number of entries that was swapped to disk. ++ */ ++ public long getOverflowSize(); ++ ++ /** ++ * Gets number of entries stored in off-heap memory. ++ * ++ * @return Number of entries stored in off-heap memory. ++ */ ++ public long getOffHeapEntriesCount(); ++ ++ /** ++ * Gets memory size allocated in off-heap. ++ * ++ * @return Memory size allocated in off-heap. ++ */ ++ public long getOffHeapAllocatedSize(); ++ ++ /** ++ * Returns number of non-{@code null} values in the cache. ++ * ++ * @return Number of non-{@code null} values in the cache. ++ */ ++ public int getSize(); ++ ++ /** ++ * Gets number of keys in the cache, possibly with {@code null} values. ++ * ++ * @return Number of keys in the cache. ++ */ ++ public int getKeySize(); ++ ++ /** ++ * Returns {@code true} if this cache is empty. ++ * ++ * @return {@code true} if this cache is empty. ++ */ ++ public boolean isEmpty(); ++ ++ /** ++ * Gets current size of evict queue used to batch up evictions. ++ * ++ * @return Current size of evict queue. 
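[Editor's note: to make the relationship between the new counters concrete, getCacheGets() is documented above as the sum of hits and misses, and the percentage getters are expressed on a 0-100 scale. A minimal arithmetic sketch follows; the method name is illustrative only.]

    // Illustrative only: how the documented hit percentage relates to the raw counters.
    static float hitPercentage(long hits, long misses) {
        long gets = hits + misses;                   // equals getCacheGets()

        return gets == 0 ? 0f : hits * 100f / gets;  // e.g. 3 hits, 1 miss -> 75.0
    }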
++ */ ++ public int getDhtEvictQueueCurrentSize(); ++ ++ /** ++ * Gets transaction per-thread map size. ++ * ++ * @return Thread map size. ++ */ ++ public int getTxThreadMapSize(); ++ ++ /** ++ * Gets transaction per-Xid map size. ++ * ++ * @return Transaction per-Xid map size. ++ */ ++ public int getTxXidMapSize(); ++ ++ /** ++ * Gets committed transaction queue size. ++ * ++ * @return Committed transaction queue size. ++ */ ++ public int getTxCommitQueueSize(); ++ ++ /** ++ * Gets prepared transaction queue size. ++ * ++ * @return Prepared transaction queue size. ++ */ ++ public int getTxPrepareQueueSize(); ++ ++ /** ++ * Gets start version counts map size. ++ * ++ * @return Start version counts map size. ++ */ ++ public int getTxStartVersionCountsSize(); ++ ++ /** ++ * Gets number of cached committed transaction IDs. ++ * ++ * @return Number of cached committed transaction IDs. ++ */ ++ public int getTxCommittedVersionsSize(); ++ ++ /** ++ * Gets number of cached rolled back transaction IDs. ++ * ++ * @return Number of cached rolled back transaction IDs. ++ */ ++ public int getTxRolledbackVersionsSize(); ++ ++ /** ++ * Gets transaction DHT per-thread map size. ++ * ++ * @return DHT thread map size. ++ */ ++ public int getTxDhtThreadMapSize(); ++ ++ /** ++ * Gets transaction DHT per-Xid map size. ++ * ++ * @return Transaction DHT per-Xid map size. ++ */ ++ public int getTxDhtXidMapSize(); ++ ++ /** ++ * Gets committed DHT transaction queue size. ++ * ++ * @return Committed DHT transaction queue size. ++ */ ++ public int getTxDhtCommitQueueSize(); ++ ++ /** ++ * Gets prepared DHT transaction queue size. ++ * ++ * @return Prepared DHT transaction queue size. ++ */ ++ public int getTxDhtPrepareQueueSize(); ++ ++ /** ++ * Gets DHT start version counts map size. ++ * ++ * @return DHT start version counts map size. ++ */ ++ public int getTxDhtStartVersionCountsSize(); ++ ++ /** ++ * Gets number of cached committed DHT transaction IDs. ++ * ++ * @return Number of cached committed DHT transaction IDs. ++ */ ++ public int getTxDhtCommittedVersionsSize(); ++ ++ /** ++ * Gets number of cached rolled back DHT transaction IDs. ++ * ++ * @return Number of cached rolled back DHT transaction IDs. ++ */ ++ public int getTxDhtRolledbackVersionsSize(); ++ ++ /** ++ * Returns {@code True} if write-behind is enabled. ++ * ++ * @return {@code True} if write-behind is enabled. ++ */ ++ public boolean isWriteBehindEnabled(); ++ ++ /** ++ * Gets the maximum size of the write-behind buffer. When the count of unique keys ++ * in write buffer exceeds this value, the buffer is scheduled for write to the underlying store. ++ * <p/> ++ * If this value is {@code 0}, then flush is performed only on time-elapsing basis. However, ++ * when this value is {@code 0}, the cache critical size is set to ++ * {@link CacheConfiguration#DFLT_WRITE_BEHIND_CRITICAL_SIZE} ++ * ++ * @return Buffer size that triggers flush procedure. ++ */ ++ public int getWriteBehindFlushSize(); ++ ++ /** ++ * Gets the number of flush threads that will perform store update operations. ++ * ++ * @return Count of worker threads. ++ */ ++ public int getWriteBehindFlushThreadCount(); ++ ++ /** ++ * Gets the cache flush frequency. All pending operations on the underlying store will be performed ++ * within time interval not less then this value. ++ * <p/> ++ * If this value is {@code 0}, then flush is performed only when buffer size exceeds flush size. ++ * ++ * @return Flush frequency in milliseconds. 
++ */ ++ public long getWriteBehindFlushFrequency(); ++ ++ /** ++ * Gets the maximum count of similar (put or remove) operations that can be grouped to a single batch. ++ * ++ * @return Maximum size of batch. ++ */ ++ public int getWriteBehindStoreBatchSize(); ++ ++ /** ++ * Gets count of write buffer overflow events since initialization. Each overflow event causes ++ * the ongoing flush operation to be performed synchronously. ++ * ++ * @return Count of cache overflow events since start. ++ */ ++ public int getWriteBehindTotalCriticalOverflowCount(); ++ ++ /** ++ * Gets count of write buffer overflow events in progress at the moment. Each overflow event causes ++ * the ongoing flush operation to be performed synchronously. ++ * ++ * @return Count of cache overflow events since start. ++ */ ++ public int getWriteBehindCriticalOverflowCount(); ++ ++ /** ++ * Gets count of cache entries that are in a store-retry state. An entry is assigned a store-retry state ++ * when underlying store failed due some reason and cache has enough space to retain this entry till ++ * the next try. ++ * ++ * @return Count of entries in store-retry state. ++ */ ++ public int getWriteBehindErrorRetryCount(); ++ ++ /** ++ * Gets count of entries that were processed by the write-behind store and have not been ++ * flushed to the underlying store yet. ++ * ++ * @return Total count of entries in cache store internal buffer. ++ */ ++ public int getWriteBehindBufferSize(); + } http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/c7a9a1db/modules/core/src/main/java/org/apache/ignite/cache/GridCache.java ---------------------------------------------------------------------- diff --cc modules/core/src/main/java/org/apache/ignite/cache/GridCache.java index 0000000,d86b221..f1348f8 mode 000000,100644..100644 --- a/modules/core/src/main/java/org/apache/ignite/cache/GridCache.java +++ b/modules/core/src/main/java/org/apache/ignite/cache/GridCache.java @@@ -1,0 -1,278 +1,282 @@@ + /* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + package org.apache.ignite.cache; + + import org.apache.ignite.*; + import org.apache.ignite.cache.affinity.*; + import org.apache.ignite.cache.datastructures.*; -import org.apache.ignite.cache.store.CacheStore; ++import org.apache.ignite.cache.store.*; + import org.apache.ignite.lang.*; + import org.apache.ignite.transactions.*; + import org.jetbrains.annotations.*; + + import java.util.*; + + /** + * Main entry point for all <b>Data Grid APIs.</b> You can get a named cache by calling {@link org.apache.ignite.Ignite#cache(String)} + * method. + * <h1 class="header">Functionality</h1> + * This API extends {@link CacheProjection} API which contains vast majority of cache functionality + * and documentation. 
In addition to {@link CacheProjection} functionality this API provides: + * <ul> + * <li> + * Various {@code 'loadCache(..)'} methods to load cache either synchronously or asynchronously. + * These methods don't specify any keys to load, and leave it to the underlying storage to load cache + * data based on the optionally passed in arguments. + * </li> + * <li> + * Method {@link #affinity()} provides {@link org.apache.ignite.cache.affinity.CacheAffinityFunction} service for information on + * data partitioning and mapping keys to grid nodes responsible for caching those keys. + * </li> + * <li> + * Method {@link #dataStructures()} provides {@link org.apache.ignite.cache.datastructures.CacheDataStructures} service for + * creating and working with distributed concurrent data structures, such as + * {@link org.apache.ignite.cache.datastructures.CacheAtomicLong}, {@link org.apache.ignite.cache.datastructures.CacheAtomicReference}, {@link org.apache.ignite.cache.datastructures.CacheQueue}, etc. + * </li> + * <li> + * Methods like {@code 'tx{Un}Synchronize(..)'} witch allow to get notifications for transaction state changes. + * This feature is very useful when integrating cache transactions with some other in-house transactions. + * </li> + * <li>Method {@link #metrics()} to provide metrics for the whole cache.</li> + * <li>Method {@link #configuration()} to provide cache configuration bean.</li> + * </ul> + * + * @param <K> Cache key type. + * @param <V> Cache value type. + */ + public interface GridCache<K, V> extends CacheProjection<K, V> { + /** + * Gets configuration bean for this cache. + * + * @return Configuration bean for this cache. + */ + public CacheConfiguration configuration(); + + /** + * Registers transactions synchronizations for all transactions started by this cache. + * Use it whenever you need to get notifications on transaction lifecycle and possibly change + * its course. It is also particularly useful when integrating cache transactions + * with some other in-house transactions. + * + * @param syncs Transaction synchronizations to register. + */ + public void txSynchronize(@Nullable IgniteTxSynchronization syncs); + + /** + * Removes transaction synchronizations. + * + * @param syncs Transactions synchronizations to remove. + * @see #txSynchronize(IgniteTxSynchronization) + */ + public void txUnsynchronize(@Nullable IgniteTxSynchronization syncs); + + /** + * Gets registered transaction synchronizations. + * + * @return Registered transaction synchronizations. + * @see #txSynchronize(IgniteTxSynchronization) + */ + public Collection<IgniteTxSynchronization> txSynchronizations(); + + /** + * Gets affinity service to provide information about data partitioning + * and distribution. + * + * @return Cache data affinity service. + */ + public CacheAffinity<K> affinity(); + + /** + * Gets data structures service to provide a gateway for creating various + * distributed data structures similar in APIs to {@code java.util.concurrent} package. + * + * @return Cache data structures service. + */ + public CacheDataStructures dataStructures(); + ++ + /** + * Gets metrics (statistics) for this cache. + * + * @return Cache metrics. + */ + public CacheMetrics metrics(); + ++ ++ /** ++ * Gets metrics (statistics) for this cache. ++ * ++ * @return Cache metrics. ++ */ ++ public IgniteCacheMxBean mxBean(); ++ + /** + * Gets size (in bytes) of all entries swapped to disk. + * + * @return Size (in bytes) of all entries swapped to disk. + * @throws IgniteCheckedException In case of error. 
+ */ + public long overflowSize() throws IgniteCheckedException; + + /** + * Gets number of cache entries stored in off-heap memory. + * + * @return Number of cache entries stored in off-heap memory. + */ + public long offHeapEntriesCount(); + + /** + * Gets memory size allocated in off-heap. + * + * @return Allocated memory size. + */ + public long offHeapAllocatedSize(); + + /** + * Gets size in bytes for swap space. + * + * @return Size in bytes. + * @throws IgniteCheckedException If failed. + */ + public long swapSize() throws IgniteCheckedException; + + /** + * Gets number of swap entries (keys). + * + * @return Number of entries stored in swap. + * @throws IgniteCheckedException If failed. + */ + public long swapKeys() throws IgniteCheckedException; + + /** + * Gets iterator over keys and values belonging to this cache swap space on local node. This + * iterator is thread-safe, which means that cache (and therefore its swap space) + * may be modified concurrently with iteration over swap. + * <p> + * Returned iterator supports {@code remove} operation which delegates to + * {@link #removex(Object, org.apache.ignite.lang.IgnitePredicate[])} method. + * <h2 class="header">Cache Flags</h2> + * This method is not available if any of the following flags are set on projection: + * {@link CacheFlag#SKIP_SWAP}. + * + * @return Iterator over keys. + * @throws IgniteCheckedException If failed. + * @see #promote(Object) + */ + public Iterator<Map.Entry<K, V>> swapIterator() throws IgniteCheckedException; + + /** + * Gets iterator over keys and values belonging to this cache off-heap memory on local node. This + * iterator is thread-safe, which means that cache (and therefore its off-heap memory) + * may be modified concurrently with iteration over off-heap. To achieve better performance + * the keys and values deserialized on demand, whenever accessed. + * <p> + * Returned iterator supports {@code remove} operation which delegates to + * {@link #removex(Object, org.apache.ignite.lang.IgnitePredicate[])} method. + * + * @return Iterator over keys. + * @throws IgniteCheckedException If failed. + */ + public Iterator<Map.Entry<K, V>> offHeapIterator() throws IgniteCheckedException; + + /** + * Delegates to {@link CacheStore#loadCache(org.apache.ignite.lang.IgniteBiInClosure,Object...)} method + * to load state from the underlying persistent storage. The loaded values + * will then be given to the optionally passed in predicate, and, if the predicate returns + * {@code true}, will be stored in cache. If predicate is {@code null}, then + * all loaded values will be stored in cache. + * <p> + * Note that this method does not receive keys as a parameter, so it is up to + * {@link CacheStore} implementation to provide all the data to be loaded. + * <p> + * This method is not transactional and may end up loading a stale value into + * cache if another thread has updated the value immediately after it has been + * loaded. It is mostly useful when pre-loading the cache from underlying + * data store before start, or for read-only caches. + * + * @param p Optional predicate (may be {@code null}). If provided, will be used to + * filter values to be put into cache. + * @param ttl Time to live for loaded entries ({@code 0} for infinity). + * @param args Optional user arguments to be passed into + * {@link CacheStore#loadCache(org.apache.ignite.lang.IgniteBiInClosure, Object...)} method. + * @throws IgniteCheckedException If loading failed. 
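[Editor's note: a short usage sketch for the loadCache(p, ttl, args) method documented further below. Only the signature and the Ignite#cache(String) accessor mentioned in the class javadoc come from this file; the cache name, key filter and trailing argument are illustrative assumptions.]

    // Illustrative usage (not from this commit): pre-load a cache from its CacheStore,
    // keeping only entries whose keys pass the predicate; ttl = 0 means entries never expire.
    void preload(Ignite ignite) throws IgniteCheckedException {
        GridCache<Integer, String> cache = ignite.cache("partitioned");

        cache.loadCache(new IgniteBiPredicate<Integer, String>() {
            @Override public boolean apply(Integer key, String val) {
                return key % 2 == 0; // store only entries with even keys
            }
        }, 0, "initial-load"); // trailing args are handed to CacheStore.loadCache(...)
    }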
+ */ + public void loadCache(@Nullable IgniteBiPredicate<K, V> p, long ttl, @Nullable Object... args) throws IgniteCheckedException; + + /** + * Asynchronously delegates to {@link CacheStore#loadCache(org.apache.ignite.lang.IgniteBiInClosure, Object...)} method + * to reload state from the underlying persistent storage. The reloaded values + * will then be given to the optionally passed in predicate, and if the predicate returns + * {@code true}, will be stored in cache. If predicate is {@code null}, then + * all reloaded values will be stored in cache. + * <p> + * Note that this method does not receive keys as a parameter, so it is up to + * {@link CacheStore} implementation to provide all the data to be loaded. + * <p> + * This method is not transactional and may end up loading a stale value into + * cache if another thread has updated the value immediately after it has been + * loaded. It is mostly useful when pre-loading the cache from underlying + * data store before start, or for read-only caches. + * + * @param p Optional predicate (may be {@code null}). If provided, will be used to + * filter values to be put into cache. + * @param ttl Time to live for loaded entries ({@code 0} for infinity). + * @param args Optional user arguments to be passed into + * {@link CacheStore#loadCache(org.apache.ignite.lang.IgniteBiInClosure,Object...)} method. + * @return Future to be completed whenever loading completes. + */ + public IgniteFuture<?> loadCacheAsync(@Nullable IgniteBiPredicate<K, V> p, long ttl, @Nullable Object... args); + + /** + * Gets a random entry out of cache. In the worst cache scenario this method + * has complexity of <pre>O(S * N/64)</pre> where {@code N} is the size of internal hash + * table and {@code S} is the number of hash table buckets to sample, which is {@code 5} + * by default. However, if the table is pretty dense, with density factor of {@code N/64}, + * which is true for near fully populated caches, this method will generally perform significantly + * faster with complexity of O(S) where {@code S = 5}. + * <p> + * Note that this method is not available on {@link CacheProjection} API since it is + * impossible (or very hard) to deterministically return a number value when pre-filtering + * and post-filtering is involved (e.g. projection level predicate filters). + * + * @return Random entry, or {@code null} if cache is empty. + */ + @Nullable public CacheEntry<K, V> randomEntry(); + + /** + * Forces this cache node to re-balance its partitions. This method is usually used when + * {@link CacheConfiguration#getPreloadPartitionedDelay()} configuration parameter has non-zero value. + * When many nodes are started or stopped almost concurrently, it is more efficient to delay + * preloading until the node topology is stable to make sure that no redundant re-partitioning + * happens. + * <p> + * In case of{@link CacheMode#PARTITIONED} caches, for better efficiency user should + * usually make sure that new nodes get placed on the same place of consistent hash ring as + * the left nodes, and that nodes are restarted before + * {@link CacheConfiguration#getPreloadPartitionedDelay() preloadDelay} expires. To place nodes + * on the same place in consistent hash ring, use + * {@link org.apache.ignite.cache.affinity.consistenthash.CacheConsistentHashAffinityFunction#setHashIdResolver(org.apache.ignite.cache.affinity.CacheAffinityNodeHashResolver)} to make sure that + * a node maps to the same hash ID if re-started. 
+ * <p> + * See {@link CacheConfiguration#getPreloadPartitionedDelay()} for more information on how to configure + * preload re-partition delay. + * <p> + * @return Future that will be completed when preloading is finished. + */ + public IgniteFuture<?> forceRepartition(); - - /** - * Resets metrics for current cache. - */ - public void resetMetrics(); + } http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/c7a9a1db/modules/core/src/main/java/org/apache/ignite/cache/IgniteCacheMxBean.java ---------------------------------------------------------------------- diff --cc modules/core/src/main/java/org/apache/ignite/cache/IgniteCacheMxBean.java index 0000000,0000000..36e946f new file mode 100644 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/cache/IgniteCacheMxBean.java @@@ -1,0 -1,0 +1,222 @@@ ++/* ++ * Licensed to the Apache Software Foundation (ASF) under one or more ++ * contributor license agreements. See the NOTICE file distributed with ++ * this work for additional information regarding copyright ownership. ++ * The ASF licenses this file to You under the Apache License, Version 2.0 ++ * (the "License"); you may not use this file except in compliance with ++ * the License. You may obtain a copy of the License at ++ * ++ * http://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ */ ++ ++package org.apache.ignite.cache; ++ ++import org.apache.ignite.mbean.*; ++import org.apache.ignite.*; ++ ++/** ++ * This interface defines JMX view on {@link IgniteCache}. ++ */ ++import org.apache.ignite.*; ++ ++import javax.cache.management.*; ++ ++/** ++ * This interface defines JMX view on {@link IgniteCache}. 
++ */ ++@IgniteMBeanDescription("MBean that provides access to cache descriptor.") ++public interface IgniteCacheMxBean extends CacheStatisticsMXBean, CacheMetrics { ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Clear statistics.") ++ public void clear(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Number of hits.") ++ public long getCacheHits(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Percentage of successful hits.") ++ public float getCacheHitPercentage(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Number of misses.") ++ public long getCacheMisses(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Percentage of accesses that failed to find anything.") ++ public float getCacheMissPercentage(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Number of gets.") ++ public long getCacheGets(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Number of puts.") ++ public long getCachePuts(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Number of removals.") ++ public long getCacheRemovals(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Number of evictions.") ++ public long getCacheEvictions(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Average time to execute get.") ++ public float getAverageGetTime(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Average time to execute put.") ++ public float getAveragePutTime(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Average time to execute remove.") ++ public float getAverageRemoveTime(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Average time to commit transaction.") ++ public float getAverageTxCommitTime(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Average time to rollback transaction.") ++ public float getAverageTxRollbackTime(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Number of transaction commits.") ++ public long getCacheTxCommits(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Number of transaction rollbacks.") ++ public long getCacheTxRollbacks(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Cache name.") ++ public String name(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Number of entries that were swapped to disk.") ++ public long getOverflowSize(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Number of entries stored in off-heap memory.") ++ public long getOffHeapEntriesCount(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Memory size allocated in off-heap.") ++ public long getOffHeapAllocatedSize(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Number of non-null values in the cache.") ++ public int getSize(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Number of keys in the cache (possibly with null values).") ++ public int getKeySize(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("True if cache is empty.") ++ public boolean isEmpty(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Current size of evict queue.") ++ public int getDhtEvictQueueCurrentSize(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Transaction per-thread map size.") ++ public int getTxThreadMapSize(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Transaction per-Xid map size.") ++ public int getTxXidMapSize(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Transaction committed queue size.") ++ public int getTxCommitQueueSize(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Transaction 
prepared queue size.") ++ public int getTxPrepareQueueSize(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Transaction start version counts map size.") ++ public int getTxStartVersionCountsSize(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Transaction committed ID map size.") ++ public int getTxCommittedVersionsSize(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Transaction rolled back ID map size.") ++ public int getTxRolledbackVersionsSize(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Transaction DHT per-thread map size.") ++ public int getTxDhtThreadMapSize(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Transaction DHT per-Xid map size.") ++ public int getTxDhtXidMapSize(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Transaction DHT committed queue size.") ++ public int getTxDhtCommitQueueSize(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Transaction DHT prepared queue size.") ++ public int getTxDhtPrepareQueueSize(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Transaction DHT start version counts map size.") ++ public int getTxDhtStartVersionCountsSize(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Transaction DHT committed ID map size.") ++ public int getTxDhtCommittedVersionsSize(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Transaction DHT rolled back ID map size.") ++ public int getTxDhtRolledbackVersionsSize(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("True if write-behind is enabled for this cache.") ++ public boolean isWriteBehindEnabled(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Size of internal buffer that triggers flush procedure.") ++ public int getWriteBehindFlushSize(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Count of flush threads.") ++ public int getWriteBehindFlushThreadCount(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Flush frequency interval in milliseconds.") ++ public long getWriteBehindFlushFrequency(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Maximum size of batch for similar operations.") ++ public int getWriteBehindStoreBatchSize(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Count of cache overflow events since write-behind cache has started.") ++ public int getWriteBehindTotalCriticalOverflowCount(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Count of cache overflow events since write-behind cache has started.") ++ public int getWriteBehindCriticalOverflowCount(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Count of cache cache entries that are currently in retry state.") ++ public int getWriteBehindErrorRetryCount(); ++ ++ /** {@inheritDoc} */ ++ @IgniteMBeanDescription("Count of cache entries that are waiting to be flushed.") ++ public int getWriteBehindBufferSize(); ++} http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/c7a9a1db/modules/core/src/main/java/org/apache/ignite/internal/ClusterLocalNodeMetrics.java ---------------------------------------------------------------------- diff --cc modules/core/src/main/java/org/apache/ignite/internal/ClusterLocalNodeMetrics.java index 0000000,5fc587b..470fa23 mode 000000,100644..100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/ClusterLocalNodeMetrics.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/ClusterLocalNodeMetrics.java @@@ -1,0 -1,311 +1,311 @@@ + /* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license 
agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + package org.apache.ignite.internal; + + import org.apache.ignite.cluster.*; + import org.apache.ignite.internal.util.typedef.internal.*; + + /** + * Local node metrics MBean. + */ -public class ClusterLocalNodeMetrics implements ClusterNodeMetricsMBean { ++public class ClusterLocalNodeMetrics implements ClusterNodeMetricsMxBean { + /** */ + private static final long serialVersionUID = 0L; + + /** Grid node. */ + private final ClusterNode node; + + /** + * @param node Node to manage. + */ + public ClusterLocalNodeMetrics(ClusterNode node) { + assert node != null; + + this.node = node; + } + + /** {@inheritDoc} */ + @Override public int getTotalCpus() { + return node.metrics().getTotalCpus(); + } + + /** {@inheritDoc} */ + @Override public float getAverageActiveJobs() { + return node.metrics().getAverageActiveJobs(); + } + + /** {@inheritDoc} */ + @Override public float getAverageCancelledJobs() { + return node.metrics().getAverageCancelledJobs(); + } + + /** {@inheritDoc} */ + @Override public double getAverageJobExecuteTime() { + return node.metrics().getAverageJobExecuteTime(); + } + + /** {@inheritDoc} */ + @Override public double getAverageJobWaitTime() { + return node.metrics().getAverageJobWaitTime(); + } + + /** {@inheritDoc} */ + @Override public float getAverageRejectedJobs() { + return node.metrics().getAverageRejectedJobs(); + } + + /** {@inheritDoc} */ + @Override public float getAverageWaitingJobs() { + return node.metrics().getAverageWaitingJobs(); + } + + /** {@inheritDoc} */ + @Override public float getBusyTimePercentage() { + return node.metrics().getBusyTimePercentage() * 100; + } + + /** {@inheritDoc} */ + @Override public int getCurrentActiveJobs() { + return node.metrics().getCurrentActiveJobs(); + } + + /** {@inheritDoc} */ + @Override public int getCurrentCancelledJobs() { + return node.metrics().getCurrentCancelledJobs(); + } + + /** {@inheritDoc} */ + @Override public long getCurrentIdleTime() { + return node.metrics().getCurrentIdleTime(); + } + + /** {@inheritDoc} */ + @Override public long getCurrentJobExecuteTime() { + return node.metrics().getCurrentJobExecuteTime(); + } + + /** {@inheritDoc} */ + @Override public long getCurrentJobWaitTime() { + return node.metrics().getCurrentJobWaitTime(); + } + + /** {@inheritDoc} */ + @Override public int getCurrentRejectedJobs() { + return node.metrics().getCurrentRejectedJobs(); + } + + /** {@inheritDoc} */ + @Override public int getCurrentWaitingJobs() { + return node.metrics().getCurrentWaitingJobs(); + } + + /** {@inheritDoc} */ + @Override public int getTotalExecutedTasks() { + return node.metrics().getTotalExecutedTasks(); + } + + /** {@inheritDoc} */ + @Override public int getCurrentDaemonThreadCount() { + return node.metrics().getCurrentDaemonThreadCount(); + } + + /** {@inheritDoc} */ + @Override public long 
getHeapMemoryCommitted() { + return node.metrics().getHeapMemoryCommitted(); + } + + /** {@inheritDoc} */ + @Override public long getHeapMemoryInitialized() { + return node.metrics().getHeapMemoryInitialized(); + } + + /** {@inheritDoc} */ + @Override public long getHeapMemoryMaximum() { + return node.metrics().getHeapMemoryMaximum(); + } + + /** {@inheritDoc} */ + @Override public long getHeapMemoryUsed() { + return node.metrics().getHeapMemoryUsed(); + } + + /** {@inheritDoc} */ + @Override public float getIdleTimePercentage() { + return node.metrics().getIdleTimePercentage() * 100; + } + + /** {@inheritDoc} */ + @Override public long getLastUpdateTime() { + return node.metrics().getLastUpdateTime(); + } + + /** {@inheritDoc} */ + @Override public int getMaximumActiveJobs() { + return node.metrics().getMaximumActiveJobs(); + } + + /** {@inheritDoc} */ + @Override public int getMaximumCancelledJobs() { + return node.metrics().getMaximumCancelledJobs(); + } + + /** {@inheritDoc} */ + @Override public long getMaximumJobExecuteTime() { + return node.metrics().getMaximumJobExecuteTime(); + } + + /** {@inheritDoc} */ + @Override public long getMaximumJobWaitTime() { + return node.metrics().getMaximumJobWaitTime(); + } + + /** {@inheritDoc} */ + @Override public int getMaximumRejectedJobs() { + return node.metrics().getMaximumRejectedJobs(); + } + + /** {@inheritDoc} */ + @Override public int getMaximumWaitingJobs() { + return node.metrics().getMaximumWaitingJobs(); + } + + /** {@inheritDoc} */ + @Override public long getNonHeapMemoryCommitted() { + return node.metrics().getNonHeapMemoryCommitted(); + } + + /** {@inheritDoc} */ + @Override public long getNonHeapMemoryInitialized() { + return node.metrics().getNonHeapMemoryInitialized(); + } + + /** {@inheritDoc} */ + @Override public long getNonHeapMemoryMaximum() { + return node.metrics().getNonHeapMemoryMaximum(); + } + + /** {@inheritDoc} */ + @Override public long getNonHeapMemoryUsed() { + return node.metrics().getNonHeapMemoryUsed(); + } + + /** {@inheritDoc} */ + @Override public int getMaximumThreadCount() { + return node.metrics().getMaximumThreadCount(); + } + + /** {@inheritDoc} */ + @Override public long getStartTime() { + return node.metrics().getStartTime(); + } + + /** {@inheritDoc} */ + @Override public long getNodeStartTime() { + return node.metrics().getNodeStartTime(); + } + + /** {@inheritDoc} */ + @Override public double getCurrentCpuLoad() { + return node.metrics().getCurrentCpuLoad() * 100; + } + + /** {@inheritDoc} */ + @Override public double getAverageCpuLoad() { + return node.metrics().getAverageCpuLoad() * 100; + } + + /** {@inheritDoc} */ + @Override public double getCurrentGcCpuLoad() { + return node.metrics().getCurrentGcCpuLoad() * 100; + } + + /** {@inheritDoc} */ + @Override public int getCurrentThreadCount() { + return node.metrics().getCurrentThreadCount(); + } + + /** {@inheritDoc} */ + @Override public long getTotalBusyTime() { + return node.metrics().getTotalBusyTime(); + } + + /** {@inheritDoc} */ + @Override public int getTotalCancelledJobs() { + return node.metrics().getTotalCancelledJobs(); + } + + /** {@inheritDoc} */ + @Override public int getTotalExecutedJobs() { + return node.metrics().getTotalExecutedJobs(); + } + + /** {@inheritDoc} */ + @Override public long getTotalIdleTime() { + return node.metrics().getTotalIdleTime(); + } + + /** {@inheritDoc} */ + @Override public int getTotalRejectedJobs() { + return node.metrics().getTotalRejectedJobs(); + } + + /** {@inheritDoc} */ + @Override public 
long getTotalStartedThreadCount() { + return node.metrics().getTotalStartedThreadCount(); + } + + /** {@inheritDoc} */ + @Override public long getUpTime() { + return node.metrics().getUpTime(); + } + + /** {@inheritDoc} */ + @Override public long getLastDataVersion() { + return node.metrics().getLastDataVersion(); + } + + /** {@inheritDoc} */ + @Override public int getSentMessagesCount() { + return node.metrics().getSentMessagesCount(); + } + + /** {@inheritDoc} */ + @Override public long getSentBytesCount() { + return node.metrics().getSentBytesCount(); + } + + /** {@inheritDoc} */ + @Override public int getReceivedMessagesCount() { + return node.metrics().getReceivedMessagesCount(); + } + + /** {@inheritDoc} */ + @Override public long getReceivedBytesCount() { + return node.metrics().getReceivedBytesCount(); + } + + /** {@inheritDoc} */ + @Override public int getOutboundMessagesQueueSize() { + return node.metrics().getOutboundMessagesQueueSize(); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(ClusterLocalNodeMetrics.class, this); + } + } http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/c7a9a1db/modules/core/src/main/java/org/apache/ignite/internal/ClusterNodeMetricsMxBean.java ---------------------------------------------------------------------- diff --cc modules/core/src/main/java/org/apache/ignite/internal/ClusterNodeMetricsMxBean.java index 0000000,0000000..58ef4aa new file mode 100644 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/ClusterNodeMetricsMxBean.java @@@ -1,0 -1,0 +1,29 @@@ ++/* ++ * Licensed to the Apache Software Foundation (ASF) under one or more ++ * contributor license agreements. See the NOTICE file distributed with ++ * this work for additional information regarding copyright ownership. ++ * The ASF licenses this file to You under the Apache License, Version 2.0 ++ * (the "License"); you may not use this file except in compliance with ++ * the License. You may obtain a copy of the License at ++ * ++ * http://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ */ ++ ++package org.apache.ignite.internal; ++ ++import org.apache.ignite.cluster.*; ++import org.apache.ignite.mbean.*; ++ ++/** ++ * MBean for local node metrics. ++ */ ++@IgniteMBeanDescription("MBean that provides access to all local node metrics.") ++public interface ClusterNodeMetricsMxBean extends ClusterNodeMetrics { ++ // No-op. ++}
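
Editor's note: the sketch below is a minimal illustration of how the node-metrics MXBean introduced above could be read back through standard JMX once a node has registered it. It is not taken from this commit: the "ClusterLocalNodeMetrics" name fragment used for filtering is a guess based on the implementation class name, the actual ObjectName under which Ignite registers the bean may differ, and the attribute names simply assume the usual JavaBean mapping of the ClusterNodeMetrics getters (getTotalCpus -> "TotalCpus", getHeapMemoryUsed -> "HeapMemoryUsed").

    import javax.management.MBeanServer;
    import javax.management.ObjectName;
    import java.lang.management.ManagementFactory;
    import java.util.Set;

    public class NodeMetricsJmxSketch {
        public static void main(String[] args) throws Exception {
            // Platform MBean server where the node metrics MXBean is assumed to be registered.
            MBeanServer srv = ManagementFactory.getPlatformMBeanServer();

            // Enumerate all registered MBeans and filter by a name fragment
            // (assumed naming; adjust to the actual registration pattern).
            Set<ObjectName> names = srv.queryNames(new ObjectName("*:*"), null);

            for (ObjectName name : names) {
                if (name.getCanonicalName().contains("ClusterLocalNodeMetrics")) {
                    // Read a couple of attributes exposed by ClusterNodeMetricsMxBean.
                    Object cpus = srv.getAttribute(name, "TotalCpus");
                    Object heapUsed = srv.getAttribute(name, "HeapMemoryUsed");

                    System.out.println(name + ": totalCpus=" + cpus + ", heapMemoryUsed=" + heapUsed);
                }
            }
        }
    }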