# Renaming: org.gridgain.grid.ggfs → org.apache.ignite.fs
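This commit moves the top-level GGFS public classes from the legacy `org.gridgain.grid.ggfs` package to `org.apache.ignite.fs`; class names are unchanged, and the `org.gridgain.grid.ggfs.mapreduce` subpackage is only updated, not relocated, here. Before the raw diff, a minimal sketch of what the move means for user code, assuming only the package change shown in the file list below (the class `GgfsClientExample` itself is hypothetical):

```java
// Before this commit (legacy GridGain package):
// import org.gridgain.grid.ggfs.IgniteFsConfiguration;
// import org.gridgain.grid.ggfs.IgniteFsGroupDataBlocksKeyMapper;
// import org.gridgain.grid.ggfs.IgniteFsPath;

// After this commit (new Apache Ignite package):
import org.apache.ignite.fs.IgniteFsConfiguration;
import org.apache.ignite.fs.IgniteFsGroupDataBlocksKeyMapper;
import org.apache.ignite.fs.IgniteFsPath;

/** Illustrative only: the type names stay the same, so only import statements change. */
public class GgfsClientExample {
    // No GGFS APIs are modified by the rename; fully qualified references
    // (for example in Spring XML) need the same package substitution.
}
```

The same substitution applies to fully qualified class names in Spring XML, as the example-ggfs.xml hunk further down shows.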
Project: http://git-wip-us.apache.org/repos/asf/incubator-ignite/repo Commit: http://git-wip-us.apache.org/repos/asf/incubator-ignite/commit/81e01957 Tree: http://git-wip-us.apache.org/repos/asf/incubator-ignite/tree/81e01957 Diff: http://git-wip-us.apache.org/repos/asf/incubator-ignite/diff/81e01957 Branch: refs/heads/master Commit: 81e01957ee9d674b3803173bfaeebd23821fb3cd Parents: 243e521 Author: sboikov <sboi...@gridgain.com> Authored: Fri Dec 5 11:29:36 2014 +0300 Committer: sboikov <sboi...@gridgain.com> Committed: Fri Dec 5 11:29:39 2014 +0300 ---------------------------------------------------------------------- examples/config/filesystem/example-ggfs.xml | 4 +- .../org/gridgain/examples/ggfs/GgfsExample.java | 2 +- .../examples/ggfs/GgfsMapReduceExample.java | 2 +- .../src/main/java/org/apache/ignite/Ignite.java | 12 +- .../main/java/org/apache/ignite/IgniteFs.java | 24 +- .../configuration/IgniteConfiguration.java | 2 +- .../org/apache/ignite/events/IgniteFsEvent.java | 2 +- .../apache/ignite/fs/IgniteFsBlockLocation.java | 55 ++ ...IgniteFsConcurrentModificationException.java | 28 + .../apache/ignite/fs/IgniteFsConfiguration.java | 798 ++++++++++++++++++ .../fs/IgniteFsCorruptedFileException.java | 42 + .../org/apache/ignite/fs/IgniteFsException.java | 49 ++ .../java/org/apache/ignite/fs/IgniteFsFile.java | 112 +++ .../fs/IgniteFsFileNotFoundException.java | 36 + .../apache/ignite/fs/IgniteFsFileSystem.java | 208 +++++ .../fs/IgniteFsGroupDataBlocksKeyMapper.java | 93 +++ .../apache/ignite/fs/IgniteFsInputStream.java | 72 ++ .../fs/IgniteFsInvalidHdfsVersionException.java | 34 + .../ignite/fs/IgniteFsInvalidPathException.java | 49 ++ .../org/apache/ignite/fs/IgniteFsMetrics.java | 151 ++++ .../java/org/apache/ignite/fs/IgniteFsMode.java | 72 ++ .../ignite/fs/IgniteFsOutOfSpaceException.java | 50 ++ .../apache/ignite/fs/IgniteFsOutputStream.java | 27 + .../fs/IgniteFsParentNotDirectoryException.java | 42 + .../java/org/apache/ignite/fs/IgniteFsPath.java | 254 ++++++ .../fs/IgniteFsPathAlreadyExistsException.java | 42 + .../apache/ignite/fs/IgniteFsPathSummary.java | 130 +++ .../org/apache/ignite/fs/IgniteFsReader.java | 30 + .../main/java/org/apache/ignite/fs/package.html | 15 + .../GridCacheGgfsPerBlockLruEvictionPolicy.java | 2 +- .../grid/ggfs/IgniteFsBlockLocation.java | 55 -- ...IgniteFsConcurrentModificationException.java | 28 - .../grid/ggfs/IgniteFsConfiguration.java | 800 ------------------- .../ggfs/IgniteFsCorruptedFileException.java | 42 - .../gridgain/grid/ggfs/IgniteFsException.java | 49 -- .../org/gridgain/grid/ggfs/IgniteFsFile.java | 112 --- .../ggfs/IgniteFsFileNotFoundException.java | 36 - .../gridgain/grid/ggfs/IgniteFsFileSystem.java | 208 ----- .../ggfs/IgniteFsGroupDataBlocksKeyMapper.java | 93 --- .../gridgain/grid/ggfs/IgniteFsInputStream.java | 72 -- .../IgniteFsInvalidHdfsVersionException.java | 34 - .../grid/ggfs/IgniteFsInvalidPathException.java | 49 -- .../org/gridgain/grid/ggfs/IgniteFsMetrics.java | 151 ---- .../org/gridgain/grid/ggfs/IgniteFsMode.java | 72 -- .../grid/ggfs/IgniteFsOutOfSpaceException.java | 50 -- .../grid/ggfs/IgniteFsOutputStream.java | 27 - .../IgniteFsParentNotDirectoryException.java | 42 - .../org/gridgain/grid/ggfs/IgniteFsPath.java | 254 ------ .../IgniteFsPathAlreadyExistsException.java | 42 - .../gridgain/grid/ggfs/IgniteFsPathSummary.java | 130 --- .../org/gridgain/grid/ggfs/IgniteFsReader.java | 30 - .../grid/ggfs/mapreduce/IgniteFsFileRange.java | 2 +- .../IgniteFsInputStreamJobAdapter.java | 4 +- 
.../grid/ggfs/mapreduce/IgniteFsJob.java | 6 +- .../mapreduce/IgniteFsRangeInputStream.java | 4 +- .../ggfs/mapreduce/IgniteFsRecordResolver.java | 2 +- .../grid/ggfs/mapreduce/IgniteFsTask.java | 6 +- .../grid/ggfs/mapreduce/IgniteFsTaskArgs.java | 6 +- .../IgniteFsByteDelimiterRecordResolver.java | 2 +- .../IgniteFsFixedLengthRecordResolver.java | 2 +- .../java/org/gridgain/grid/ggfs/package.html | 15 - .../org/gridgain/grid/kernal/GridGainEx.java | 2 +- .../ggfs/common/GridGgfsControlResponse.java | 2 +- .../grid/kernal/ggfs/common/GridGgfsLogger.java | 2 +- .../kernal/ggfs/common/GridGgfsMarshaller.java | 4 +- .../ggfs/common/GridGgfsPathControlRequest.java | 2 +- .../processors/cache/GridCacheAdapter.java | 2 +- .../processors/cache/GridCacheProcessor.java | 2 +- .../kernal/processors/cache/GridCacheUtils.java | 2 +- .../processors/ggfs/GridGgfsAsyncImpl.java | 2 +- .../processors/ggfs/GridGgfsAttributes.java | 8 +- .../ggfs/GridGgfsBlockLocationImpl.java | 2 +- .../kernal/processors/ggfs/GridGgfsContext.java | 2 +- .../processors/ggfs/GridGgfsDataManager.java | 2 +- .../GridGgfsDirectoryNotEmptyException.java | 2 +- .../grid/kernal/processors/ggfs/GridGgfsEx.java | 2 +- .../processors/ggfs/GridGgfsFileInfo.java | 4 +- .../ggfs/GridGgfsFileWorkerBatch.java | 2 +- .../kernal/processors/ggfs/GridGgfsImpl.java | 8 +- .../ggfs/GridGgfsInputStreamAdapter.java | 2 +- .../ggfs/GridGgfsInputStreamImpl.java | 2 +- .../processors/ggfs/GridGgfsIpcHandler.java | 2 +- .../kernal/processors/ggfs/GridGgfsJobImpl.java | 2 +- .../processors/ggfs/GridGgfsMetaManager.java | 2 +- .../processors/ggfs/GridGgfsModeResolver.java | 2 +- .../kernal/processors/ggfs/GridGgfsPaths.java | 2 +- .../processors/ggfs/GridGgfsProcessor.java | 4 +- .../ggfs/GridGgfsProcessorAdapter.java | 2 +- .../GridGgfsSecondaryInputStreamDescriptor.java | 2 +- .../processors/ggfs/GridGgfsServerManager.java | 4 +- .../processors/ggfs/GridNoopGgfsProcessor.java | 2 +- .../processors/ggfs/IgniteFsFileImpl.java | 2 +- .../processors/ggfs/IgniteFsMetricsAdapter.java | 2 +- .../ggfs/IgniteFsOutputStreamAdapter.java | 2 +- .../ggfs/IgniteFsOutputStreamImpl.java | 4 +- .../processors/ggfs/IgniteFsTaskArgsImpl.java | 2 +- .../kernal/processors/job/GridJobWorker.java | 2 +- .../kernal/processors/task/GridTaskWorker.java | 2 +- .../grid/kernal/visor/ggfs/VisorGgfs.java | 2 +- .../kernal/visor/ggfs/VisorGgfsMetrics.java | 4 +- .../kernal/visor/ggfs/VisorGgfsProfiler.java | 2 +- .../visor/ggfs/VisorGgfsProfilerEntry.java | 2 +- .../visor/ggfs/VisorGgfsProfilerTask.java | 2 +- .../visor/node/VisorGgfsConfiguration.java | 2 +- .../grid/kernal/visor/util/VisorTaskUtils.java | 2 +- modules/core/src/test/config/ggfs-loopback.xml | 4 +- .../core/src/test/config/ggfs-no-endpoint.xml | 4 +- modules/core/src/test/config/ggfs-shmem.xml | 4 +- .../ggfs/GridGgfsEventsAbstractSelfTest.java | 1 + .../GridGgfsFragmentizerAbstractSelfTest.java | 1 + .../grid/ggfs/GridGgfsFragmentizerSelfTest.java | 1 + .../GridGgfsFragmentizerTopologySelfTest.java | 1 + .../grid/ggfs/GridGgfsPathSelfTest.java | 3 +- ...heGgfsPerBlockLruEvictionPolicySelfTest.java | 4 +- .../ggfs/GridGgfsAbstractSelfTest.java | 4 +- .../ggfs/GridGgfsAttributesSelfTest.java | 4 +- .../processors/ggfs/GridGgfsCacheSelfTest.java | 2 +- .../ggfs/GridGgfsDataManagerSelfTest.java | 2 +- .../ggfs/GridGgfsDualAbstractSelfTest.java | 4 +- .../ggfs/GridGgfsDualAsyncSelfTest.java | 2 +- .../ggfs/GridGgfsDualSyncSelfTest.java | 2 +- ...GgfsGroupDataBlockKeyMapperHashSelfTest.java | 4 +- 
.../ggfs/GridGgfsMetaManagerSelfTest.java | 2 +- .../ggfs/GridGgfsMetricsSelfTest.java | 4 +- .../ggfs/GridGgfsModeResolverSelfTest.java | 4 +- .../processors/ggfs/GridGgfsModesSelfTest.java | 4 +- .../GridGgfsPrimaryOffheapTieredSelfTest.java | 2 +- .../GridGgfsPrimaryOffheapValuesSelfTest.java | 2 +- .../ggfs/GridGgfsPrimarySelfTest.java | 2 +- .../ggfs/GridGgfsProcessorSelfTest.java | 2 +- .../GridGgfsProcessorValidationSelfTest.java | 8 +- ...IpcEndpointRegistrationAbstractSelfTest.java | 4 +- ...dpointRegistrationOnLinuxAndMacSelfTest.java | 2 +- .../processors/ggfs/GridGgfsSizeSelfTest.java | 2 +- .../ggfs/GridGgfsStreamsSelfTest.java | 2 +- .../processors/ggfs/GridGgfsTaskSelfTest.java | 4 +- .../GridGgfsAbstractRecordResolverSelfTest.java | 4 +- ...GgfsByteDelimiterRecordResolverSelfTest.java | 2 +- ...idGgfsFixedLengthRecordResolverSelfTest.java | 2 +- ...sNewLineDelimiterRecordResolverSelfTest.java | 2 +- ...fsStringDelimiterRecordResolverSelfTest.java | 2 +- .../shmem/GridIpcSharedMemoryNodeStartup.java | 2 +- .../ggfs/hadoop/GridGgfsHadoopParameters.java | 2 +- .../hadoop/v1/GridGgfsHadoopFileSystem.java | 6 +- .../hadoop/v2/GridGgfsHadoopFileSystem.java | 6 +- .../grid/kernal/ggfs/hadoop/GridGgfsHadoop.java | 2 +- .../ggfs/hadoop/GridGgfsHadoopEndpoint.java | 2 +- .../hadoop/GridGgfsHadoopFileSystemWrapper.java | 4 +- .../ggfs/hadoop/GridGgfsHadoopInProc.java | 2 +- .../ggfs/hadoop/GridGgfsHadoopOutProc.java | 2 +- .../ggfs/hadoop/GridGgfsHadoopReader.java | 2 +- .../kernal/ggfs/hadoop/GridGgfsHadoopUtils.java | 2 +- .../ggfs/hadoop/GridGgfsHadoopWrapper.java | 2 +- .../fs/GridHadoopDistributedFileSystem.java | 2 +- .../GridHadoopDefaultMapReducePlanner.java | 2 +- .../GridHadoopClientProtocolSelfTest.java | 2 +- .../grid/ggfs/GridGgfsEventsTestSuite.java | 3 +- ...dGgfsHadoop20FileSystemAbstractSelfTest.java | 3 +- ...doop20FileSystemLoopbackPrimarySelfTest.java | 2 +- ...sHadoop20FileSystemShmemPrimarySelfTest.java | 2 +- .../GridGgfsHadoopDualAbstractSelfTest.java | 3 +- .../ggfs/GridGgfsHadoopDualAsyncSelfTest.java | 2 +- .../ggfs/GridGgfsHadoopDualSyncSelfTest.java | 2 +- ...ridGgfsHadoopFileSystemAbstractSelfTest.java | 3 +- .../GridGgfsHadoopFileSystemClientSelfTest.java | 1 + ...idGgfsHadoopFileSystemHandshakeSelfTest.java | 3 +- ...ridGgfsHadoopFileSystemIpcCacheSelfTest.java | 1 + .../GridGgfsHadoopFileSystemLoggerSelfTest.java | 3 +- ...GgfsHadoopFileSystemLoggerStateSelfTest.java | 3 +- ...adoopFileSystemLoopbackAbstractSelfTest.java | 2 + ...SystemLoopbackEmbeddedDualAsyncSelfTest.java | 2 +- ...eSystemLoopbackEmbeddedDualSyncSelfTest.java | 2 +- ...leSystemLoopbackEmbeddedPrimarySelfTest.java | 2 +- ...SystemLoopbackEmbeddedSecondarySelfTest.java | 2 +- ...SystemLoopbackExternalDualAsyncSelfTest.java | 2 +- ...eSystemLoopbackExternalDualSyncSelfTest.java | 2 +- ...leSystemLoopbackExternalPrimarySelfTest.java | 2 +- ...SystemLoopbackExternalSecondarySelfTest.java | 2 +- ...fsHadoopFileSystemSecondaryModeSelfTest.java | 3 +- ...fsHadoopFileSystemShmemAbstractSelfTest.java | 1 + ...ileSystemShmemEmbeddedDualAsyncSelfTest.java | 2 +- ...FileSystemShmemEmbeddedDualSyncSelfTest.java | 2 +- ...pFileSystemShmemEmbeddedPrimarySelfTest.java | 2 +- ...ileSystemShmemEmbeddedSecondarySelfTest.java | 2 +- ...ileSystemShmemExternalDualAsyncSelfTest.java | 2 +- ...FileSystemShmemExternalDualSyncSelfTest.java | 2 +- ...pFileSystemShmemExternalPrimarySelfTest.java | 2 +- ...ileSystemShmemExternalSecondarySelfTest.java | 2 +- .../ggfs/GridGgfsNearOnlyMultiNodeSelfTest.java | 1 + 
.../hadoop/GridHadoopAbstractSelfTest.java | 2 +- .../hadoop/GridHadoopAbstractWordCountTest.java | 2 +- .../hadoop/GridHadoopCommandLineTest.java | 2 +- ...idHadoopDefaultMapReducePlannerSelfTest.java | 2 +- .../GridHadoopMapReduceEmbeddedSelfTest.java | 2 +- .../hadoop/GridHadoopMapReduceTest.java | 2 +- .../hadoop/GridHadoopTaskExecutionSelfTest.java | 2 +- .../hadoop/GridHadoopTasksAllVersionsTest.java | 2 +- ...GridHadoopExternalTaskExecutionSelfTest.java | 2 +- 198 files changed, 2612 insertions(+), 2595 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/81e01957/examples/config/filesystem/example-ggfs.xml ---------------------------------------------------------------------- diff --git a/examples/config/filesystem/example-ggfs.xml b/examples/config/filesystem/example-ggfs.xml index 1387438..d9b1533 100644 --- a/examples/config/filesystem/example-ggfs.xml +++ b/examples/config/filesystem/example-ggfs.xml @@ -62,7 +62,7 @@ <property name="ggfsConfiguration"> <list> - <bean class="org.gridgain.grid.ggfs.IgniteFsConfiguration"> + <bean class="org.apache.ignite.fs.IgniteFsConfiguration"> <property name="name" value="ggfs"/> <property name="metaCacheName" value="ggfs-meta"/> <property name="dataCacheName" value="ggfs-data"/> @@ -119,7 +119,7 @@ <property name="distributionMode" value="PARTITIONED_ONLY"/> <property name="backups" value="0"/> <property name="affinityMapper"> - <bean class="org.gridgain.grid.ggfs.IgniteFsGroupDataBlocksKeyMapper"> + <bean class="org.apache.ignite.fs.IgniteFsGroupDataBlocksKeyMapper"> <!-- Haw many blocks in row will be stored on the same node. --> <constructor-arg value="512"/> </bean> http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/81e01957/examples/src/main/java/org/gridgain/examples/ggfs/GgfsExample.java ---------------------------------------------------------------------- diff --git a/examples/src/main/java/org/gridgain/examples/ggfs/GgfsExample.java b/examples/src/main/java/org/gridgain/examples/ggfs/GgfsExample.java index ecdd947..18d2fbf 100644 --- a/examples/src/main/java/org/gridgain/examples/ggfs/GgfsExample.java +++ b/examples/src/main/java/org/gridgain/examples/ggfs/GgfsExample.java @@ -10,8 +10,8 @@ package org.gridgain.examples.ggfs; import org.apache.ignite.*; +import org.apache.ignite.fs.*; import org.gridgain.grid.*; -import org.gridgain.grid.ggfs.*; import org.jetbrains.annotations.*; import java.io.*; http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/81e01957/examples/src/main/java/org/gridgain/examples/ggfs/GgfsMapReduceExample.java ---------------------------------------------------------------------- diff --git a/examples/src/main/java/org/gridgain/examples/ggfs/GgfsMapReduceExample.java b/examples/src/main/java/org/gridgain/examples/ggfs/GgfsMapReduceExample.java index 454a54e..ec9c52a 100644 --- a/examples/src/main/java/org/gridgain/examples/ggfs/GgfsMapReduceExample.java +++ b/examples/src/main/java/org/gridgain/examples/ggfs/GgfsMapReduceExample.java @@ -11,8 +11,8 @@ package org.gridgain.examples.ggfs; import org.apache.ignite.*; import org.apache.ignite.compute.*; +import org.apache.ignite.fs.*; import org.gridgain.grid.*; -import org.gridgain.grid.ggfs.*; import org.gridgain.grid.ggfs.mapreduce.*; import org.gridgain.grid.ggfs.mapreduce.records.*; http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/81e01957/modules/core/src/main/java/org/apache/ignite/Ignite.java 
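Stepping back from the example hunks above: the example-ggfs.xml change only swaps the bean class package. For readers who configure GGFS programmatically, a rough Java counterpart of that bean is sketched below; it uses only the setters that appear in the IgniteFsConfiguration source added later in this diff, and the IgniteConfiguration side of the wiring is left as a comment because its setter is not shown here.

```java
import org.apache.ignite.fs.IgniteFsConfiguration;

/** Illustrative sketch only; mirrors the "ggfs" bean from example-ggfs.xml above. */
public class GgfsConfigSketch {
    public static IgniteFsConfiguration ggfsConfiguration() {
        IgniteFsConfiguration ggfsCfg = new IgniteFsConfiguration();

        // Same values as the Spring bean properties in the XML hunk above.
        ggfsCfg.setName("ggfs");
        ggfsCfg.setMetaCacheName("ggfs-meta");
        ggfsCfg.setDataCacheName("ggfs-data");

        // The result would then be registered under the grid's ggfsConfiguration
        // property (see the XML above); the exact IgniteConfiguration setter is
        // not part of this diff, so it is left out of the sketch.
        return ggfsCfg;
    }
}
```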
---------------------------------------------------------------------- diff --git a/modules/core/src/main/java/org/apache/ignite/Ignite.java b/modules/core/src/main/java/org/apache/ignite/Ignite.java index 5c387e2..ae706f8 100644 --- a/modules/core/src/main/java/org/apache/ignite/Ignite.java +++ b/modules/core/src/main/java/org/apache/ignite/Ignite.java @@ -214,9 +214,9 @@ public interface Ignite extends AutoCloseable { * @param <V> Value type. * @param name Cache name. * @return Cache instance for given name. - * @see org.gridgain.grid.ggfs.IgniteFsConfiguration - * @see org.gridgain.grid.ggfs.IgniteFsConfiguration#getDataCacheName() - * @see org.gridgain.grid.ggfs.IgniteFsConfiguration#getMetaCacheName() + * @see org.apache.ignite.fs.IgniteFsConfiguration + * @see org.apache.ignite.fs.IgniteFsConfiguration#getDataCacheName() + * @see org.apache.ignite.fs.IgniteFsConfiguration#getMetaCacheName() */ public <K, V> GridCache<K, V> cache(@Nullable String name); @@ -224,9 +224,9 @@ public interface Ignite extends AutoCloseable { * Gets all configured caches. * Caches that are used as GGFS meta and data caches will not be returned in resulting collection. * - * @see org.gridgain.grid.ggfs.IgniteFsConfiguration - * @see org.gridgain.grid.ggfs.IgniteFsConfiguration#getDataCacheName() - * @see org.gridgain.grid.ggfs.IgniteFsConfiguration#getMetaCacheName() + * @see org.apache.ignite.fs.IgniteFsConfiguration + * @see org.apache.ignite.fs.IgniteFsConfiguration#getDataCacheName() + * @see org.apache.ignite.fs.IgniteFsConfiguration#getMetaCacheName() * @return All configured caches. */ public Collection<GridCache<?, ?>> caches(); http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/81e01957/modules/core/src/main/java/org/apache/ignite/IgniteFs.java ---------------------------------------------------------------------- diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteFs.java b/modules/core/src/main/java/org/apache/ignite/IgniteFs.java index fd3c88c..323a778 100644 --- a/modules/core/src/main/java/org/apache/ignite/IgniteFs.java +++ b/modules/core/src/main/java/org/apache/ignite/IgniteFs.java @@ -9,9 +9,9 @@ package org.apache.ignite; +import org.apache.ignite.fs.*; import org.apache.ignite.lang.*; import org.gridgain.grid.*; -import org.gridgain.grid.ggfs.*; import org.gridgain.grid.ggfs.mapreduce.*; import org.jetbrains.annotations.*; @@ -75,7 +75,7 @@ public interface IgniteFs extends IgniteFsFileSystem, IgniteAsyncSupport { * * @param path Path to get information for. * @return Summary object. - * @throws org.gridgain.grid.ggfs.IgniteFsFileNotFoundException If path is not found. + * @throws org.apache.ignite.fs.IgniteFsFileNotFoundException If path is not found. * @throws GridException If failed. */ public IgniteFsPathSummary summary(IgniteFsPath path) throws GridException; @@ -86,7 +86,7 @@ public interface IgniteFs extends IgniteFsFileSystem, IgniteAsyncSupport { * @param path File path to read. * @return File input stream to read data from. * @throws GridException In case of error. - * @throws org.gridgain.grid.ggfs.IgniteFsFileNotFoundException If path doesn't exist. + * @throws org.apache.ignite.fs.IgniteFsFileNotFoundException If path doesn't exist. */ public IgniteFsInputStream open(IgniteFsPath path) throws GridException; @@ -97,7 +97,7 @@ public interface IgniteFs extends IgniteFsFileSystem, IgniteAsyncSupport { * @param bufSize Read buffer size (bytes) or {@code zero} to use default value. * @return File input stream to read data from. 
* @throws GridException In case of error. - * @throws org.gridgain.grid.ggfs.IgniteFsFileNotFoundException If path doesn't exist. + * @throws org.apache.ignite.fs.IgniteFsFileNotFoundException If path doesn't exist. */ @Override public IgniteFsInputStream open(IgniteFsPath path, int bufSize) throws GridException; @@ -109,7 +109,7 @@ public interface IgniteFs extends IgniteFsFileSystem, IgniteAsyncSupport { * @param seqReadsBeforePrefetch Amount of sequential reads before prefetch is started. * @return File input stream to read data from. * @throws GridException In case of error. - * @throws org.gridgain.grid.ggfs.IgniteFsFileNotFoundException If path doesn't exist. + * @throws org.apache.ignite.fs.IgniteFsFileNotFoundException If path doesn't exist. */ public IgniteFsInputStream open(IgniteFsPath path, int bufSize, int seqReadsBeforePrefetch) throws GridException; @@ -163,7 +163,7 @@ public interface IgniteFs extends IgniteFsFileSystem, IgniteAsyncSupport { * @param create Create file if it doesn't exist yet. * @return File output stream to append data to. * @throws GridException In case of error. - * @throws org.gridgain.grid.ggfs.IgniteFsFileNotFoundException If path doesn't exist and create flag is {@code false}. + * @throws org.apache.ignite.fs.IgniteFsFileNotFoundException If path doesn't exist and create flag is {@code false}. */ public IgniteFsOutputStream append(IgniteFsPath path, boolean create) throws GridException; @@ -176,7 +176,7 @@ public interface IgniteFs extends IgniteFsFileSystem, IgniteAsyncSupport { * @param props File properties to set only in case it file was just created. * @return File output stream to append data to. * @throws GridException In case of error. - * @throws org.gridgain.grid.ggfs.IgniteFsFileNotFoundException If path doesn't exist and create flag is {@code false}. + * @throws org.apache.ignite.fs.IgniteFsFileNotFoundException If path doesn't exist and create flag is {@code false}. */ @Override public IgniteFsOutputStream append(IgniteFsPath path, int bufSize, boolean create, @Nullable Map<String, String> props) throws GridException; @@ -189,7 +189,7 @@ public interface IgniteFs extends IgniteFsFileSystem, IgniteAsyncSupport { * @param accessTime Optional last access time to set. Value {@code -1} does not update access time. * @param modificationTime Optional last modification time to set. Value {@code -1} does not update * modification time. - * @throws org.gridgain.grid.ggfs.IgniteFsFileNotFoundException If target was not found. + * @throws org.apache.ignite.fs.IgniteFsFileNotFoundException If target was not found. * @throws GridException If error occurred. */ public void setTimes(IgniteFsPath path, long accessTime, long modificationTime) throws GridException; @@ -203,7 +203,7 @@ public interface IgniteFs extends IgniteFsFileSystem, IgniteAsyncSupport { * @param len Size of data in the file to resolve affinity for. * @return Affinity block locations. * @throws GridException In case of error. - * @throws org.gridgain.grid.ggfs.IgniteFsFileNotFoundException If path doesn't exist. + * @throws org.apache.ignite.fs.IgniteFsFileNotFoundException If path doesn't exist. */ public Collection<IgniteFsBlockLocation> affinity(IgniteFsPath path, long start, long len) throws GridException; @@ -218,7 +218,7 @@ public interface IgniteFs extends IgniteFsFileSystem, IgniteAsyncSupport { * @param maxLen Maximum length of a single returned block location length. * @return Affinity block locations. * @throws GridException In case of error. 
- * @throws org.gridgain.grid.ggfs.IgniteFsFileNotFoundException If path doesn't exist. + * @throws org.apache.ignite.fs.IgniteFsFileNotFoundException If path doesn't exist. */ public Collection<IgniteFsBlockLocation> affinity(IgniteFsPath path, long start, long len, long maxLen) throws GridException; @@ -274,7 +274,7 @@ public interface IgniteFs extends IgniteFsFileSystem, IgniteAsyncSupport { /** * Executes GGFS task with overridden maximum range length (see - * {@link org.gridgain.grid.ggfs.IgniteFsConfiguration#getMaximumTaskRangeLength()} for more information). + * {@link org.apache.ignite.fs.IgniteFsConfiguration#getMaximumTaskRangeLength()} for more information). * <p> * Supports asynchronous execution (see {@link IgniteAsyncSupport}). * @@ -310,7 +310,7 @@ public interface IgniteFs extends IgniteFsFileSystem, IgniteAsyncSupport { /** * Executes GGFS task with overridden maximum range length (see - * {@link org.gridgain.grid.ggfs.IgniteFsConfiguration#getMaximumTaskRangeLength()} for more information). + * {@link org.apache.ignite.fs.IgniteFsConfiguration#getMaximumTaskRangeLength()} for more information). * <p> * Supports asynchronous execution (see {@link IgniteAsyncSupport}). * http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/81e01957/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java ---------------------------------------------------------------------- diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java index 4bfb26c..d0927ea 100644 --- a/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java @@ -11,6 +11,7 @@ package org.apache.ignite.configuration; import org.apache.ignite.*; import org.apache.ignite.events.*; +import org.apache.ignite.fs.*; import org.apache.ignite.lang.*; import org.apache.ignite.lifecycle.*; import org.apache.ignite.managed.*; @@ -23,7 +24,6 @@ import org.gridgain.grid.cache.*; import org.gridgain.grid.dotnet.*; import org.gridgain.grid.dr.hub.receiver.*; import org.gridgain.grid.dr.hub.sender.*; -import org.gridgain.grid.ggfs.*; import org.gridgain.grid.hadoop.*; import org.gridgain.grid.kernal.managers.eventstorage.*; import org.gridgain.grid.security.*; http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/81e01957/modules/core/src/main/java/org/apache/ignite/events/IgniteFsEvent.java ---------------------------------------------------------------------- diff --git a/modules/core/src/main/java/org/apache/ignite/events/IgniteFsEvent.java b/modules/core/src/main/java/org/apache/ignite/events/IgniteFsEvent.java index 21e6a20..c2b641c 100644 --- a/modules/core/src/main/java/org/apache/ignite/events/IgniteFsEvent.java +++ b/modules/core/src/main/java/org/apache/ignite/events/IgniteFsEvent.java @@ -10,7 +10,7 @@ package org.apache.ignite.events; import org.apache.ignite.cluster.*; -import org.gridgain.grid.ggfs.*; +import org.apache.ignite.fs.*; import org.gridgain.grid.util.tostring.*; import org.gridgain.grid.util.typedef.internal.*; import org.jetbrains.annotations.*; http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/81e01957/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsBlockLocation.java ---------------------------------------------------------------------- diff --git 
a/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsBlockLocation.java b/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsBlockLocation.java new file mode 100644 index 0000000..88031a8 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsBlockLocation.java @@ -0,0 +1,55 @@ +/* @java.file.header */ + +/* _________ _____ __________________ _____ + * __ ____/___________(_)______ /__ ____/______ ____(_)_______ + * _ / __ __ ___/__ / _ __ / _ / __ _ __ `/__ / __ __ \ + * / /_/ / _ / _ / / /_/ / / /_/ / / /_/ / _ / _ / / / + * \____/ /_/ /_/ \_,__/ \____/ \__,_/ /_/ /_/ /_/ + */ + +package org.apache.ignite.fs; + +import java.util.*; + +/** + * {@code GGFS} file's data block location in the grid. It is used to determine + * node affinity of a certain file block within the Grid by calling + * {@link org.apache.ignite.IgniteFs#affinity(IgniteFsPath, long, long)} method. + */ +public interface IgniteFsBlockLocation { + /** + * Start position in the file this block relates to. + * + * @return Start position in the file this block relates to. + */ + public long start(); + + /** + * Length of the data block in the file. + * + * @return Length of the data block in the file. + */ + public long length(); + + /** + * Nodes this block belongs to. First node id in collection is + * primary node id. + * + * @return Nodes this block belongs to. + */ + public Collection<UUID> nodeIds(); + + /** + * Compliant with Hadoop interface. + * + * @return Collection of host:port addresses. + */ + public Collection<String> names(); + + /** + * Compliant with Hadoop interface. + * + * @return Collection of host names. + */ + public Collection<String> hosts(); +} http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/81e01957/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsConcurrentModificationException.java ---------------------------------------------------------------------- diff --git a/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsConcurrentModificationException.java b/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsConcurrentModificationException.java new file mode 100644 index 0000000..79c0049 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsConcurrentModificationException.java @@ -0,0 +1,28 @@ +/* @java.file.header */ + +/* _________ _____ __________________ _____ + * __ ____/___________(_)______ /__ ____/______ ____(_)_______ + * _ / __ __ ___/__ / _ __ / _ / __ _ __ `/__ / __ __ \ + * / /_/ / _ / _ / / /_/ / / /_/ / / /_/ / _ / _ / / / + * \____/ /_/ /_/ \_,__/ \____/ \__,_/ /_/ /_/ /_/ + */ + +package org.apache.ignite.fs; + +/** + * {@code GGFS} exception indicating that file system structure was modified concurrently. This error + * indicates that an operation performed in DUAL mode cannot proceed due to these changes. + */ +public class IgniteFsConcurrentModificationException extends IgniteFsException { + /** */ + private static final long serialVersionUID = 0L; + + /** + * Creates new exception. + * + * @param path Affected path. 
+ */ + public IgniteFsConcurrentModificationException(IgniteFsPath path) { + super("File system entry has been modified concurrently: " + path, null); + } +} http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/81e01957/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsConfiguration.java ---------------------------------------------------------------------- diff --git a/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsConfiguration.java b/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsConfiguration.java new file mode 100644 index 0000000..13e6746 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsConfiguration.java @@ -0,0 +1,798 @@ +/* @java.file.header */ + +/* _________ _____ __________________ _____ + * __ ____/___________(_)______ /__ ____/______ ____(_)_______ + * _ / __ __ ___/__ / _ __ / _ / __ _ __ `/__ / __ __ \ + * / /_/ / _ / _ / / /_/ / / /_/ / / /_/ / _ / _ / / / + * \____/ /_/ /_/ \_,__/ \____/ \__,_/ /_/ /_/ /_/ + */ + +package org.apache.ignite.fs; + +import org.gridgain.grid.util.typedef.internal.*; +import org.jetbrains.annotations.*; + +import java.util.*; +import java.util.concurrent.*; + +/** + * {@code GGFS} configuration. More than one file system can be configured within grid. + * {@code GGFS} configuration is provided via {@link org.apache.ignite.configuration.IgniteConfiguration#getGgfsConfiguration()} + * method. + * <p> + * Refer to {@code config/hadoop/default-config.xml} or {@code config/hadoop/default-config-client.xml} + * configuration files under GridGain installation to see sample {@code GGFS} configuration. + */ +public class IgniteFsConfiguration { + /** Default file system user name. */ + public static final String DFLT_USER_NAME = System.getProperty("user.name", "anonymous"); + + /** Default IPC port. */ + public static final int DFLT_IPC_PORT = 10500; + + /** Default fragmentizer throttling block length. */ + public static final long DFLT_FRAGMENTIZER_THROTTLING_BLOCK_LENGTH = 16 * 1024 * 1024; + + /** Default fragmentizer throttling delay. */ + public static final long DFLT_FRAGMENTIZER_THROTTLING_DELAY = 200; + + /** Default fragmentizer concurrent files. */ + public static final int DFLT_FRAGMENTIZER_CONCURRENT_FILES = 0; + + /** Default fragmentizer local writes ratio. */ + public static final float DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO = 0.8f; + + /** Fragmentizer enabled property. */ + public static final boolean DFLT_FRAGMENTIZER_ENABLED = true; + + /** Default batch size for logging. */ + public static final int DFLT_GGFS_LOG_BATCH_SIZE = 100; + + /** Default {@code GGFS} log directory. */ + public static final String DFLT_GGFS_LOG_DIR = "work/ggfs/log"; + + /** Default per node buffer size. */ + public static final int DFLT_PER_NODE_BATCH_SIZE = 100; + + /** Default number of per node parallel operations. */ + public static final int DFLT_PER_NODE_PARALLEL_BATCH_CNT = 8; + + /** Default GGFS mode. */ + public static final IgniteFsMode DFLT_MODE = IgniteFsMode.DUAL_ASYNC; + + /** Default file's data block size (bytes). */ + public static final int DFLT_BLOCK_SIZE = 1 << 16; + + /** Default read/write buffers size (bytes). */ + public static final int DFLT_BUF_SIZE = 1 << 16; + + /** Default trash directory purge await timeout in case data cache oversize is detected. */ + public static final long DFLT_TRASH_PURGE_TIMEOUT = 1000; + + /** Default management port. */ + public static final int DFLT_MGMT_PORT = 11400; + + /** Default IPC endpoint enabled flag. 
*/ + public static final boolean DFLT_IPC_ENDPOINT_ENABLED = true; + + /** GGFS instance name. */ + private String name; + + /** Cache name to store GGFS meta information. */ + private String metaCacheName; + + /** Cache name to store file's data blocks. */ + private String dataCacheName; + + /** File's data block size (bytes). */ + private int blockSize = DFLT_BLOCK_SIZE; + + /** The number of pre-fetched blocks if specific file's chunk is requested. */ + private int prefetchBlocks; + + /** Amount of sequential block reads before prefetch is triggered. */ + private int seqReadsBeforePrefetch; + + /** Read/write buffers size for stream operations (bytes). */ + private int bufSize = DFLT_BUF_SIZE; + + /** Per node buffer size. */ + private int perNodeBatchSize = DFLT_PER_NODE_BATCH_SIZE; + + /** Per node parallel operations. */ + private int perNodeParallelBatchCnt = DFLT_PER_NODE_PARALLEL_BATCH_CNT; + + /** IPC endpoint properties to publish GGFS over. */ + private Map<String, String> ipcEndpointCfg; + + /** IPC endpoint enabled flag. */ + private boolean ipcEndpointEnabled = DFLT_IPC_ENDPOINT_ENABLED; + + /** Management port. */ + private int mgmtPort = DFLT_MGMT_PORT; + + /** Secondary file system */ + private IgniteFsFileSystem secondaryFs; + + /** GGFS mode. */ + private IgniteFsMode dfltMode = DFLT_MODE; + + /** Fragmentizer throttling block length. */ + private long fragmentizerThrottlingBlockLen = DFLT_FRAGMENTIZER_THROTTLING_BLOCK_LENGTH; + + /** Fragmentizer throttling delay. */ + private long fragmentizerThrottlingDelay = DFLT_FRAGMENTIZER_THROTTLING_DELAY; + + /** Fragmentizer concurrent files. */ + private int fragmentizerConcurrentFiles = DFLT_FRAGMENTIZER_CONCURRENT_FILES; + + /** Fragmentizer local writes ratio. */ + private float fragmentizerLocWritesRatio = DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO; + + /** Fragmentizer enabled flag. */ + private boolean fragmentizerEnabled = DFLT_FRAGMENTIZER_ENABLED; + + /** Path modes. */ + private Map<String, IgniteFsMode> pathModes; + + /** Maximum space. */ + private long maxSpace; + + /** Trash purge await timeout. */ + private long trashPurgeTimeout = DFLT_TRASH_PURGE_TIMEOUT; + + /** Dual mode PUT operations executor service. */ + private ExecutorService dualModePutExec; + + /** Dual mode PUT operations executor service shutdown flag. */ + private boolean dualModePutExecShutdown; + + /** Maximum amount of data in pending puts. */ + private long dualModeMaxPendingPutsSize; + + /** Maximum range length. */ + private long maxTaskRangeLen; + + /** + * Constructs default configuration. + */ + public IgniteFsConfiguration() { + // No-op. + } + + /** + * Constructs the copy of the configuration. + * + * @param cfg Configuration to copy. + */ + public IgniteFsConfiguration(IgniteFsConfiguration cfg) { + assert cfg != null; + + /* + * Must preserve alphabetical order! 
+ */ + blockSize = cfg.getBlockSize(); + bufSize = cfg.getStreamBufferSize(); + dataCacheName = cfg.getDataCacheName(); + dfltMode = cfg.getDefaultMode(); + dualModeMaxPendingPutsSize = cfg.getDualModeMaxPendingPutsSize(); + dualModePutExec = cfg.getDualModePutExecutorService(); + dualModePutExecShutdown = cfg.getDualModePutExecutorServiceShutdown(); + fragmentizerConcurrentFiles = cfg.getFragmentizerConcurrentFiles(); + fragmentizerLocWritesRatio = cfg.getFragmentizerLocalWritesRatio(); + fragmentizerEnabled = cfg.isFragmentizerEnabled(); + fragmentizerThrottlingBlockLen = cfg.getFragmentizerThrottlingBlockLength(); + fragmentizerThrottlingDelay = cfg.getFragmentizerThrottlingDelay(); + secondaryFs = cfg.getSecondaryFileSystem(); + ipcEndpointCfg = cfg.getIpcEndpointConfiguration(); + ipcEndpointEnabled = cfg.isIpcEndpointEnabled(); + maxSpace = cfg.getMaxSpaceSize(); + maxTaskRangeLen = cfg.getMaximumTaskRangeLength(); + metaCacheName = cfg.getMetaCacheName(); + mgmtPort = cfg.getManagementPort(); + name = cfg.getName(); + pathModes = cfg.getPathModes(); + perNodeBatchSize = cfg.getPerNodeBatchSize(); + perNodeParallelBatchCnt = cfg.getPerNodeParallelBatchCount(); + prefetchBlocks = cfg.getPrefetchBlocks(); + seqReadsBeforePrefetch = cfg.getSequentialReadsBeforePrefetch(); + trashPurgeTimeout = cfg.getTrashPurgeTimeout(); + } + + /** + * Gets GGFS instance name. If {@code null}, then instance with default + * name will be used. + * + * @return GGFS instance name. + */ + @Nullable public String getName() { + return name; + } + + /** + * Sets GGFS instance name. + * + * @param name GGFS instance name. + */ + public void setName(String name) { + this.name = name; + } + + /** + * Cache name to store GGFS meta information. If {@code null}, then instance + * with default meta-cache name will be used. + * + * @return Cache name to store GGFS meta information. + */ + @Nullable public String getMetaCacheName() { + return metaCacheName; + } + + /** + * Sets cache name to store GGFS meta information. + * + * @param metaCacheName Cache name to store GGFS meta information. + */ + public void setMetaCacheName(String metaCacheName) { + this.metaCacheName = metaCacheName; + } + + /** + * Cache name to store GGFS data. + * + * @return Cache name to store GGFS data. + */ + @Nullable public String getDataCacheName() { + return dataCacheName; + } + + /** + * Sets cache name to store GGFS data. + * + * @param dataCacheName Cache name to store GGFS data. + */ + public void setDataCacheName(String dataCacheName) { + this.dataCacheName = dataCacheName; + } + + /** + * Get file's data block size. + * + * @return File's data block size. + */ + public int getBlockSize() { + return blockSize; + } + + /** + * Sets file's data block size. + * + * @param blockSize File's data block size (bytes) or {@code 0} to reset default value. + */ + public void setBlockSize(int blockSize) { + A.ensure(blockSize >= 0, "blockSize >= 0"); + + this.blockSize = blockSize == 0 ? DFLT_BLOCK_SIZE : blockSize; + } + + /** + * Get number of pre-fetched blocks if specific file's chunk is requested. + * + * @return The number of pre-fetched blocks. + */ + public int getPrefetchBlocks() { + return prefetchBlocks; + } + + /** + * Sets the number of pre-fetched blocks if specific file's chunk is requested. + * + * @param prefetchBlocks New number of pre-fetched blocks. 
+ */ + public void setPrefetchBlocks(int prefetchBlocks) { + A.ensure(prefetchBlocks >= 0, "prefetchBlocks >= 0"); + + this.prefetchBlocks = prefetchBlocks; + } + + /** + * Get amount of sequential block reads before prefetch is triggered. The + * higher this value, the longer GGFS will wait before starting to prefetch + * values ahead of time. Depending on the use case, this can either help + * or hurt performance. + * <p> + * Default is {@code 0} which means that pre-fetching will start right away. + * <h1 class="header">Integration With Hadoop</h1> + * This parameter can be also overridden for individual Hadoop MapReduce tasks by passing + * {@code org.gridgain.grid.ggfs.hadoop.GridGgfsHadoopParameters.PARAM_GGFS_SEQ_READS_BEFORE_PREFETCH} + * configuration property directly to Hadoop MapReduce task. + * <p> + * <b>NOTE:</b> Integration with Hadoop is available only in {@code In-Memory Accelerator For Hadoop} edition. + * + * @return Amount of sequential block reads. + */ + public int getSequentialReadsBeforePrefetch() { + return seqReadsBeforePrefetch; + } + + /** + * Sets amount of sequential block reads before prefetch is triggered. The + * higher this value, the longer GGFS will wait before starting to prefetch + * values ahead of time. Depending on the use case, this can either help + * or hurt performance. + * <p> + * Default is {@code 0} which means that pre-fetching will start right away. + * <h1 class="header">Integration With Hadoop</h1> + * This parameter can be also overridden for individual Hadoop MapReduce tasks by passing + * {@code org.gridgain.grid.ggfs.hadoop.GridGgfsHadoopParameters.PARAM_GGFS_SEQ_READS_BEFORE_PREFETCH} + * configuration property directly to Hadoop MapReduce task. + * <p> + * <b>NOTE:</b> Integration with Hadoop is available only in {@code In-Memory Accelerator For Hadoop} edition. + * + * @param seqReadsBeforePrefetch Amount of sequential block reads before prefetch is triggered. + */ + public void setSequentialReadsBeforePrefetch(int seqReadsBeforePrefetch) { + A.ensure(seqReadsBeforePrefetch >= 0, "seqReadsBeforePrefetch >= 0"); + + this.seqReadsBeforePrefetch = seqReadsBeforePrefetch; + } + + /** + * Get read/write buffer size for {@code GGFS} stream operations in bytes. + * + * @return Read/write buffers size (bytes). + */ + public int getStreamBufferSize() { + return bufSize; + } + + /** + * Sets read/write buffers size for {@code GGFS} stream operations (bytes). + * + * @param bufSize Read/write buffers size for stream operations (bytes) or {@code 0} to reset default value. + */ + public void setStreamBufferSize(int bufSize) { + A.ensure(bufSize >= 0, "bufSize >= 0"); + + this.bufSize = bufSize == 0 ? DFLT_BUF_SIZE : bufSize; + } + + /** + * Gets number of file blocks buffered on local node before sending batch to remote node. + * + * @return Per node buffer size. + */ + public int getPerNodeBatchSize() { + return perNodeBatchSize; + } + + /** + * Sets number of file blocks collected on local node before sending batch to remote node. + * + * @param perNodeBatchSize Per node buffer size. + */ + public void setPerNodeBatchSize(int perNodeBatchSize) { + this.perNodeBatchSize = perNodeBatchSize; + } + + /** + * Gets number of batches that can be concurrently sent to remote node. + * + * @return Number of batches for each node. + */ + public int getPerNodeParallelBatchCount() { + return perNodeParallelBatchCnt; + } + + /** + * Sets number of file block batches that can be concurrently sent to remote node. 
+ * + * @param perNodeParallelBatchCnt Per node parallel load operations. + */ + public void setPerNodeParallelBatchCount(int perNodeParallelBatchCnt) { + this.perNodeParallelBatchCnt = perNodeParallelBatchCnt; + } + + /** + * Gets map of IPC endpoint configuration properties. There are 2 different + * types of endpoint supported: {@code shared-memory}, and {@code TCP}. + * <p> + * The following configuration properties are supported for {@code shared-memory} + * endpoint: + * <ul> + * <li>{@code type} - value is {@code shmem} to specify {@code shared-memory} approach.</li> + * <li>{@code port} - endpoint port.</li> + * <li>{@code size} - memory size allocated for single endpoint communication.</li> + * <li> + * {@code tokenDirectoryPath} - path, either absolute or relative to {@code GRIDGAIN_HOME} to + * store shared memory tokens. + * </li> + * </ul> + * <p> + * The following configuration properties are supported for {@code TCP} approach: + * <ul> + * <li>{@code type} - value is {@code tcp} to specify {@code TCP} approach.</li> + * <li>{@code port} - endpoint bind port.</li> + * <li> + * {@code host} - endpoint bind host. If omitted '127.0.0.1' will be used. + * </li> + * </ul> + * <p> + * Note that {@code shared-memory} approach is not supported on Windows environments. + * In case GGFS is failed to bind to particular port, further attempts will be performed every 3 seconds. + * + * @return Map of IPC endpoint configuration properties. In case the value is not set, defaults will be used. Default + * type for Windows is "tcp", for all other platforms - "shmem". Default port is {@link #DFLT_IPC_PORT}. + */ + @Nullable public Map<String,String> getIpcEndpointConfiguration() { + return ipcEndpointCfg; + } + + /** + * Sets IPC endpoint configuration to publish GGFS over. + * + * @param ipcEndpointCfg Map of IPC endpoint config properties. + */ + public void setIpcEndpointConfiguration(@Nullable Map<String,String> ipcEndpointCfg) { + this.ipcEndpointCfg = ipcEndpointCfg; + } + + /** + * Get IPC endpoint enabled flag. In case it is set to {@code true} endpoint will be created and bound to specific + * port. Otherwise endpoint will not be created. Default value is {@link #DFLT_IPC_ENDPOINT_ENABLED}. + * + * @return {@code True} in case endpoint is enabled. + */ + public boolean isIpcEndpointEnabled() { + return ipcEndpointEnabled; + } + + /** + * Set IPC endpoint enabled flag. See {@link #isIpcEndpointEnabled()}. + * + * @param ipcEndpointEnabled IPC endpoint enabled flag. + */ + public void setIpcEndpointEnabled(boolean ipcEndpointEnabled) { + this.ipcEndpointEnabled = ipcEndpointEnabled; + } + + /** + * Gets port number for management endpoint. All GGFS nodes should have this port open + * for Visor Management Console to work with GGFS. + * <p> + * Default value is {@link #DFLT_MGMT_PORT} + * + * @return Port number or {@code -1} if management endpoint should be disabled. + */ + public int getManagementPort() { + return mgmtPort; + } + + /** + * Sets management endpoint port. + * + * @param mgmtPort port number or {@code -1} to disable management endpoint. + */ + public void setManagementPort(int mgmtPort) { + this.mgmtPort = mgmtPort; + } + + /** + * Gets mode to specify how {@code GGFS} interacts with Hadoop file system, like {@code HDFS}. + * Secondary Hadoop file system is provided for pass-through, write-through, and read-through + * purposes. + * <p> + * Default mode is {@link IgniteFsMode#DUAL_ASYNC}. 
If secondary Hadoop file system is + * not configured, this mode will work just like {@link IgniteFsMode#PRIMARY} mode. + * + * @return Mode to specify how GGFS interacts with secondary HDFS file system. + */ + public IgniteFsMode getDefaultMode() { + return dfltMode; + } + + /** + * Sets {@code GGFS} mode to specify how it should interact with secondary + * Hadoop file system, like {@code HDFS}. Secondary Hadoop file system is provided + * for pass-through, write-through, and read-through purposes. + * + * @param dfltMode {@code GGFS} mode. + */ + public void setDefaultMode(IgniteFsMode dfltMode) { + this.dfltMode = dfltMode; + } + + /** + * Gets the secondary file system. Secondary file system is provided for pass-through, write-through, + * and read-through purposes. + * + * @return Secondary file system. + */ + public IgniteFsFileSystem getSecondaryFileSystem() { + return secondaryFs; + } + + /** + * Sets the secondary file system. Secondary file system is provided for pass-through, write-through, + * and read-through purposes. + * + * @param fileSystem + */ + public void setSecondaryFileSystem(IgniteFsFileSystem fileSystem) { + secondaryFs = fileSystem; + } + + /** + * Gets map of path prefixes to {@code GGFS} modes used for them. + * <p> + * If path doesn't correspond to any specified prefix or mappings are not provided, then + * {@link #getDefaultMode()} is used. + * <p> + * Several folders under {@code '/gridgain'} folder have predefined mappings which cannot be overridden. + * <li>{@code /gridgain/primary} and all it's sub-folders will always work in {@code PRIMARY} mode.</li> + * <p> + * And in case secondary file system URI is provided: + * <li>{@code /gridgain/proxy} and all it's sub-folders will always work in {@code PROXY} mode.</li> + * <li>{@code /gridgain/sync} and all it's sub-folders will always work in {@code DUAL_SYNC} mode.</li> + * <li>{@code /gridgain/async} and all it's sub-folders will always work in {@code DUAL_ASYNC} mode.</li> + * + * @return Map of paths to {@code GGFS} modes. + */ + @Nullable public Map<String, IgniteFsMode> getPathModes() { + return pathModes; + } + + /** + * Sets map of path prefixes to {@code GGFS} modes used for them. + * <p> + * If path doesn't correspond to any specified prefix or mappings are not provided, then + * {@link #getDefaultMode()} is used. + * + * @param pathModes Map of paths to {@code GGFS} modes. + */ + public void setPathModes(Map<String, IgniteFsMode> pathModes) { + this.pathModes = pathModes; + } + + /** + * Gets the length of file chunk to send before delaying the fragmentizer. + * + * @return File chunk length in bytes. + */ + public long getFragmentizerThrottlingBlockLength() { + return fragmentizerThrottlingBlockLen; + } + + /** + * Sets length of file chunk to transmit before throttling is delayed. + * + * @param fragmentizerThrottlingBlockLen Block length in bytes. + */ + public void setFragmentizerThrottlingBlockLength(long fragmentizerThrottlingBlockLen) { + this.fragmentizerThrottlingBlockLen = fragmentizerThrottlingBlockLen; + } + + /** + * Gets throttle delay for fragmentizer. + * + * @return Throttle delay in milliseconds. + */ + public long getFragmentizerThrottlingDelay() { + return fragmentizerThrottlingDelay; + } + + /** + * Sets delay in milliseconds for which fragmentizer is paused. + * + * @param fragmentizerThrottlingDelay Delay in milliseconds. 
+ */ + public void setFragmentizerThrottlingDelay(long fragmentizerThrottlingDelay) { + this.fragmentizerThrottlingDelay = fragmentizerThrottlingDelay; + } + + /** + * Gets number of files that can be processed by fragmentizer concurrently. + * + * @return Number of files to process concurrently. + */ + public int getFragmentizerConcurrentFiles() { + return fragmentizerConcurrentFiles; + } + + /** + * Sets number of files to process concurrently by fragmentizer. + * + * @param fragmentizerConcurrentFiles Number of files to process concurrently. + */ + public void setFragmentizerConcurrentFiles(int fragmentizerConcurrentFiles) { + this.fragmentizerConcurrentFiles = fragmentizerConcurrentFiles; + } + + /** + * Gets amount of local memory (in % of local GGFS max space size) available for local writes + * during file creation. + * <p> + * If current GGFS space size is less than {@code fragmentizerLocalWritesRatio * maxSpaceSize}, + * then file blocks will be written to the local node first and then asynchronously distributed + * among cluster nodes (fragmentized). + * <p> + * Default value is {@link #DFLT_FRAGMENTIZER_LOCAL_WRITES_RATIO}. + * + * @return Ratio for local writes space. + */ + public float getFragmentizerLocalWritesRatio() { + return fragmentizerLocWritesRatio; + } + + /** + * Sets ratio for space available for local file writes. + * + * @param fragmentizerLocWritesRatio Ratio for local file writes. + * @see #getFragmentizerLocalWritesRatio() + */ + public void setFragmentizerLocalWritesRatio(float fragmentizerLocWritesRatio) { + this.fragmentizerLocWritesRatio = fragmentizerLocWritesRatio; + } + + /** + * Gets flag indicating whether GGFS fragmentizer is enabled. If fragmentizer is disabled, files will be + * written in distributed fashion. + * + * @return Flag indicating whether fragmentizer is enabled. + */ + public boolean isFragmentizerEnabled() { + return fragmentizerEnabled; + } + + /** + * Sets property indicating whether fragmentizer is enabled. + * + * @param fragmentizerEnabled {@code True} if fragmentizer is enabled. + */ + public void setFragmentizerEnabled(boolean fragmentizerEnabled) { + this.fragmentizerEnabled = fragmentizerEnabled; + } + + /** + * Get maximum space available for data cache to store file system entries. + * + * @return Maximum space available for data cache. + */ + public long getMaxSpaceSize() { + return maxSpace; + } + + /** + * Set maximum space in bytes available in data cache. + * + * @param maxSpace Maximum space available in data cache. + */ + public void setMaxSpaceSize(long maxSpace) { + this.maxSpace = maxSpace; + } + + /** + * Gets maximum timeout awaiting for trash purging in case data cache oversize is detected. + * + * @return Maximum timeout awaiting for trash purging in case data cache oversize is detected. + */ + public long getTrashPurgeTimeout() { + return trashPurgeTimeout; + } + + /** + * Sets maximum timeout awaiting for trash purging in case data cache oversize is detected. + * + * @param trashPurgeTimeout Maximum timeout awaiting for trash purging in case data cache oversize is detected. + */ + public void setTrashPurgeTimeout(long trashPurgeTimeout) { + this.trashPurgeTimeout = trashPurgeTimeout; + } + + /** + * Get DUAL mode put operation executor service. This executor service will process cache PUT requests for + * data which came from the secondary file system and about to be written to GGFS data cache. 
+ * In case no executor service is provided, default one will be created with maximum amount of threads equals + * to amount of processor cores. + * + * @return Get DUAL mode put operation executor service + */ + @Nullable public ExecutorService getDualModePutExecutorService() { + return dualModePutExec; + } + + /** + * Set DUAL mode put operations executor service. + * + * @param dualModePutExec Dual mode put operations executor service. + */ + public void setDualModePutExecutorService(ExecutorService dualModePutExec) { + this.dualModePutExec = dualModePutExec; + } + + /** + * Get DUAL mode put operation executor service shutdown flag. + * + * @return DUAL mode put operation executor service shutdown flag. + */ + public boolean getDualModePutExecutorServiceShutdown() { + return dualModePutExecShutdown; + } + + /** + * Set DUAL mode put operations executor service shutdown flag. + * + * @param dualModePutExecShutdown Dual mode put operations executor service shutdown flag. + */ + public void setDualModePutExecutorServiceShutdown(boolean dualModePutExecShutdown) { + this.dualModePutExecShutdown = dualModePutExecShutdown; + } + + /** + * Get maximum amount of pending data read from the secondary file system and waiting to be written to data + * cache. {@code 0} or negative value stands for unlimited size. + * <p> + * By default this value is set to {@code 0}. It is recommended to set positive value in case your + * application performs frequent reads of large amount of data from the secondary file system in order to + * avoid issues with increasing GC pauses or out-of-memory error. + * + * @return Maximum amount of pending data read from the secondary file system + */ + public long getDualModeMaxPendingPutsSize() { + return dualModeMaxPendingPutsSize; + } + + /** + * Set maximum amount of data in pending put operations. + * + * @param dualModeMaxPendingPutsSize Maximum amount of data in pending put operations. + */ + public void setDualModeMaxPendingPutsSize(long dualModeMaxPendingPutsSize) { + this.dualModeMaxPendingPutsSize = dualModeMaxPendingPutsSize; + } + + /** + * Get maximum default range size of a file being split during GGFS task execution. When GGFS task is about to + * be executed, it requests file block locations first. Each location is defined as {@link org.gridgain.grid.ggfs.mapreduce.IgniteFsFileRange} which + * has length. In case this parameter is set to positive value, then GGFS will split single file range into smaller + * ranges with length not greater that this parameter. The only exception to this case is when maximum task range + * length is smaller than file block size. In this case maximum task range size will be overridden and set to file + * block size. + * <p> + * Note that this parameter is applied when task is split into jobs before {@link org.gridgain.grid.ggfs.mapreduce.IgniteFsRecordResolver} is + * applied. Therefore, final file ranges being assigned to particular jobs could be greater than value of this + * parameter depending on file data layout and selected resolver type. + * <p> + * Setting this parameter might be useful when file is highly colocated and have very long consequent data chunks + * so that task execution suffers from insufficient parallelism. E.g., in case you have one GGFS node in topology + * and want to process 1Gb file, then only single range of length 1Gb will be returned. This will result in + * a single job which will be processed in one thread. 
But in case you provide this configuration parameter and set + * maximum range length to 16Mb, then 64 ranges will be returned resulting in 64 jobs which could be executed in + * parallel. + * <p> + * Note that some {@code GridGgfs.execute()} methods can override value of this parameter. + * <p> + * In case value of this parameter is set to {@code 0} or negative value, it is simply ignored. Default value is + * {@code 0}. + * + * @return Maximum range size of a file being split during GGFS task execution. + */ + public long getMaximumTaskRangeLength() { + return maxTaskRangeLen; + } + + /** + * Set maximum default range size of a file being split during GGFS task execution. + * See {@link #getMaximumTaskRangeLength()} for more details. + * + * @param maxTaskRangeLen Set maximum default range size of a file being split during GGFS task execution. + */ + public void setMaximumTaskRangeLength(long maxTaskRangeLen) { + this.maxTaskRangeLen = maxTaskRangeLen; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(IgniteFsConfiguration.class, this); + } +} http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/81e01957/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsCorruptedFileException.java ---------------------------------------------------------------------- diff --git a/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsCorruptedFileException.java b/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsCorruptedFileException.java new file mode 100644 index 0000000..28932ff --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsCorruptedFileException.java @@ -0,0 +1,42 @@ +/* @java.file.header */ + +/* _________ _____ __________________ _____ + * __ ____/___________(_)______ /__ ____/______ ____(_)_______ + * _ / __ __ ___/__ / _ __ / _ / __ _ __ `/__ / __ __ \ + * / /_/ / _ / _ / / /_/ / / /_/ / / /_/ / _ / _ / / / + * \____/ /_/ /_/ \_,__/ \____/ \__,_/ /_/ /_/ /_/ + */ + +package org.apache.ignite.fs; + +import org.jetbrains.annotations.*; + +/** + * Exception thrown when target file's block is not found in data cache. + */ +public class IgniteFsCorruptedFileException extends IgniteFsException { + /** */ + private static final long serialVersionUID = 0L; + + /** + * @param msg Error message. + */ + public IgniteFsCorruptedFileException(String msg) { + super(msg); + } + + /** + * @param cause Error cause. + */ + public IgniteFsCorruptedFileException(Throwable cause) { + super(cause); + } + + /** + * @param msg Error message. + * @param cause Error cause. 
+ */ + public IgniteFsCorruptedFileException(String msg, @Nullable Throwable cause) { + super(msg, cause); + } +} http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/81e01957/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsException.java ---------------------------------------------------------------------- diff --git a/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsException.java b/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsException.java new file mode 100644 index 0000000..d791197 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsException.java @@ -0,0 +1,49 @@ +/* @java.file.header */ + +/* _________ _____ __________________ _____ + * __ ____/___________(_)______ /__ ____/______ ____(_)_______ + * _ / __ __ ___/__ / _ __ / _ / __ _ __ `/__ / __ __ \ + * / /_/ / _ / _ / / /_/ / / /_/ / / /_/ / _ / _ / / / + * \____/ /_/ /_/ \_,__/ \____/ \__,_/ /_/ /_/ /_/ + */ + +package org.apache.ignite.fs; + +import org.gridgain.grid.*; +import org.jetbrains.annotations.*; + +/** + * {@code GGFS} exception thrown by file system components. + */ +public class IgniteFsException extends GridException { + /** */ + private static final long serialVersionUID = 0L; + + /** + * Creates an instance of GGFS exception with descriptive error message. + * + * @param msg Error message. + */ + public IgniteFsException(String msg) { + super(msg); + } + + /** + * Creates an instance of GGFS exception caused by nested exception. + * + * @param cause Exception cause. + */ + public IgniteFsException(Throwable cause) { + super(cause); + } + + /** + * Creates an instance of GGFS exception with error message and underlying cause. + * + * @param msg Error message. + * @param cause Exception cause. + */ + public IgniteFsException(String msg, @Nullable Throwable cause) { + super(msg, cause); + } +} http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/81e01957/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsFile.java ---------------------------------------------------------------------- diff --git a/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsFile.java b/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsFile.java new file mode 100644 index 0000000..d6bd513 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsFile.java @@ -0,0 +1,112 @@ +/* @java.file.header */ + +/* _________ _____ __________________ _____ + * __ ____/___________(_)______ /__ ____/______ ____(_)_______ + * _ / __ __ ___/__ / _ __ / _ / __ _ __ `/__ / __ __ \ + * / /_/ / _ / _ / / /_/ / / /_/ / / /_/ / _ / _ / / / + * \____/ /_/ /_/ \_,__/ \____/ \__,_/ /_/ /_/ /_/ + */ + +package org.apache.ignite.fs; + +import org.jetbrains.annotations.*; + +import java.util.*; + +/** + * {@code GGFS} file or directory descriptor. For example, to get information about + * a file you would use the following code: + * <pre name="code" class="java"> + * GridGgfsPath filePath = new GridGgfsPath("my/working/dir", "file.txt"); + * + * // Get metadata about file. + * GridGgfsFile file = ggfs.info(filePath); + * </pre> + */ +public interface IgniteFsFile { + /** + * Gets path to file. + * + * @return Path to file. + */ + public IgniteFsPath path(); + + /** + * Check this file is a data file. + * + * @return {@code True} if this is a data file. + */ + public boolean isFile(); + + /** + * Check this file is a directory. + * + * @return {@code True} if this is a directory. + */ + public boolean isDirectory(); + + /** + * Gets file's length. 
+ * + * @return File's length or {@code zero} for directories. + */ + public long length(); + + /** + * Gets file's data block size. + * + * @return File's data block size or {@code zero} for directories. + */ + public int blockSize(); + + /** + * Gets file group block size (i.e. block size * group size). + * + * @return File group block size. + */ + public long groupBlockSize(); + + /** + * Gets file last access time. File last access time is not updated automatically due to + * performance considerations and can be updated on demand with + * {@link org.apache.ignite.IgniteFs#setTimes(IgniteFsPath, long, long)} method. + * <p> + * By default last access time equals file creation time. + * + * @return Last access time. + */ + public long accessTime(); + + /** + * Gets file last modification time. File modification time is updated automatically on each file write and + * append. + * + * @return Last modification time. + */ + public long modificationTime(); + + /** + * Get file's property for specified name. + * + * @param name Name of the property. + * @return File's property for specified name. + * @throws IllegalArgumentException If requested property was not found. + */ + public String property(String name) throws IllegalArgumentException; + + /** + * Get file's property for specified name. + * + * @param name Name of the property. + * @param dfltVal Default value if requested property was not found. + * @return File's property for specified name. + */ + @Nullable public String property(String name, @Nullable String dfltVal); + + /** + * Get properties of the file. + * + * @return Properties of the file. + */ + public Map<String, String> properties(); +} http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/81e01957/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsFileNotFoundException.java ---------------------------------------------------------------------- diff --git a/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsFileNotFoundException.java b/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsFileNotFoundException.java new file mode 100644 index 0000000..0dc1bdc --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsFileNotFoundException.java @@ -0,0 +1,36 @@ +/* @java.file.header */ + +/* _________ _____ __________________ _____ + * __ ____/___________(_)______ /__ ____/______ ____(_)_______ + * _ / __ __ ___/__ / _ __ / _ / __ _ __ `/__ / __ __ \ + * / /_/ / _ / _ / / /_/ / / /_/ / / /_/ / _ / _ / / / + * \____/ /_/ /_/ \_,__/ \____/ \__,_/ /_/ /_/ /_/ + */ + +package org.apache.ignite.fs; + +/** + * {@code GGFS} exception indicating that target resource is not found. + */ +public class IgniteFsFileNotFoundException extends IgniteFsInvalidPathException { + /** */ + private static final long serialVersionUID = 0L; + + /** + * Creates exception with error message specified. + * + * @param msg Error message. + */ + public IgniteFsFileNotFoundException(String msg) { + super(msg); + } + + /** + * Creates exception with given exception cause. + * + * @param cause Exception cause. 
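The IgniteFsFile descriptor above is read-only metadata; a short sketch of consuming it follows (illustration only, not part of the diff; the descriptor would normally come from an info() call as shown in the interface Javadoc).

// Illustrative sketch (not from the commit diff): reading IgniteFsFile metadata.
import java.util.Map;

import org.apache.ignite.fs.IgniteFsFile;

public class GgfsFileMetadataSketch {
    public static void printMetadata(IgniteFsFile file) {
        System.out.println("Path:              " + file.path());
        System.out.println("Is directory:      " + file.isDirectory());

        if (file.isFile()) {
            System.out.println("Length:            " + file.length());
            System.out.println("Block size:        " + file.blockSize());
            System.out.println("Group block size:  " + file.groupBlockSize());
        }

        System.out.println("Access time:       " + file.accessTime());
        System.out.println("Modification time: " + file.modificationTime());

        // Use the two-argument property() with a default, since the single-argument
        // variant throws IllegalArgumentException when the property is missing.
        System.out.println("Owner:             " + file.property("usrName", "<unknown>"));

        for (Map.Entry<String, String> e : file.properties().entrySet())
            System.out.println("Property " + e.getKey() + "=" + e.getValue());
    }
}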
+ */ + public IgniteFsFileNotFoundException(Throwable cause) { + super(cause); + } +} http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/81e01957/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsFileSystem.java ---------------------------------------------------------------------- diff --git a/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsFileSystem.java b/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsFileSystem.java new file mode 100644 index 0000000..4cd30ac --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsFileSystem.java @@ -0,0 +1,208 @@ +/* @java.file.header */ + +/* _________ _____ __________________ _____ + * __ ____/___________(_)______ /__ ____/______ ____(_)_______ + * _ / __ __ ___/__ / _ __ / _ / __ _ __ `/__ / __ __ \ + * / /_/ / _ / _ / / /_/ / / /_/ / / /_/ / _ / _ / / / + * \____/ /_/ /_/ \_,__/ \____/ \__,_/ /_/ /_/ /_/ + */ + +package org.apache.ignite.fs; + +import org.gridgain.grid.*; +import org.jetbrains.annotations.*; + +import java.io.*; +import java.util.*; + +/** + * Common file system interface. It provides a typical generalized "view" of any file system: + * <ul> + * <li>list directories or get information for a single path</li> + * <li>create/move/delete files or directories</li> + * <li>write/read data streams into/from files</li> + * </ul> + * + * This is the minimum of functionality that is needed to work as secondary file system in dual modes of GGFS. + */ +public interface IgniteFsFileSystem { + /** File property: user name. */ + public static final String PROP_USER_NAME = "usrName"; + + /** File property: group name. */ + public static final String PROP_GROUP_NAME = "grpName"; + + /** File property: permission. */ + public static final String PROP_PERMISSION = "permission"; + + /** + * Checks if the specified path exists in the file system. + * + * @param path Path to check for existence in the file system. + * @return {@code True} if such file exists, otherwise - {@code false}. + * @throws GridException In case of error. + */ + public boolean exists(IgniteFsPath path) throws GridException; + + /** + * Updates file information for the specified path. Existent properties, not listed in the passed collection, + * will not be affected. Other properties will be added or overwritten. Passed properties with {@code null} values + * will be removed from the stored properties or ignored if they don't exist in the file info. + * <p> + * When working in {@code DUAL_SYNC} or {@code DUAL_ASYNC} modes only the following properties will be propagated + * to the secondary file system: + * <ul> + * <li>{@code usrName} - file owner name;</li> + * <li>{@code grpName} - file owner group;</li> + * <li>{@code permission} - Unix-style string representing file permissions.</li> + * </ul> + * + * @param path File path to set properties for. + * @param props Properties to update. + * @return File information for specified path or {@code null} if such path does not exist. + * @throws GridException In case of error. + */ + @Nullable public IgniteFsFile update(IgniteFsPath path, Map<String, String> props) throws GridException; + + /** + * Renames/moves a file. + * <p> + * You are free to rename/move data files as you wish, but directories can be only renamed. + * You cannot move the directory between different parent directories. 
+ * <p> + * Examples: + * <ul> + * <li>"/work/file.txt" => "/home/project/Presentation Scenario.txt"</li> + * <li>"/work" => "/work-2012.bkp"</li> + * <li>"/work" => "<strike>/backups/work</strike>" - such operation is restricted for directories.</li> + * </ul> + * + * @param src Source file path to rename. + * @param dest Destination file path. If destination path is a directory, then source file will be placed + * into destination directory with original name. + * @throws GridException In case of error. + * @throws IgniteFsFileNotFoundException If source file doesn't exist. + */ + public void rename(IgniteFsPath src, IgniteFsPath dest) throws GridException; + + /** + * Deletes file. + * + * @param path File path to delete. + * @param recursive Delete non-empty directories recursively. + * @return {@code True} in case of success, {@code false} otherwise. + * @throws GridException In case of error. + */ + boolean delete(IgniteFsPath path, boolean recursive) throws GridException; + + /** + * Creates directories under specified path. + * + * @param path Path of directories chain to create. + * @throws GridException In case of error. + */ + public void mkdirs(IgniteFsPath path) throws GridException; + + /** + * Creates directories under specified path with the specified properties. + * + * @param path Path of directories chain to create. + * @param props Metadata properties to set on created directories. + * @throws GridException In case of error. + */ + public void mkdirs(IgniteFsPath path, @Nullable Map<String, String> props) throws GridException; + + /** + * Lists file paths under the specified path. + * + * @param path Path to list files under. + * @return List of files under the specified path. + * @throws GridException In case of error. + * @throws IgniteFsFileNotFoundException If path doesn't exist. + */ + public Collection<IgniteFsPath> listPaths(IgniteFsPath path) throws GridException; + + /** + * Lists files under the specified path. + * + * @param path Path to list files under. + * @return List of files under the specified path. + * @throws GridException In case of error. + * @throws IgniteFsFileNotFoundException If path doesn't exist. + */ + public Collection<IgniteFsFile> listFiles(IgniteFsPath path) throws GridException; + + /** + * Opens a file for reading. + * + * @param path File path to read. + * @param bufSize Read buffer size (bytes) or {@code zero} to use default value. + * @return File input stream to read data from. + * @throws GridException In case of error. + * @throws IgniteFsFileNotFoundException If path doesn't exist. + */ + public IgniteFsReader open(IgniteFsPath path, int bufSize) throws GridException; + + /** + * Creates a file and opens it for writing. + * + * @param path File path to create. + * @param overwrite Overwrite file if it already exists. Note: you cannot overwrite an existent directory. + * @return File output stream to write data to. + * @throws GridException In case of error. + */ + public OutputStream create(IgniteFsPath path, boolean overwrite) throws GridException; + + /** + * Creates a file and opens it for writing. + * + * @param path File path to create. + * @param bufSize Write buffer size (bytes) or {@code zero} to use default value. + * @param overwrite Overwrite file if it already exists. Note: you cannot overwrite an existent directory. + * @param replication Replication factor. + * @param blockSize Block size. + * @param props File properties to set. + * @return File output stream to write data to. 
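A minimal sketch of driving a secondary file system through the IgniteFsFileSystem methods and property constants shown above (illustration only, not part of the diff; paths are passed in to avoid assuming a particular IgniteFsPath constructor, and the destination is assumed to be a full file path rather than a directory).

// Illustrative sketch (not from the commit diff): rename a file and update the
// properties that are propagated to the secondary file system in DUAL modes.
import java.util.HashMap;
import java.util.Map;

import org.apache.ignite.fs.IgniteFsFileSystem;
import org.apache.ignite.fs.IgniteFsPath;
import org.gridgain.grid.GridException;

public class GgfsSecondaryFsSketch {
    public static void renameAndChown(IgniteFsFileSystem fs, IgniteFsPath src, IgniteFsPath dest)
        throws GridException {
        if (!fs.exists(src))
            throw new GridException("Source path not found: " + src);

        // Move the file ('dest' is assumed to be the full destination file path).
        fs.rename(src, dest);

        // Only these properties are propagated to the secondary file system
        // in DUAL_SYNC/DUAL_ASYNC modes.
        Map<String, String> props = new HashMap<>();

        props.put(IgniteFsFileSystem.PROP_USER_NAME, "ggfs");
        props.put(IgniteFsFileSystem.PROP_GROUP_NAME, "ggfs");
        props.put(IgniteFsFileSystem.PROP_PERMISSION, "0644"); // Unix-style permission string.

        fs.update(dest, props);
    }
}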
+ * @throws GridException In case of error. + */ + public OutputStream create(IgniteFsPath path, int bufSize, boolean overwrite, int replication, long blockSize, + @Nullable Map<String, String> props) throws GridException; + + /** + * Opens an output stream to an existing file for appending data. + * + * @param path File path to append. + * @param bufSize Write buffer size (bytes) or {@code zero} to use default value. + * @param create Create file if it doesn't exist yet. + * @param props File properties to set only in case the file was just created. + * @return File output stream to append data to. + * @throws GridException In case of error. + * @throws IgniteFsFileNotFoundException If path doesn't exist and create flag is {@code false}. + */ + public OutputStream append(IgniteFsPath path, int bufSize, boolean create, @Nullable Map<String, String> props) + throws GridException; + + /** + * Gets file information for the specified path. + * + * @param path Path to get information for. + * @return File information for specified path or {@code null} if such path does not exist. + * @throws GridException In case of error. + */ + @Nullable public IgniteFsFile info(IgniteFsPath path) throws GridException; + + /** + * Gets used space in bytes. + * + * @return Used space in bytes. + * @throws GridException In case of error. + */ + public long usedSpaceSize() throws GridException; + + /** + * Gets the implementation-specific properties of the file system. + * + * @return Map of properties. + */ + public Map<String,String> properties(); +} http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/81e01957/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsGroupDataBlocksKeyMapper.java ---------------------------------------------------------------------- diff --git a/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsGroupDataBlocksKeyMapper.java b/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsGroupDataBlocksKeyMapper.java new file mode 100644 index 0000000..0410c17 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsGroupDataBlocksKeyMapper.java @@ -0,0 +1,93 @@ +/* @java.file.header */ + +/* _________ _____ __________________ _____ + * __ ____/___________(_)______ /__ ____/______ ____(_)_______ + * _ / __ __ ___/__ / _ __ / _ / __ _ __ `/__ / __ __ \ + * / /_/ / _ / _ / / /_/ / / /_/ / / /_/ / _ / _ / / / + * \____/ /_/ /_/ \_,__/ \____/ \__,_/ /_/ /_/ /_/ + */ + +package org.apache.ignite.fs; + +import org.gridgain.grid.kernal.processors.cache.*; +import org.gridgain.grid.kernal.processors.ggfs.*; +import org.gridgain.grid.util.typedef.internal.*; + +/** + * {@code GGFS} class providing ability to group file's data blocks together on one node. + * All blocks within the same group are guaranteed to be cached together on the same node. + * Group size parameter controls how many sequential blocks will be cached together on the same node. + * <p> + * For example, if block size is {@code 64kb} and group size is {@code 256}, then each group will contain + * {@code 64kb * 256 = 16Mb}. Larger group sizes would reduce number of splits required to run map-reduce + * tasks, but will increase inequality of data size being stored on different nodes. + * <p> + * Note that {@link #groupSize()} parameter must correlate to Hadoop split size parameter defined + * in Hadoop via {@code mapred.max.split.size} property. Ideally you want all blocks accessed + * within one split to be mapped to {@code 1} group, so they can be located on the same grid node.
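A small write/append/inspect/delete round trip over the same IgniteFsFileSystem contract (illustration only, not part of the diff; the path is again taken as a parameter).

// Illustrative sketch (not from the commit diff): create, append, inspect and delete a file.
import java.io.IOException;
import java.io.OutputStream;

import org.apache.ignite.fs.IgniteFsFile;
import org.apache.ignite.fs.IgniteFsFileSystem;
import org.apache.ignite.fs.IgniteFsPath;
import org.gridgain.grid.GridException;

public class GgfsRoundTripSketch {
    public static void roundTrip(IgniteFsFileSystem fs, IgniteFsPath file) throws GridException, IOException {
        // Create the file (overwriting any existing one) and write some data.
        try (OutputStream out = fs.create(file, true)) {
            out.write("hello".getBytes());
        }

        // Append more data; bufSize 0 means "use the default", create=false since the file exists.
        try (OutputStream out = fs.append(file, 0, false, null)) {
            out.write(" world".getBytes());
        }

        IgniteFsFile info = fs.info(file);

        if (info != null)
            System.out.println("Length after append: " + info.length());

        System.out.println("Used space: " + fs.usedSpaceSize());

        // Non-recursive delete is enough for a regular file.
        fs.delete(file, false);
    }
}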
+ * For example, default Hadoop split size is {@code 64mb} and default {@code GGFS} block size + * is {@code 64kb}. This means that to make sure that each split goes only through blocks on + * the same node (without hopping between nodes over network), we have to make the {@link #groupSize()} + * value be equal to {@code 64mb / 64kb = 1024}. + * <p> + * It is required for {@code GGFS} data cache to be configured with this mapper. Here is an + * example of how it can be specified in XML configuration: + * <pre name="code" class="xml"> + * <bean id="cacheCfgBase" class="org.gridgain.grid.cache.GridCacheConfiguration" abstract="true"> + * ... + * <property name="affinityMapper"> + * <bean class="org.gridgain.grid.ggfs.GridGgfsGroupDataBlocksKeyMapper"> + * <!-- How many sequential blocks will be stored on the same node. --> + * <constructor-arg value="512"/> + * </bean> + * </property> + * ... + * </bean> + * </pre> + */ +public class IgniteFsGroupDataBlocksKeyMapper extends GridCacheDefaultAffinityKeyMapper { + /** */ + private static final long serialVersionUID = 0L; + + /** Size of the group. */ + private final int grpSize; + + /*** + * Constructs affinity mapper to group several data blocks with the same key. + * + * @param grpSize Size of the group in blocks. + */ + public IgniteFsGroupDataBlocksKeyMapper(int grpSize) { + A.ensure(grpSize >= 1, "grpSize >= 1"); + + this.grpSize = grpSize; + } + + /** {@inheritDoc} */ + @Override public Object affinityKey(Object key) { + if (key != null && GridGgfsBlockKey.class.equals(key.getClass())) { + GridGgfsBlockKey blockKey = (GridGgfsBlockKey)key; + + if (blockKey.affinityKey() != null) + return blockKey.affinityKey(); + + long grpId = blockKey.getBlockId() / grpSize; + + return blockKey.getFileId().hashCode() + (int)(grpId ^ (grpId >>> 32)); + } + + return super.affinityKey(key); + } + + /** + * @return Size of the group. + */ + public int groupSize() { + return grpSize; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(IgniteFsGroupDataBlocksKeyMapper.class, this); + } +} http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/81e01957/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsInputStream.java ---------------------------------------------------------------------- diff --git a/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsInputStream.java b/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsInputStream.java new file mode 100644 index 0000000..d98d051 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsInputStream.java @@ -0,0 +1,72 @@ +/* @java.file.header */ + +/* _________ _____ __________________ _____ + * __ ____/___________(_)______ /__ ____/______ ____(_)_______ + * _ / __ __ ___/__ / _ __ / _ / __ _ __ `/__ / __ __ \ + * / /_/ / _ / _ / / /_/ / / /_/ / / /_/ / _ / _ / / / + * \____/ /_/ /_/ \_,__/ \____/ \__,_/ /_/ /_/ /_/ + */ + +package org.apache.ignite.fs; + +import java.io.*; + +/** + * {@code GGFS} input stream to read data from the file system. + * It provides several additional methods for asynchronous access. + */ +public abstract class IgniteFsInputStream extends InputStream implements IgniteFsReader { + /** + * Gets file length during file open. + * + * @return File length. + */ + public abstract long length(); + + /** + * Seek to the specified position. + * + * @param pos Position to seek to. + * @throws IOException In case of IO exception. 
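The group-size arithmetic in the IgniteFsGroupDataBlocksKeyMapper Javadoc above can also be expressed in code. In the sketch below (illustration only, not part of the diff) the setAffinityMapper call on GridCacheConfiguration is inferred from the affinityMapper Spring property used in the XML example, so treat that wiring as an assumption.

// Illustrative sketch (not from the commit diff): derive the group size from the Hadoop
// split size and GGFS block size, then register the mapper as the data cache affinity mapper.
import org.apache.ignite.fs.IgniteFsGroupDataBlocksKeyMapper;
import org.gridgain.grid.cache.GridCacheConfiguration;

public class GgfsGroupSizeSketch {
    public static GridCacheConfiguration dataCacheConfig(long hadoopSplitSize, int ggfsBlockSize) {
        // E.g. 64mb / 64kb = 1024 sequential blocks per group; must be >= 1
        // (enforced by the mapper constructor).
        int grpSize = (int)(hadoopSplitSize / ggfsBlockSize);

        GridCacheConfiguration dataCacheCfg = new GridCacheConfiguration();

        // Assumed setter, mirroring the 'affinityMapper' property from the XML example above.
        dataCacheCfg.setAffinityMapper(new IgniteFsGroupDataBlocksKeyMapper(grpSize));

        return dataCacheCfg;
    }
}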
+ */ + public abstract void seek(long pos) throws IOException; + + /** + * Get the current position in the input stream. + * + * @return The current position in the input stream. + * @throws IOException In case of IO exception. + */ + public abstract long position() throws IOException; + + /** + * Read bytes from the given position in the stream to the given buffer. + * Continues to read until passed buffer becomes filled. + * + * @param pos Position in the input stream to seek. + * @param buf Buffer into which data is read. + * @throws IOException In case of IO exception. + */ + public abstract void readFully(long pos, byte[] buf) throws IOException; + + /** + * + * @param pos Position in the input stream to seek. + * @param buf Buffer into which data is read. + * @param off Offset in the buffer from which stream data should be written. + * @param len The number of bytes to read. + * @throws IOException In case of IO exception. + */ + public abstract void readFully(long pos, byte[] buf, int off, int len) throws IOException; + + /** + * + * @param pos Position in the input stream to seek. + * @param buf Buffer into which data is read. + * @param off Offset in the buffer from which stream data should be written. + * @param len The number of bytes to read. + * @return Total number of bytes read into the buffer, or -1 if there is no more data (EOF). + * @throws IOException In case of IO exception. + */ + @Override public abstract int read(long pos, byte[] buf, int off, int len) throws IOException; +} http://git-wip-us.apache.org/repos/asf/incubator-ignite/blob/81e01957/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsInvalidHdfsVersionException.java ---------------------------------------------------------------------- diff --git a/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsInvalidHdfsVersionException.java b/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsInvalidHdfsVersionException.java new file mode 100644 index 0000000..4edd97b --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/fs/IgniteFsInvalidHdfsVersionException.java @@ -0,0 +1,34 @@ +/* @java.file.header */ + +/* _________ _____ __________________ _____ + * __ ____/___________(_)______ /__ ____/______ ____(_)_______ + * _ / __ __ ___/__ / _ __ / _ / __ _ __ `/__ / __ __ \ + * / /_/ / _ / _ / / /_/ / / /_/ / / /_/ / _ / _ / / / + * \____/ /_/ /_/ \_,__/ \____/ \__,_/ /_/ /_/ /_/ + */ + +package org.apache.ignite.fs; + +/** + * Exception thrown when GridGain detects that remote HDFS version differs from version of HDFS libraries + * in GridGain classpath. + */ +public class IgniteFsInvalidHdfsVersionException extends IgniteFsException { + /** */ + private static final long serialVersionUID = 0L; + + /** + * @param msg Error message. + */ + public IgniteFsInvalidHdfsVersionException(String msg) { + super(msg); + } + + /** + * @param msg Error message. + * @param cause Error cause. + */ + public IgniteFsInvalidHdfsVersionException(String msg, Throwable cause) { + super(msg, cause); + } +}
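A brief sketch of the positional-read API of IgniteFsInputStream shown above (illustration only, not part of the diff; the stream is taken as a parameter and would typically come from opening a GGFS file for reading).

// Illustrative sketch (not from the commit diff): positional reads and seeking
// on IgniteFsInputStream.
import java.io.IOException;

import org.apache.ignite.fs.IgniteFsInputStream;

public class GgfsPositionalReadSketch {
    public static byte[] readTail(IgniteFsInputStream in, int tailLen) throws IOException {
        long len = in.length();

        byte[] tail = new byte[(int)Math.min(tailLen, len)];

        // readFully() reads from the given absolute position until the buffer is filled.
        in.readFully(len - tail.length, tail);

        // seek()/position() support sequential access from an arbitrary offset.
        in.seek(0);

        assert in.position() == 0;

        return tail;
    }
}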