Author: ningjiang
Date: Fri Oct 21 04:05:31 2011
New Revision: 1187176
URL: http://svn.apache.org/viewvc?rev=1187176&view=rev
Log:
CAMEL-4562 Add OSGi integration test for Hdfs component with thanks to Ioannis
Added:
camel/trunk/tests/camel-itest-osgi/src/test/java/org/apache/camel/itest/osgi/hdfs/
camel/trunk/tests/camel-itest-osgi/src/test/java/org/apache/camel/itest/osgi/hdfs/HdfsBlueprintRouteTest.java
(with props)
camel/trunk/tests/camel-itest-osgi/src/test/java/org/apache/camel/itest/osgi/hdfs/HdfsRouteTest.java
(with props)
camel/trunk/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/osgi/hdfs/
camel/trunk/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/osgi/hdfs/blueprintCamelContext.xml
(with props)
camel/trunk/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/osgi/hdfs/core-default.xml
(with props)
Modified:
camel/trunk/tests/camel-itest-osgi/pom.xml
Modified: camel/trunk/tests/camel-itest-osgi/pom.xml
URL:
http://svn.apache.org/viewvc/camel/trunk/tests/camel-itest-osgi/pom.xml?rev=1187176&r1=1187175&r2=1187176&view=diff
==============================================================================
--- camel/trunk/tests/camel-itest-osgi/pom.xml (original)
+++ camel/trunk/tests/camel-itest-osgi/pom.xml Fri Oct 21 04:05:31 2011
@@ -163,6 +163,11 @@
</dependency>
<dependency>
<groupId>org.apache.camel</groupId>
+ <artifactId>camel-hdfs</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.camel</groupId>
<artifactId>camel-hl7</artifactId>
<scope>test</scope>
</dependency>
Added:
camel/trunk/tests/camel-itest-osgi/src/test/java/org/apache/camel/itest/osgi/hdfs/HdfsBlueprintRouteTest.java
URL:
http://svn.apache.org/viewvc/camel/trunk/tests/camel-itest-osgi/src/test/java/org/apache/camel/itest/osgi/hdfs/HdfsBlueprintRouteTest.java?rev=1187176&view=auto
==============================================================================
---
camel/trunk/tests/camel-itest-osgi/src/test/java/org/apache/camel/itest/osgi/hdfs/HdfsBlueprintRouteTest.java
(added)
+++
camel/trunk/tests/camel-itest-osgi/src/test/java/org/apache/camel/itest/osgi/hdfs/HdfsBlueprintRouteTest.java
Fri Oct 21 04:05:31 2011
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.itest.osgi.hdfs;
+
+import java.io.File;
+import java.io.InputStream;
+import org.apache.camel.CamelContext;
+import org.apache.camel.ProducerTemplate;
+import org.apache.camel.component.mock.MockEndpoint;
+import org.apache.camel.itest.osgi.blueprint.OSGiBlueprintTestSupport;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.ops4j.pax.exam.Customizer;
+import org.ops4j.pax.exam.Option;
+import org.ops4j.pax.exam.junit.Configuration;
+import org.ops4j.pax.exam.junit.JUnit4TestRunner;
+import org.osgi.framework.Constants;
+
+
+import static org.ops4j.pax.exam.CoreOptions.felix;
+import static org.ops4j.pax.exam.OptionUtils.combine;
+import static org.ops4j.pax.exam.container.def.PaxRunnerOptions.scanFeatures;
+import static org.ops4j.pax.exam.container.def.PaxRunnerOptions.vmOption;
+import static
org.ops4j.pax.exam.container.def.PaxRunnerOptions.workingDirectory;
+import static org.ops4j.pax.swissbox.tinybundles.core.TinyBundles.modifyBundle;
+
+@RunWith(JUnit4TestRunner.class)
+public class HdfsBlueprintRouteTest extends OSGiBlueprintTestSupport {
+ //Hadoop doesn't run on IBM JDK
+ private static final boolean SKIP =
System.getProperty("java.vendor").contains("IBM");
+ private static final File HOME = new File("target/paxrunner/");
+
+ @Test
+ public void testWriteAndReadString() throws Exception {
+ if (SKIP) {
+ return;
+ }
+
+ getInstalledBundle("CamelBlueprintHdfsTestBundle").start();
+ CamelContext ctx = getOsgiService(CamelContext.class,
"(camel.context.symbolicname=CamelBlueprintHdfsTestBundle)", 20000);
+
+ ProducerTemplate template = ctx.createProducerTemplate();
+ template.sendBody("direct:start", "CIAO");
+
+ MockEndpoint resultEndpoint = (MockEndpoint)
ctx.getEndpoint("mock:result");
+ resultEndpoint.expectedMessageCount(1);
+ resultEndpoint.assertIsSatisfied();
+ }
+
+ @Configuration
+ public static Option[] configure() throws Exception {
+
+ Option[] options = combine(
+ getDefaultCamelKarafOptions(),
+ new Customizer() {
+ @Override
+ public InputStream customizeTestProbe(InputStream
testProbe) {
+ return modifyBundle(testProbe)
+ .add("core-default.xml",
HdfsRouteTest.class.getResource("core-default.xml"))
+ .add("OSGI-INF/blueprint/test.xml",
HdfsRouteTest.class.getResource("blueprintCamelContext.xml"))
+ .set(Constants.BUNDLE_SYMBOLICNAME,
"CamelBlueprintHdfsTestBundle")
+ .set(Constants.DYNAMICIMPORT_PACKAGE, "*")
+ .build();
+ }
+ },
+ // using the features to install the camel components
+ scanFeatures(getCamelKarafFeatureUrl(),
+ "camel-blueprint", "camel-hdfs"),
+ workingDirectory("target/paxrunner/"),
+ vmOption("-Dkaraf.base=" + HOME.getAbsolutePath()),
+
//vmOption("-Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=5005"),
+ felix());
+
+ return options;
+ }
+}
\ No newline at end of file
Propchange:
camel/trunk/tests/camel-itest-osgi/src/test/java/org/apache/camel/itest/osgi/hdfs/HdfsBlueprintRouteTest.java
------------------------------------------------------------------------------
svn:eol-style = native
Propchange:
camel/trunk/tests/camel-itest-osgi/src/test/java/org/apache/camel/itest/osgi/hdfs/HdfsBlueprintRouteTest.java
------------------------------------------------------------------------------
svn:keywords = Rev Date
Added:
camel/trunk/tests/camel-itest-osgi/src/test/java/org/apache/camel/itest/osgi/hdfs/HdfsRouteTest.java
URL:
http://svn.apache.org/viewvc/camel/trunk/tests/camel-itest-osgi/src/test/java/org/apache/camel/itest/osgi/hdfs/HdfsRouteTest.java?rev=1187176&view=auto
==============================================================================
---
camel/trunk/tests/camel-itest-osgi/src/test/java/org/apache/camel/itest/osgi/hdfs/HdfsRouteTest.java
(added)
+++
camel/trunk/tests/camel-itest-osgi/src/test/java/org/apache/camel/itest/osgi/hdfs/HdfsRouteTest.java
Fri Oct 21 04:05:31 2011
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.itest.osgi.hdfs;
+
+import java.io.File;
+import java.io.InputStream;
+import org.apache.camel.builder.RouteBuilder;
+import org.apache.camel.component.mock.MockEndpoint;
+import org.apache.camel.itest.osgi.OSGiIntegrationTestSupport;
+import org.apache.camel.itest.osgi.jpa.SendEmail;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.Text;
+import org.apache.karaf.testing.Helper;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.ops4j.pax.exam.Customizer;
+import org.ops4j.pax.exam.Option;
+import org.ops4j.pax.exam.junit.Configuration;
+import org.ops4j.pax.exam.junit.JUnit4TestRunner;
+import org.osgi.framework.Constants;
+
+
+import static org.apache.hadoop.io.SequenceFile.createWriter;
+import static org.ops4j.pax.exam.CoreOptions.equinox;
+import static org.ops4j.pax.exam.CoreOptions.felix;
+import static org.ops4j.pax.exam.OptionUtils.combine;
+import static org.ops4j.pax.exam.container.def.PaxRunnerOptions.scanFeatures;
+import static
org.ops4j.pax.exam.container.def.PaxRunnerOptions.workingDirectory;
+import static org.ops4j.pax.swissbox.tinybundles.core.TinyBundles.modifyBundle;
+
+@RunWith(JUnit4TestRunner.class)
+public class HdfsRouteTest extends OSGiIntegrationTestSupport {
+ //Hadoop doesn't run on IBM JDK
+ private static final boolean SKIP =
System.getProperty("java.vendor").contains("IBM");
+
+ @Test
+ public void testReadString() throws Exception {
+ if (SKIP) {
+ return;
+ }
+
+ final Path file = new Path(new
File("target/test/test-camel-string").getAbsolutePath());
+ org.apache.hadoop.conf.Configuration conf = new
org.apache.hadoop.conf.Configuration();
+ FileSystem fs1 = FileSystem.get(file.toUri(), conf);
+ SequenceFile.Writer writer = createWriter(fs1, conf, file,
NullWritable.class, Text.class);
+ NullWritable keyWritable = NullWritable.get();
+ Text valueWritable = new Text();
+ String value = "CIAO!";
+ valueWritable.set(value);
+ writer.append(keyWritable, valueWritable);
+ writer.sync();
+ writer.close();
+
+ context.addRoutes(new RouteBuilder() {
+ public void configure() {
+ from("hdfs:///" + file.toUri() +
"?fileSystemType=LOCAL&fileType=SEQUENCE_FILE&initialDelay=0").to("mock:result");
+ }
+ });
+ context.start();
+
+ MockEndpoint resultEndpoint = (MockEndpoint)
context.getEndpoint("mock:result");
+ resultEndpoint.expectedMessageCount(1);
+ resultEndpoint.assertIsSatisfied();
+ }
+
+ @Configuration
+ public static Option[] configure() throws Exception {
+ Option[] options = combine(
+ // Default karaf environment
+ Helper.getDefaultOptions(
+ // this is how you set the default log level when
using pax logging (logProfile)
+ Helper.setLogLevel("WARN")),
+ new Customizer() {
+ @Override
+ public InputStream customizeTestProbe(InputStream
testProbe) {
+ return modifyBundle(testProbe)
+ .add(SendEmail.class)
+ .add("core-default.xml",
HdfsRouteTest.class.getResource("core-default.xml"))
+ //.add("hdfs-default.xml",
HdfsRouteTest.class.getResource("hdfs-default.xml"))
+ .set(Constants.BUNDLE_SYMBOLICNAME,
"CamelHdfsTestBundle")
+ .set(Constants.DYNAMICIMPORT_PACKAGE, "*")
+ .build();
+ }
+ },
+
+ // install the spring.
+ scanFeatures(getKarafFeatureUrl(), "spring"),
+ // using the features to install the camel components
+ scanFeatures(getCamelKarafFeatureUrl(),
+ "camel-core", "camel-spring", "camel-test",
"camel-hdfs"),
+ workingDirectory("target/paxrunner/"),
+ //vmOption("-Xdebug
-Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=5005"),
+ felix(), equinox());
+
+ return options;
+ }
+}
Propchange:
camel/trunk/tests/camel-itest-osgi/src/test/java/org/apache/camel/itest/osgi/hdfs/HdfsRouteTest.java
------------------------------------------------------------------------------
svn:eol-style = native
Propchange:
camel/trunk/tests/camel-itest-osgi/src/test/java/org/apache/camel/itest/osgi/hdfs/HdfsRouteTest.java
------------------------------------------------------------------------------
svn:keywords = Rev Date
Added:
camel/trunk/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/osgi/hdfs/blueprintCamelContext.xml
URL:
http://svn.apache.org/viewvc/camel/trunk/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/osgi/hdfs/blueprintCamelContext.xml?rev=1187176&view=auto
==============================================================================
---
camel/trunk/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/osgi/hdfs/blueprintCamelContext.xml
(added)
+++
camel/trunk/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/osgi/hdfs/blueprintCamelContext.xml
Fri Oct 21 04:05:31 2011
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright 2006 The Apache Software Foundation.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
+
xmlns:cm="http://aries.apache.org/blueprint/xmlns/blueprint-cm/v1.1.0"
+
xmlns:ext="http://aries.apache.org/blueprint/xmlns/blueprint-ext/v1.0.0"
+ default-activation="lazy">
+
+ <ext:property-placeholder id="external" placeholder-prefix="$["
placeholder-suffix="]"/>
+
+ <camelContext xmlns="http://camel.apache.org/schema/blueprint">
+
+ <!-- using Camel properties component and refer to the blueprint
property placeholder by its id -->
+ <propertyPlaceholder id="properties" location="blueprint:external"
+ prefixToken="[[" suffixToken="]]"
+ propertyPrefix="prefix."/>
+ <route>
+ <from uri="direct:start"/>
+ <to
uri="hdfs://[[karaf.base]]/hdfs/test-camel?fileSystemType=LOCAL&splitStrategy=BYTES:5,IDLE:1000"/>
+ </route>
+ <route>
+ <from
uri="hdfs://[[karaf.base]]/hdfs/test-camel?pattern=seg*&initialDelay=2000&fileSystemType=LOCAL&chunkSize=5"/>
+ <to uri="mock:result"/>
+ </route>
+ </camelContext>
+
+</blueprint>
Propchange:
camel/trunk/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/osgi/hdfs/blueprintCamelContext.xml
------------------------------------------------------------------------------
svn:eol-style = native
Propchange:
camel/trunk/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/osgi/hdfs/blueprintCamelContext.xml
------------------------------------------------------------------------------
svn:keywords = Rev Date
Propchange:
camel/trunk/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/osgi/hdfs/blueprintCamelContext.xml
------------------------------------------------------------------------------
svn:mime-type = text/xml
Added:
camel/trunk/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/osgi/hdfs/core-default.xml
URL:
http://svn.apache.org/viewvc/camel/trunk/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/osgi/hdfs/core-default.xml?rev=1187176&view=auto
==============================================================================
---
camel/trunk/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/osgi/hdfs/core-default.xml
(added)
+++
camel/trunk/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/osgi/hdfs/core-default.xml
Fri Oct 21 04:05:31 2011
@@ -0,0 +1,487 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Copyright 2006 The Apache Software Foundation.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- Do not modify this file directly. Instead, copy entries that you -->
+<!-- wish to modify from this file into core-site.xml and change them -->
+<!-- there. If core-site.xml does not already exist, create it. -->
+
+<configuration>
+
+<!--- global properties -->
+
+<property>
+ <name>hadoop.tmp.dir</name>
+ <value>/tmp/hadoop-${user.name}</value>
+ <description>A base for other temporary directories.</description>
+</property>
+
+<property>
+ <name>hadoop.native.lib</name>
+ <value>true</value>
+ <description>Should native hadoop libraries, if present, be
used.</description>
+</property>
+
+<property>
+ <name>hadoop.http.filter.initializers</name>
+ <value></value>
+ <description>A comma separated list of class names. Each class in the list
+ must extend org.apache.hadoop.http.FilterInitializer. The corresponding
+ Filter will be initialized. Then, the Filter will be applied to all user
+ facing jsp and servlet web pages. The ordering of the list defines the
+ ordering of the filters.</description>
+</property>
+
+ <property>
+ <name>hadoop.security.group.mapping</name>
+ <value>org.apache.hadoop.security.ShellBasedUnixGroupsMapping</value>
+ <description>Class for user to group mapping (get groups for a given user)
+ </description>
+</property>
+
+<property>
+ <name>hadoop.security.authorization</name>
+ <value>false</value>
+ <description>Is service-level authorization enabled?</description>
+</property>
+
+<property>
+ <name>hadoop.security.authentication</name>
+ <value>simple</value>
+ <description>Possible values are simple (no authentication), and kerberos
+ </description>
+</property>
+
+<!--
+<property>
+ <name>hadoop.security.service.user.name.key</name>
+ <value></value>
+ <description>Name of the kerberos principal of the user that owns
+ a given service daemon
+ </description>
+</property>
+-->
+
+<!--- logging properties -->
+
+<property>
+ <name>hadoop.logfile.size</name>
+ <value>10000000</value>
+ <description>The max size of each log file</description>
+</property>
+
+<property>
+ <name>hadoop.logfile.count</name>
+ <value>10</value>
+ <description>The max number of log files</description>
+</property>
+
+<!-- i/o properties -->
+<property>
+ <name>io.file.buffer.size</name>
+ <value>4096</value>
+ <description>The size of buffer for use in sequence files.
+ The size of this buffer should probably be a multiple of hardware
+ page size (4096 on Intel x86), and it determines how much data is
+ buffered during read and write operations.</description>
+</property>
+
+<property>
+ <name>io.bytes.per.checksum</name>
+ <value>512</value>
+ <description>The number of bytes per checksum. Must not be larger than
+ io.file.buffer.size.</description>
+</property>
+
+<property>
+ <name>io.skip.checksum.errors</name>
+ <value>false</value>
+ <description>If true, when a checksum error is encountered while
+ reading a sequence file, entries are skipped, instead of throwing an
+ exception.</description>
+</property>
+
+<property>
+ <name>io.compression.codecs</name>
+
<value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec</value>
+ <description>A list of the compression codec classes that can be used
+ for compression/decompression.</description>
+</property>
+
+<property>
+ <name>io.serializations</name>
+ <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
+ <description>A list of serialization classes that can be used for
+ obtaining serializers and deserializers.</description>
+</property>
+
+<!-- file system properties -->
+
+<property>
+ <name>fs.default.name</name>
+ <value>file:///</value>
+ <description>The name of the default file system. A URI whose
+ scheme and authority determine the FileSystem implementation. The
+ uri's scheme determines the config property (fs.SCHEME.impl) naming
+ the FileSystem implementation class. The uri's authority is used to
+ determine the host, port, etc. for a filesystem.</description>
+</property>
+
+<property>
+ <name>fs.trash.interval</name>
+ <value>0</value>
+ <description>Number of minutes between trash checkpoints.
+ If zero, the trash feature is disabled.
+ </description>
+</property>
+
+<property>
+ <name>fs.file.impl</name>
+ <value>org.apache.hadoop.fs.LocalFileSystem</value>
+ <description>The FileSystem for file: uris.</description>
+</property>
+
+<property>
+ <name>fs.hdfs.impl</name>
+ <value>org.apache.hadoop.hdfs.DistributedFileSystem</value>
+ <description>The FileSystem for hdfs: uris.</description>
+</property>
+
+<property>
+ <name>fs.s3.impl</name>
+ <value>org.apache.hadoop.fs.s3.S3FileSystem</value>
+ <description>The FileSystem for s3: uris.</description>
+</property>
+
+<property>
+ <name>fs.s3n.impl</name>
+ <value>org.apache.hadoop.fs.s3native.NativeS3FileSystem</value>
+ <description>The FileSystem for s3n: (Native S3) uris.</description>
+</property>
+
+<property>
+ <name>fs.kfs.impl</name>
+ <value>org.apache.hadoop.fs.kfs.KosmosFileSystem</value>
+ <description>The FileSystem for kfs: uris.</description>
+</property>
+
+<property>
+ <name>fs.hftp.impl</name>
+ <value>org.apache.hadoop.hdfs.HftpFileSystem</value>
+</property>
+
+<property>
+ <name>fs.hsftp.impl</name>
+ <value>org.apache.hadoop.hdfs.HsftpFileSystem</value>
+</property>
+
+<property>
+ <name>fs.ftp.impl</name>
+ <value>org.apache.hadoop.fs.ftp.FTPFileSystem</value>
+ <description>The FileSystem for ftp: uris.</description>
+</property>
+
+<property>
+ <name>fs.ramfs.impl</name>
+ <value>org.apache.hadoop.fs.InMemoryFileSystem</value>
+ <description>The FileSystem for ramfs: uris.</description>
+</property>
+
+<property>
+ <name>fs.har.impl</name>
+ <value>org.apache.hadoop.fs.HarFileSystem</value>
+ <description>The filesystem for Hadoop archives. </description>
+</property>
+
+<property>
+ <name>fs.har.impl.disable.cache</name>
+ <value>true</value>
+ <description>Don't cache 'har' filesystem instances.</description>
+</property>
+
+<property>
+ <name>fs.checkpoint.dir</name>
+ <value>${hadoop.tmp.dir}/dfs/namesecondary</value>
+ <description>Determines where on the local filesystem the DFS secondary
+ name node should store the temporary images to merge.
+ If this is a comma-delimited list of directories then the image is
+ replicated in all of the directories for redundancy.
+ </description>
+</property>
+
+<property>
+ <name>fs.checkpoint.edits.dir</name>
+ <value>${fs.checkpoint.dir}</value>
+ <description>Determines where on the local filesystem the DFS secondary
+ name node should store the temporary edits to merge.
+ If this is a comma-delimited list of directories then the edits are
+ replicated in all of the directories for redundancy.
+ Default value is same as fs.checkpoint.dir
+ </description>
+</property>
+
+<property>
+ <name>fs.checkpoint.period</name>
+ <value>3600</value>
+ <description>The number of seconds between two periodic checkpoints.
+ </description>
+</property>
+
+<property>
+ <name>fs.checkpoint.size</name>
+ <value>67108864</value>
+ <description>The size of the current edit log (in bytes) that triggers
+ a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
+ </description>
+</property>
+
+
+
+<property>
+ <name>fs.s3.block.size</name>
+ <value>67108864</value>
+ <description>Block size to use when writing files to S3.</description>
+</property>
+
+<property>
+ <name>fs.s3.buffer.dir</name>
+ <value>${hadoop.tmp.dir}/s3</value>
+ <description>Determines where on the local filesystem the S3 filesystem
+ should store files before sending them to S3
+ (or after retrieving them from S3).
+ </description>
+</property>
+
+<property>
+ <name>fs.s3.maxRetries</name>
+ <value>4</value>
+ <description>The maximum number of retries for reading or writing files to
S3,
+ before we signal failure to the application.
+ </description>
+</property>
+
+<property>
+ <name>fs.s3.sleepTimeSeconds</name>
+ <value>10</value>
+ <description>The number of seconds to sleep between each S3 retry.
+ </description>
+</property>
+
+
+<property>
+ <name>local.cache.size</name>
+ <value>10737418240</value>
+ <description>The limit on the size of cache you want to keep, set by default
+ to 10GB. This will act as a soft limit on the cache directory for out of
band data.
+ </description>
+</property>
+
+<property>
+ <name>io.seqfile.compress.blocksize</name>
+ <value>1000000</value>
+ <description>The minimum block size for compression in block compressed
+ SequenceFiles.
+ </description>
+</property>
+
+<property>
+ <name>io.seqfile.lazydecompress</name>
+ <value>true</value>
+ <description>Should values of block-compressed SequenceFiles be decompressed
+ only when necessary.
+ </description>
+</property>
+
+<property>
+ <name>io.seqfile.sorter.recordlimit</name>
+ <value>1000000</value>
+ <description>The limit on number of records to be kept in memory in a spill
+ in SequenceFiles.Sorter
+ </description>
+</property>
+
+ <property>
+ <name>io.mapfile.bloom.size</name>
+ <value>1048576</value>
+ <description>The size of BloomFilter-s used in BloomMapFile. Each time this
many
+ keys is appended the next BloomFilter will be created (inside a
DynamicBloomFilter).
+ Larger values minimize the number of filters, which slightly increases the
performance,
+ but may waste too much space if the total number of keys is usually much
smaller
+ than this number.
+ </description>
+</property>
+
+<property>
+ <name>io.mapfile.bloom.error.rate</name>
+ <value>0.005</value>
+ <description>The rate of false positives in BloomFilter-s used in
BloomMapFile.
+ As this value decreases, the size of BloomFilter-s increases exponentially.
This
+ value is the probability of encountering false positives (default is 0.5%).
+ </description>
+</property>
+
+<property>
+ <name>hadoop.util.hash.type</name>
+ <value>murmur</value>
+ <description>The default implementation of Hash. Currently this can take one
of the
+ two values: 'murmur' to select MurmurHash and 'jenkins' to select
JenkinsHash.
+ </description>
+</property>
+
+
+<!-- ipc properties -->
+
+<property>
+ <name>ipc.client.idlethreshold</name>
+ <value>4000</value>
+ <description>Defines the threshold number of connections after which
+ connections will be inspected for idleness.
+ </description>
+</property>
+
+<property>
+ <name>ipc.client.kill.max</name>
+ <value>10</value>
+ <description>Defines the maximum number of clients to disconnect in one go.
+ </description>
+</property>
+
+<property>
+ <name>ipc.client.connection.maxidletime</name>
+ <value>10000</value>
+ <description>The maximum time in msec after which a client will bring down
the
+ connection to the server.
+ </description>
+</property>
+
+<property>
+ <name>ipc.client.connect.max.retries</name>
+ <value>10</value>
+ <description>Indicates the number of retries a client will make to establish
+ a server connection.
+ </description>
+</property>
+
+<property>
+ <name>ipc.server.listen.queue.size</name>
+ <value>128</value>
+ <description>Indicates the length of the listen queue for servers accepting
+ client connections.
+ </description>
+</property>
+
+<property>
+ <name>ipc.server.tcpnodelay</name>
+ <value>false</value>
+ <description>Turn on/off Nagle's algorithm for the TCP socket connection on
+ the server. Setting to true disables the algorithm and may decrease latency
+ with a cost of more/smaller packets.
+ </description>
+</property>
+
+<property>
+ <name>ipc.client.tcpnodelay</name>
+ <value>false</value>
+ <description>Turn on/off Nagle's algorithm for the TCP socket connection on
+ the client. Setting to true disables the algorithm and may decrease latency
+ with a cost of more/smaller packets.
+ </description>
+</property>
+
+
+<!-- Web Interface Configuration -->
+
+<property>
+ <name>webinterface.private.actions</name>
+ <value>false</value>
+ <description> If set to true, the web interfaces of JT and NN may contain
+ actions, such as kill job, delete file, etc., that should
+ not be exposed to public. Enable this option if the interfaces
+ are only reachable by those who have the right authorization.
+ </description>
+</property>
+
+<!-- Proxy Configuration -->
+
+<property>
+ <name>hadoop.rpc.socket.factory.class.default</name>
+ <value>org.apache.hadoop.net.StandardSocketFactory</value>
+ <description> Default SocketFactory to use. This parameter is expected to be
+ formatted as "package.FactoryClassName".
+ </description>
+</property>
+
+<property>
+ <name>hadoop.rpc.socket.factory.class.ClientProtocol</name>
+ <value></value>
+ <description> SocketFactory to use to connect to a DFS. If null or empty, use
+ hadoop.rpc.socket.class.default. This socket factory is also used by
+ DFSClient to create sockets to DataNodes.
+ </description>
+</property>
+
+
+
+<property>
+ <name>hadoop.socks.server</name>
+ <value></value>
+ <description> Address (host:port) of the SOCKS server to be used by the
+ SocksSocketFactory.
+ </description>
+</property>
+
+<!-- Rack Configuration -->
+
+<property>
+ <name>topology.node.switch.mapping.impl</name>
+ <value>org.apache.hadoop.net.ScriptBasedMapping</value>
+ <description> The default implementation of the DNSToSwitchMapping. It
+ invokes a script specified in topology.script.file.name to resolve
+ node names. If the value for topology.script.file.name is not set, the
+ default value of DEFAULT_RACK is returned for all node names.
+ </description>
+</property>
+
+<property>
+ <name>topology.script.file.name</name>
+ <value></value>
+ <description> The script name that should be invoked to resolve DNS names to
+ NetworkTopology names. Example: the script would take host.foo.bar as an
+ argument, and return /rack1 as the output.
+ </description>
+</property>
+
+<property>
+ <name>topology.script.number.args</name>
+ <value>100</value>
+ <description> The max number of args that the script configured with
+ topology.script.file.name should be run with. Each arg is an
+ IP address.
+ </description>
+</property>
+
+<property>
+ <name>hadoop.security.uid.cache.secs</name>
+ <value>14400</value>
+ <description> NativeIO maintains a cache from UID to UserName. This is
+ the timeout for an entry in that cache. </description>
+</property>
+
+
+</configuration>
Propchange:
camel/trunk/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/osgi/hdfs/core-default.xml
------------------------------------------------------------------------------
svn:eol-style = native
Propchange:
camel/trunk/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/osgi/hdfs/core-default.xml
------------------------------------------------------------------------------
svn:keywords = Rev Date
Propchange:
camel/trunk/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/osgi/hdfs/core-default.xml
------------------------------------------------------------------------------
svn:mime-type = text/xml