KYLIN-2307 Create a branch for master with HBase 0.98 API
Project: http://git-wip-us.apache.org/repos/asf/kylin/repo
Commit: http://git-wip-us.apache.org/repos/asf/kylin/commit/960eeb1f
Tree: http://git-wip-us.apache.org/repos/asf/kylin/tree/960eeb1f
Diff: http://git-wip-us.apache.org/repos/asf/kylin/diff/960eeb1f

Branch: refs/heads/2.0.x-hbase0.98
Commit: 960eeb1f296595ee238e6314f365738c09cfc70e
Parents: 3fb74fe
Author: lidongsjtu <lid...@apache.org>
Authored: Mon Jan 23 13:17:37 2017 +0800
Committer: Hongbin Ma <mahong...@apache.org>
Committed: Thu Apr 20 10:54:42 2017 +0800

----------------------------------------------------------------------
 dev-support/test_all_against_hdp_2_2_4_2_2.sh   |  25 ++++
 dev-support/test_all_against_hdp_2_4_0_0_169.sh |  25 ----
 .../sandbox/capacity-scheduler.xml              |  17 ++-
 examples/test_case_data/sandbox/core-site.xml   |  28 +---
 examples/test_case_data/sandbox/hbase-site.xml  | 119 +++++------------
 examples/test_case_data/sandbox/hdfs-site.xml   |  84 +++++-------
 examples/test_case_data/sandbox/hive-site.xml   |  89 +++++--------
 examples/test_case_data/sandbox/mapred-site.xml |  57 +++------
 examples/test_case_data/sandbox/yarn-site.xml   | 127 +++---------------
 .../kylin/provision/BuildCubeWithEngine.java    |  17 +--
 pom.xml                                         | 117 +----------------
 .../kylin/rest/security/AclHBaseStorage.java    |   4 +-
 .../rest/security/MockAclHBaseStorage.java      |   8 +-
 .../apache/kylin/rest/security/MockHTable.java  |  95 +++++++++++---
 .../rest/security/RealAclHBaseStorage.java      |   9 +-
 .../apache/kylin/rest/service/AclService.java   |  25 ++--
 .../apache/kylin/rest/service/CubeService.java  |  35 +++--
 .../apache/kylin/rest/service/QueryService.java |  24 ++--
 .../apache/kylin/rest/service/UserService.java  |  17 +--
 .../kylin/storage/hbase/HBaseConnection.java    |  44 +++---
 .../kylin/storage/hbase/HBaseResourceStore.java |  31 +++--
 .../storage/hbase/cube/SimpleHBaseStore.java    |  20 +--
 .../hbase/cube/v2/CubeHBaseEndpointRPC.java     |  13 +-
 .../storage/hbase/cube/v2/CubeHBaseScanRPC.java |   9 +-
 .../coprocessor/endpoint/CubeVisitService.java  |   4 +-
 .../storage/hbase/steps/CubeHTableUtil.java     |  16 +--
 .../storage/hbase/steps/DeprecatedGCStep.java   |  24 ++--
 .../storage/hbase/steps/HBaseCuboidWriter.java  |   7 +-
 .../kylin/storage/hbase/steps/MergeGCStep.java  |  23 ++--
 .../storage/hbase/util/CleanHtableCLI.java      |  12 +-
 .../storage/hbase/util/CubeMigrationCLI.java    |  37 +++---
 .../hbase/util/CubeMigrationCheckCLI.java       |  17 +--
 .../hbase/util/DeployCoprocessorCLI.java        |  27 ++--
 .../hbase/util/ExtendCubeToHybridCLI.java       |   8 +-
 .../hbase/util/GridTableHBaseBenchmark.java     |  34 ++---
 .../kylin/storage/hbase/util/HBaseClean.java    |  18 ++-
 .../hbase/util/HBaseRegionSizeCalculator.java   |  35 +++--
 .../kylin/storage/hbase/util/HBaseUsage.java    |   9 +-
 .../storage/hbase/util/HbaseStreamingInput.java |  30 ++---
 .../hbase/util/HtableAlterMetadataCLI.java      |   9 +-
 .../storage/hbase/util/OrphanHBaseCleanJob.java |  19 +--
 .../kylin/storage/hbase/util/PingHBaseCLI.java  |  15 +--
 .../kylin/storage/hbase/util/RowCounterCLI.java |  11 +-
 .../storage/hbase/util/StorageCleanupJob.java   |  20 ++-
 .../storage/hbase/util/UpdateHTableHostCLI.java |  17 +--
 tool/pom.xml                                    |  10 --
 .../org/apache/kylin/tool/CubeMigrationCLI.java |  16 ++-
 .../kylin/tool/ExtendCubeToHybridCLI.java       |   8 +-
 48 files changed, 595 insertions(+), 870 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/kylin/blob/960eeb1f/dev-support/test_all_against_hdp_2_2_4_2_2.sh
----------------------------------------------------------------------
diff --git
a/dev-support/test_all_against_hdp_2_2_4_2_2.sh b/dev-support/test_all_against_hdp_2_2_4_2_2.sh new file mode 100755 index 0000000..f7780dd --- /dev/null +++ b/dev-support/test_all_against_hdp_2_2_4_2_2.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +dir=$(dirname ${0}) +cd ${dir} +cd .. + +mvn clean install -DskipTests 2>&1 | tee mci.log +mvn verify -Dhdp.version=${HDP_VERSION:-"2.2.4.2-2"} -fae 2>&1 | tee mvnverify.log http://git-wip-us.apache.org/repos/asf/kylin/blob/960eeb1f/dev-support/test_all_against_hdp_2_4_0_0_169.sh ---------------------------------------------------------------------- diff --git a/dev-support/test_all_against_hdp_2_4_0_0_169.sh b/dev-support/test_all_against_hdp_2_4_0_0_169.sh deleted file mode 100755 index 2a3d24b..0000000 --- a/dev-support/test_all_against_hdp_2_4_0_0_169.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -dir=$(dirname ${0}) -cd ${dir} -cd .. 
- -mvn clean install -DskipTests 2>&1 | tee mci.log -mvn verify -Dhdp.version=${HDP_VERSION:-"2.4.0.0-169"} -fae 2>&1 | tee mvnverify.log http://git-wip-us.apache.org/repos/asf/kylin/blob/960eeb1f/examples/test_case_data/sandbox/capacity-scheduler.xml ---------------------------------------------------------------------- diff --git a/examples/test_case_data/sandbox/capacity-scheduler.xml b/examples/test_case_data/sandbox/capacity-scheduler.xml index e042aa5..7cb985c 100644 --- a/examples/test_case_data/sandbox/capacity-scheduler.xml +++ b/examples/test_case_data/sandbox/capacity-scheduler.xml @@ -47,6 +47,16 @@ </property> <property> + <name>yarn.scheduler.capacity.root.accessible-node-labels.default.capacity</name> + <value>-1</value> + </property> + + <property> + <name>yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity</name> + <value>-1</value> + </property> + + <property> <name>yarn.scheduler.capacity.root.acl_administer_queue</name> <value>*</value> </property> @@ -57,6 +67,11 @@ </property> <property> + <name>yarn.scheduler.capacity.root.default-node-label-expression</name> + <value></value> + </property> + + <property> <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name> <value>*</value> </property> @@ -96,4 +111,4 @@ <value>default</value> </property> -</configuration> \ No newline at end of file +</configuration> http://git-wip-us.apache.org/repos/asf/kylin/blob/960eeb1f/examples/test_case_data/sandbox/core-site.xml ---------------------------------------------------------------------- diff --git a/examples/test_case_data/sandbox/core-site.xml b/examples/test_case_data/sandbox/core-site.xml index a4ad5c6..0c5f62b 100644 --- a/examples/test_case_data/sandbox/core-site.xml +++ b/examples/test_case_data/sandbox/core-site.xml @@ -19,6 +19,7 @@ <property> <name>fs.defaultFS</name> <value>hdfs://sandbox.hortonworks.com:8020</value> + <final>true</final> </property> <property> @@ -38,7 +39,7 @@ <property> <name>hadoop.proxyuser.falcon.groups</name> - <value>*</value> + <value>users</value> </property> <property> @@ -48,7 +49,7 @@ <property> <name>hadoop.proxyuser.hbase.groups</name> - <value>*</value> + <value>users</value> </property> <property> @@ -67,23 +68,13 @@ </property> <property> - <name>hadoop.proxyuser.hdfs.groups</name> - <value>*</value> - </property> - - <property> - <name>hadoop.proxyuser.hdfs.hosts</name> - <value>*</value> - </property> - - <property> <name>hadoop.proxyuser.hive.groups</name> - <value>*</value> + <value>users</value> </property> <property> <name>hadoop.proxyuser.hive.hosts</name> - <value>sandbox.hortonworks.com</value> + <value>*</value> </property> <property> @@ -132,15 +123,8 @@ </property> <property> - <name>hadoop.security.key.provider.path</name> - <value></value> - </property> - - <property> <name>io.compression.codecs</name> - <value> - org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec - </value> + <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec</value> </property> <property> http://git-wip-us.apache.org/repos/asf/kylin/blob/960eeb1f/examples/test_case_data/sandbox/hbase-site.xml ---------------------------------------------------------------------- diff --git a/examples/test_case_data/sandbox/hbase-site.xml b/examples/test_case_data/sandbox/hbase-site.xml index 568de2e..46d5345 100644 --- a/examples/test_case_data/sandbox/hbase-site.xml +++ 
b/examples/test_case_data/sandbox/hbase-site.xml @@ -22,33 +22,8 @@ </property> <property> - <name>hbase.bucketcache.ioengine</name> - <value></value> - </property> - - <property> - <name>hbase.bucketcache.percentage.in.combinedcache</name> - <value></value> - </property> - - <property> - <name>hbase.bucketcache.size</name> - <value></value> - </property> - - <property> - <name>hbase.bulkload.staging.dir</name> - <value>/apps/hbase/staging</value> - </property> - - <property> <name>hbase.client.keyvalue.maxsize</name> - <value>1048576</value> - </property> - - <property> - <name>hbase.client.retries.number</name> - <value>35</value> + <value>10485760</value> </property> <property> @@ -63,19 +38,12 @@ <property> <name>hbase.coprocessor.master.classes</name> - <value>org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor</value> + <value>com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor</value> </property> <property> <name>hbase.coprocessor.region.classes</name> - <value> - org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint,org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor - </value> - </property> - - <property> - <name>hbase.coprocessor.regionserver.classes</name> - <value></value> + <value>com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor</value> </property> <property> @@ -119,11 +87,6 @@ </property> <property> - <name>hbase.hstore.compaction.max</name> - <value>10</value> - </property> - - <property> <name>hbase.hstore.compactionThreshold</name> <value>3</value> </property> @@ -140,42 +103,32 @@ <property> <name>hbase.master.info.port</name> - <value>16010</value> + <value>60010</value> </property> <property> <name>hbase.master.port</name> - <value>16000</value> + <value>60000</value> </property> <property> - <name>hbase.region.server.rpc.scheduler.factory.class</name> - <value></value> + <name>hbase.regionserver.global.memstore.lowerLimit</name> + <value>0.38</value> </property> <property> - <name>hbase.regionserver.global.memstore.size</name> + <name>hbase.regionserver.global.memstore.upperLimit</name> <value>0.4</value> </property> <property> <name>hbase.regionserver.handler.count</name> - <value>30</value> + <value>60</value> </property> <property> <name>hbase.regionserver.info.port</name> - <value>16030</value> - </property> - - <property> - <name>hbase.regionserver.port</name> - <value>16020</value> - </property> - - <property> - <name>hbase.regionserver.wal.codec</name> - <value>org.apache.hadoop.hbase.regionserver.wal.WALCellCodec</value> + <value>60030</value> </property> <property> @@ -184,26 +137,11 @@ </property> <property> - <name>hbase.rpc.controllerfactory.class</name> - <value></value> - </property> - - <property> - <name>hbase.rpc.engine</name> - <value>org.apache.hadoop.hbase.ipc.SecureRpcEngine</value> - </property> - - <property> <name>hbase.rpc.protection</name> <value>PRIVACY</value> </property> <property> - <name>hbase.rpc.timeout</name> - <value>90000</value> - </property> - - <property> <name>hbase.security.authentication</name> <value>simple</value> </property> @@ -220,7 +158,7 @@ <property> <name>hbase.tmp.dir</name> - <value>/tmp/hbase-${user.name}</value> + <value>/hadoop/hbase</value> </property> <property> @@ -240,27 +178,34 @@ <property> <name>hfile.block.cache.size</name> - <value>0.4</value> - </property> - - <property> - <name>phoenix.functions.allowUserDefinedFunctions</name> - <value></value> - </property> - - <property> - <name>phoenix.query.timeoutMs</name> - 
<value>60000</value> + <value>0.40</value> </property> <property> <name>zookeeper.session.timeout</name> - <value>60000</value> + <value>30000</value> </property> <property> <name>zookeeper.znode.parent</name> <value>/hbase-unsecure</value> </property> - -</configuration> \ No newline at end of file + <property> + <name>hbase.client.pause</name> + <value>100</value> + <description>General client pause value. Used mostly as value to wait + before running a retry of a failed get, region lookup, etc. + See hbase.client.retries.number for description of how we backoff from + this initial pause amount and how this pause works w/ retries.</description> + </property> + <property> + <name>hbase.client.retries.number</name> + <value>5</value> + <description>Maximum retries. Used as maximum for all retryable + operations such as the getting of a cell's value, starting a row update, + etc. Retry interval is a rough function based on hbase.client.pause. At + first we retry at this interval but then with backoff, we pretty quickly reach + retrying every ten seconds. See HConstants#RETRY_BACKOFF for how the backup + ramps up. Change this setting and hbase.client.pause to suit your workload.</description> + </property> +</configuration> http://git-wip-us.apache.org/repos/asf/kylin/blob/960eeb1f/examples/test_case_data/sandbox/hdfs-site.xml ---------------------------------------------------------------------- diff --git a/examples/test_case_data/sandbox/hdfs-site.xml b/examples/test_case_data/sandbox/hdfs-site.xml index c06222e..d58f80d 100644 --- a/examples/test_case_data/sandbox/hdfs-site.xml +++ b/examples/test_case_data/sandbox/hdfs-site.xml @@ -22,7 +22,12 @@ <property> <name>dfs.block.access.token.enable</name> - <value>true</value> + <value>false</value> + </property> + + <property> + <name>dfs.block.size</name> + <value>34217472</value> </property> <property> @@ -46,21 +51,11 @@ </property> <property> - <name>dfs.client.retry.policy.enabled</name> - <value>false</value> - </property> - - <property> <name>dfs.cluster.administrators</name> <value>hdfs</value> </property> <property> - <name>dfs.content-summary.limit</name> - <value>5000</value> - </property> - - <property> <name>dfs.datanode.address</name> <value>0.0.0.0:50010</value> </property> @@ -73,6 +68,7 @@ <property> <name>dfs.datanode.data.dir</name> <value>/hadoop/hdfs/data</value> + <final>true</final> </property> <property> @@ -88,6 +84,7 @@ <property> <name>dfs.datanode.failed.volumes.tolerated</name> <value>0</value> + <final>true</final> </property> <property> @@ -111,18 +108,13 @@ </property> <property> - <name>dfs.domain.socket.path</name> - <value>/var/lib/hadoop-hdfs/dn_socket</value> - </property> - - <property> - <name>dfs.encrypt.data.transfer.cipher.suites</name> - <value>AES/CTR/NoPadding</value> + <name>dfs.datanode.max.xcievers</name> + <value>1024</value> </property> <property> - <name>dfs.encryption.key.provider.uri</name> - <value></value> + <name>dfs.domain.socket.path</name> + <value>/var/lib/hadoop-hdfs/dn_socket</value> </property> <property> @@ -162,12 +154,7 @@ <property> <name>dfs.namenode.accesstime.precision</name> - <value>0</value> - </property> - - <property> - <name>dfs.namenode.audit.log.async</name> - <value>true</value> + <value>3600000</value> </property> <property> @@ -201,11 +188,6 @@ </property> <property> - <name>dfs.namenode.fslock.fair</name> - <value>false</value> - </property> - - <property> <name>dfs.namenode.handler.count</name> <value>100</value> </property> @@ -213,6 +195,7 @@ <property> 
<name>dfs.namenode.http-address</name> <value>sandbox.hortonworks.com:50070</value> + <final>true</final> </property> <property> @@ -221,13 +204,9 @@ </property> <property> - <name>dfs.namenode.inode.attributes.provider.class</name> - <value>org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer</value> - </property> - - <property> <name>dfs.namenode.name.dir</name> <value>/hadoop/hdfs/namenode</value> + <final>true</final> </property> <property> @@ -236,13 +215,8 @@ </property> <property> - <name>dfs.namenode.rpc-address</name> - <value>sandbox.hortonworks.com:8020</value> - </property> - - <property> <name>dfs.namenode.safemode.threshold-pct</name> - <value>0.999</value> + <value>1.0f</value> </property> <property> @@ -266,6 +240,16 @@ </property> <property> + <name>dfs.nfs.exports.allowed.hosts</name> + <value>* rw</value> + </property> + + <property> + <name>dfs.nfs3.dump.dir</name> + <value>/tmp/.hdfs-nfs</value> + </property> + + <property> <name>dfs.permissions.enabled</name> <value>true</value> </property> @@ -277,7 +261,7 @@ <property> <name>dfs.replication</name> - <value>3</value> + <value>1</value> </property> <property> @@ -288,11 +272,13 @@ <property> <name>dfs.support.append</name> <value>true</value> + <final>true</final> </property> <property> <name>dfs.webhdfs.enabled</name> <value>true</value> + <final>true</final> </property> <property> @@ -300,14 +286,4 @@ <value>022</value> </property> - <property> - <name>nfs.exports.allowed.hosts</name> - <value>* rw</value> - </property> - - <property> - <name>nfs.file.dump.dir</name> - <value>/tmp/.hdfs-nfs</value> - </property> - -</configuration> \ No newline at end of file +</configuration> http://git-wip-us.apache.org/repos/asf/kylin/blob/960eeb1f/examples/test_case_data/sandbox/hive-site.xml ---------------------------------------------------------------------- diff --git a/examples/test_case_data/sandbox/hive-site.xml b/examples/test_case_data/sandbox/hive-site.xml index a8c210e..1e78107 100644 --- a/examples/test_case_data/sandbox/hive-site.xml +++ b/examples/test_case_data/sandbox/hive-site.xml @@ -22,46 +22,11 @@ </property> <property> - <name>atlas.cluster.name</name> - <value>Sandbox</value> - </property> - - <property> - <name>atlas.hook.hive.maxThreads</name> - <value>1</value> - </property> - - <property> - <name>atlas.hook.hive.minThreads</name> - <value>1</value> - </property> - - <property> - <name>atlas.hook.hive.synchronous</name> - <value>true</value> - </property> - - <property> - <name>atlas.rest.address</name> - <value>http://sandbox.hortonworks.com:21000</value> - </property> - - <property> - <name>datanucleus.autoCreateSchema</name> - <value>false</value> - </property> - - <property> <name>datanucleus.cache.level2.type</name> <value>none</value> </property> <property> - <name>datanucleus.fixedDatastore</name> - <value>true</value> - </property> - - <property> <name>hive.auto.convert.join</name> <value>true</value> </property> @@ -73,7 +38,7 @@ <property> <name>hive.auto.convert.join.noconditionaltask.size</name> - <value>357913941</value> + <value>1000000000</value> </property> <property> @@ -162,16 +127,6 @@ </property> <property> - <name>hive.default.fileformat</name> - <value>TextFile</value> - </property> - - <property> - <name>hive.default.fileformat.managed</name> - <value>TextFile</value> - </property> - - <property> <name>hive.enforce.bucketing</name> <value>true</value> </property> @@ -207,6 +162,11 @@ </property> <property> + <name>hive.exec.failure.hooks</name> + 
<value>org.apache.hadoop.hive.ql.hooks.ATSHook</value> + </property> + + <property> <name>hive.exec.max.created.files</name> <value>100000</value> </property> @@ -237,11 +197,6 @@ </property> <property> - <name>hive.exec.orc.encoding.strategy</name> - <value>SPEED</value> - </property> - - <property> <name>hive.exec.parallel</name> <value>false</value> </property> @@ -252,6 +207,16 @@ </property> <property> + <name>hive.exec.post.hooks</name> + <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value> + </property> + + <property> + <name>hive.exec.pre.hooks</name> + <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value> + </property> + + <property> <name>hive.exec.reducers.bytes.per.reducer</name> <value>67108864</value> </property> @@ -297,6 +262,11 @@ </property> <property> + <name>hive.heapsize</name> + <value>250</value> + </property> + + <property> <name>hive.limit.optimize.enable</name> <value>true</value> </property> @@ -508,7 +478,7 @@ <property> <name>hive.prewarm.numcontainers</name> - <value>3</value> + <value>10</value> </property> <property> @@ -518,7 +488,7 @@ <property> <name>hive.security.authorization.enabled</name> - <value>true</value> + <value>false</value> </property> <property> @@ -538,7 +508,7 @@ <property> <name>hive.security.metastore.authorization.manager</name> - <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value> + <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider,org.apache.hadoop.hive.ql.security.authorization.MetaStoreAuthzAPIAuthorizerEmbedOnly</value> </property> <property> @@ -563,7 +533,12 @@ <property> <name>hive.server2.enable.doAs</name> - <value>false</value> + <value>true</value> + </property> + + <property> + <name>hive.server2.enable.impersonation</name> + <value>true</value> </property> <property> @@ -573,7 +548,7 @@ <property> <name>hive.server2.logging.operation.log.location</name> - <value>/tmp/hive/operation_logs</value> + <value>${system:java.io.tmpdir}/${system:user.name}/operation_logs</value> </property> <property> @@ -678,7 +653,7 @@ <property> <name>hive.tez.container.size</name> - <value>1024</value> + <value>250</value> </property> <property> http://git-wip-us.apache.org/repos/asf/kylin/blob/960eeb1f/examples/test_case_data/sandbox/mapred-site.xml ---------------------------------------------------------------------- diff --git a/examples/test_case_data/sandbox/mapred-site.xml b/examples/test_case_data/sandbox/mapred-site.xml index be470f9..e90f594 100644 --- a/examples/test_case_data/sandbox/mapred-site.xml +++ b/examples/test_case_data/sandbox/mapred-site.xml @@ -18,7 +18,7 @@ <property> <name>io.sort.mb</name> - <value>64</value> + <value>128</value> </property> <property> @@ -27,13 +27,13 @@ </property> <property> - <name>mapred.job.map.memory.mb</name> - <value>250</value> + <name>mapreduce.map.memory.mb</name> + <value>512</value> </property> <property> - <name>mapred.job.reduce.memory.mb</name> - <value>250</value> + <name>mapreduce.reduce.memory.mb</name> + <value>512</value> </property> <property> @@ -48,9 +48,7 @@ <property> <name>mapreduce.admin.user.env</name> - <value> - LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64 - </value> + <value>LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64</value> </property> <property> @@ -60,9 +58,7 @@ <property> <name>mapreduce.application.classpath</name> - <value> - 
$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:$PWD/mr-framework/hadoop/share/hadoop/tools/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure - </value> + <value>/tmp/kylin/*,$HADOOP_CONF_DIR,/usr/hdp/${hdp.version}/hbase/lib/hbase-common.jar,/usr/hdp/current/hive-client/conf/,$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/usr/hdp/${hdp.version}/hadoop/lib/snappy-java-1.0.4.1.jar:/etc/hadoop/conf/secure</value> </property> <property> @@ -81,18 +77,14 @@ </property> <property> - <name>mapreduce.job.counters.max</name> - <value>130</value> - </property> - - <property> <name>mapreduce.job.emit-timeline-data</name> <value>false</value> </property> + <!--the default value on hdp is 0.05, however for test environments we need to be conservative on resource --> <property> <name>mapreduce.job.reduce.slowstart.completedmaps</name> - <value>0.05</value> + <value>1</value> </property> <property> @@ -116,28 +108,13 @@ </property> <property> - <name>mapreduce.jobhistory.recovery.enable</name> - <value>true</value> - </property> - - <property> - <name>mapreduce.jobhistory.recovery.store.class</name> - <value>org.apache.hadoop.mapreduce.v2.hs.HistoryServerLeveldbStateStoreService</value> - </property> - - <property> - <name>mapreduce.jobhistory.recovery.store.leveldb.path</name> - <value>/hadoop/mapreduce/jhs</value> - </property> - - <property> <name>mapreduce.jobhistory.webapp.address</name> <value>sandbox.hortonworks.com:19888</value> </property> <property> <name>mapreduce.map.java.opts</name> - <value>-Xmx1228m</value> + <value>-Xmx512m</value> </property> <property> @@ -147,7 +124,7 @@ <property> <name>mapreduce.map.memory.mb</name> - <value>1536</value> + <value>512</value> </property> <property> @@ -182,7 +159,7 @@ <property> <name>mapreduce.reduce.java.opts</name> - <value>-Xmx1638m</value> + <value>-Xmx200m</value> </property> <property> @@ -192,7 +169,7 @@ <property> <name>mapreduce.reduce.memory.mb</name> - <value>2048</value> + <value>512</value> </property> <property> @@ -242,7 +219,7 @@ <property> <name>mapreduce.task.io.sort.mb</name> - <value>859</value> + <value>128</value> </property> <property> @@ -257,7 +234,7 @@ <property> <name>yarn.app.mapreduce.am.command-opts</name> - <value>-Xmx819m -Dhdp.version=${hdp.version}</value> + <value>-Xmx512m</value> </property> <property> @@ -267,7 +244,7 @@ <property> <name>yarn.app.mapreduce.am.resource.mb</name> - <value>1024</value> + <value>512</value> </property> <property> @@ -275,4 +252,4 @@ <value>/user</value> </property> -</configuration> \ No newline at end of file +</configuration> http://git-wip-us.apache.org/repos/asf/kylin/blob/960eeb1f/examples/test_case_data/sandbox/yarn-site.xml 
---------------------------------------------------------------------- diff --git a/examples/test_case_data/sandbox/yarn-site.xml b/examples/test_case_data/sandbox/yarn-site.xml index ebdf44a..8256158 100644 --- a/examples/test_case_data/sandbox/yarn-site.xml +++ b/examples/test_case_data/sandbox/yarn-site.xml @@ -18,7 +18,7 @@ <property> <name>hadoop.registry.rm.enabled</name> - <value>true</value> + <value>false</value> </property> <property> @@ -28,29 +28,22 @@ <property> <name>yarn.acl.enable</name> - <value>true</value> + <value>false</value> </property> <property> <name>yarn.admin.acl</name> - <value>*</value> + <value></value> </property> <property> <name>yarn.application.classpath</name> - <value> - $HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/* - </value> - </property> - - <property> - <name>yarn.authorization-provider</name> - <value>org.apache.ranger.authorization.yarn.authorizer.RangerYarnAuthorizer</value> + <value>$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*</value> </property> <property> <name>yarn.client.nodemanager-connect.max-wait-ms</name> - <value>120000</value> + <value>60000</value> </property> <property> @@ -79,11 +72,6 @@ </property> <property> - <name>yarn.node-labels.enabled</name> - <value>false</value> - </property> - - <property> <name>yarn.node-labels.fs-store.retry-policy-spec</name> <value>2000, 500</value> </property> @@ -94,6 +82,11 @@ </property> <property> + <name>yarn.node-labels.manager-class</name> + <value>org.apache.hadoop.yarn.server.resourcemanager.nodelabels.MemoryRMNodeLabelsManager</value> + </property> + + <property> <name>yarn.nodemanager.address</name> <value>0.0.0.0:45454</value> </property> @@ -105,7 +98,7 @@ <property> <name>yarn.nodemanager.aux-services</name> - <value>mapreduce_shuffle,spark_shuffle</value> + <value>mapreduce_shuffle</value> </property> <property> @@ -114,11 +107,6 @@ </property> <property> - <name>yarn.nodemanager.aux-services.spark_shuffle.class</name> - <value>org.apache.spark.network.yarn.YarnShuffleService</value> - </property> - - <property> <name>yarn.nodemanager.bind-host</name> <value>0.0.0.0</value> </property> @@ -160,7 +148,7 @@ <property> <name>yarn.nodemanager.health-checker.script.timeout-ms</name> - <value>120000</value> + <value>60000</value> </property> <property> @@ -255,12 +243,12 @@ <property> <name>yarn.nodemanager.resource.memory-mb</name> - <value>7168</value> + <value>9216</value> </property> <property> <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name> - <value>80</value> + <value>100</value> </property> <property> @@ -349,11 +337,6 @@ </property> <property> - <name>yarn.resourcemanager.scheduler.monitor.enable</name> - <value>false</value> - </property> - - <property> <name>yarn.resourcemanager.state-store.max-completed-applications</name> <value>${yarn.resourcemanager.max-completed-applications}</value> </property> @@ -385,7 +368,7 @@ <property> <name>yarn.resourcemanager.webapp.https.address</name> - <value>sandbox.hortonworks.com:8090</value> + <value>localhost:8090</value> </property> <property> @@ -425,7 +408,7 @@ <property> <name>yarn.resourcemanager.zk-address</name> - 
<value>sandbox.hortonworks.com:2181</value> + <value>localhost:2181</value> </property> <property> @@ -450,22 +433,12 @@ <property> <name>yarn.scheduler.maximum-allocation-mb</name> - <value>7168</value> - </property> - - <property> - <name>yarn.scheduler.maximum-allocation-vcores</name> - <value>3</value> + <value>9216</value> </property> <property> <name>yarn.scheduler.minimum-allocation-mb</name> - <value>1024</value> - </property> - - <property> - <name>yarn.scheduler.minimum-allocation-vcores</name> - <value>1</value> + <value>1536</value> </property> <property> @@ -494,41 +467,6 @@ </property> <property> - <name>yarn.timeline-service.entity-group-fs-store.active-dir</name> - <value>/ats/active/</value> - </property> - - <property> - <name>yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds</name> - <value>3600</value> - </property> - - <property> - <name>yarn.timeline-service.entity-group-fs-store.done-dir</name> - <value>/ats/done/</value> - </property> - - <property> - <name>yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes</name> - <value>org.apache.tez.dag.history.logging.ats.TimelineCachePluginImpl</value> - </property> - - <property> - <name>yarn.timeline-service.entity-group-fs-store.retain-seconds</name> - <value>604800</value> - </property> - - <property> - <name>yarn.timeline-service.entity-group-fs-store.scan-interval-seconds</name> - <value>60</value> - </property> - - <property> - <name>yarn.timeline-service.entity-group-fs-store.summary-store</name> - <value>org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore</value> - </property> - - <property> <name>yarn.timeline-service.generic-application-history.store-class</name> <value>org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore</value> </property> @@ -544,11 +482,6 @@ </property> <property> - <name>yarn.timeline-service.leveldb-state-store.path</name> - <value>/hadoop/yarn/timeline</value> - </property> - - <property> <name>yarn.timeline-service.leveldb-timeline-store.path</name> <value>/hadoop/yarn/timeline</value> </property> @@ -574,23 +507,8 @@ </property> <property> - <name>yarn.timeline-service.plugin.enabled</name> - <value>true</value> - </property> - - <property> - <name>yarn.timeline-service.recovery.enabled</name> - <value>true</value> - </property> - - <property> - <name>yarn.timeline-service.state-store-class</name> - <value>org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore</value> - </property> - - <property> <name>yarn.timeline-service.store-class</name> - <value>org.apache.hadoop.yarn.server.timeline.EntityGroupFSTimelineStore</value> + <value>org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore</value> </property> <property> @@ -604,11 +522,6 @@ </property> <property> - <name>yarn.timeline-service.version</name> - <value>1.5</value> - </property> - - <property> <name>yarn.timeline-service.webapp.address</name> <value>sandbox.hortonworks.com:8188</value> </property> @@ -618,4 +531,4 @@ <value>sandbox.hortonworks.com:8190</value> </property> -</configuration> \ No newline at end of file +</configuration> http://git-wip-us.apache.org/repos/asf/kylin/blob/960eeb1f/kylin-it/src/test/java/org/apache/kylin/provision/BuildCubeWithEngine.java ---------------------------------------------------------------------- diff --git a/kylin-it/src/test/java/org/apache/kylin/provision/BuildCubeWithEngine.java b/kylin-it/src/test/java/org/apache/kylin/provision/BuildCubeWithEngine.java index df6c45e..d819098 
100644 --- a/kylin-it/src/test/java/org/apache/kylin/provision/BuildCubeWithEngine.java +++ b/kylin-it/src/test/java/org/apache/kylin/provision/BuildCubeWithEngine.java @@ -32,9 +32,11 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.client.HTable; import org.apache.kylin.common.KylinConfig; import org.apache.kylin.common.util.ClassUtil; import org.apache.kylin.common.util.HBaseMetadataTestCase; @@ -58,7 +60,6 @@ import org.apache.kylin.job.impl.threadpool.DefaultScheduler; import org.apache.kylin.source.ISource; import org.apache.kylin.source.SourceFactory; import org.apache.kylin.source.SourcePartition; -import org.apache.kylin.storage.hbase.HBaseConnection; import org.apache.kylin.storage.hbase.util.HBaseRegionSizeCalculator; import org.apache.kylin.storage.hbase.util.ZookeeperJobLock; import org.apache.kylin.tool.StorageCleanupJob; @@ -95,10 +96,10 @@ public class BuildCubeWithEngine { logger.error("error", e); exitCode = 1; } - + long millis = System.currentTimeMillis() - start; System.out.println("Time elapsed: " + (millis / 1000) + " sec - in " + BuildCubeWithEngine.class.getName()); - + System.exit(exitCode); } @@ -359,10 +360,10 @@ public class BuildCubeWithEngine { @SuppressWarnings("unused") private void checkHFilesInHBase(CubeSegment segment) throws IOException { - try (Connection conn = HBaseConnection.get(KylinConfig.getInstanceFromEnv().getStorageUrl())) { - String tableName = segment.getStorageLocationIdentifier(); - - HBaseRegionSizeCalculator cal = new HBaseRegionSizeCalculator(tableName, conn); + Configuration conf = HBaseConfiguration.create(HadoopUtil.getCurrentConfiguration()); + String tableName = segment.getStorageLocationIdentifier(); + try (HTable table = new HTable(conf, tableName)) { + HBaseRegionSizeCalculator cal = new HBaseRegionSizeCalculator(table); Map<byte[], Long> sizeMap = cal.getRegionSizeMap(); long totalSize = 0; for (Long size : sizeMap.values()) { http://git-wip-us.apache.org/repos/asf/kylin/blob/960eeb1f/pom.xml ---------------------------------------------------------------------- diff --git a/pom.xml b/pom.xml index 40ccd0c..4d6c436 100644 --- a/pom.xml +++ b/pom.xml @@ -46,15 +46,15 @@ <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding> <!-- Hadoop versions --> - <hadoop2.version>2.7.1</hadoop2.version> - <yarn.version>2.7.1</yarn.version> + <hadoop2.version>2.6.0</hadoop2.version> + <yarn.version>2.6.0</yarn.version> <!-- Hive versions --> - <hive.version>1.2.1</hive.version> - <hive-hcatalog.version>1.2.1</hive-hcatalog.version> + <hive.version>0.14.0</hive.version> + <hive-hcatalog.version>0.14.0</hive-hcatalog.version> <!-- HBase versions --> - <hbase-hadoop2.version>1.1.1</hbase-hadoop2.version> + <hbase-hadoop2.version>0.98.8-hadoop2</hbase-hadoop2.version> <!-- Kafka versions --> <kafka.version>0.10.1.0</kafka.version> @@ -71,7 +71,7 @@ <!-- Hadoop Common deps, keep compatible with hadoop2.version --> <zookeeper.version>3.4.8</zookeeper.version> - <curator.version>2.7.1</curator.version> + <curator.version>2.6.0</curator.version> <jsr305.version>3.0.1</jsr305.version> <guava.version>14.0</guava.version> <jsch.version>0.1.53</jsch.version> @@ -842,11 +842,6 @@ <id>conjars</id> 
<url>http://conjars.org/repo/</url> </repository> - - <repository> - <id>cloudera</id> - <url>https://repository.cloudera.com/artifactory/cloudera-repos/</url> - </repository> </repositories> <build> @@ -1200,106 +1195,6 @@ </build> </profile> <profile> - <id>cdh5.7</id> - <properties> - <hadoop2.version>2.6.0-cdh5.7.0</hadoop2.version> - <yarn.version>2.6.0-cdh5.7.0</yarn.version> - <hive.version>1.1.0-cdh5.7.0</hive.version> - <hive-hcatalog.version>1.1.0-cdh5.7.0</hive-hcatalog.version> - <hbase-hadoop2.version>1.2.0-cdh5.7.0</hbase-hadoop2.version> - <zookeeper.version>3.4.5-cdh5.7.0</zookeeper.version> - </properties> - <build> - <plugins> - <plugin> - <groupId>org.apache.maven.plugins</groupId> - <artifactId>maven-compiler-plugin</artifactId> - <configuration> - <fork>true</fork> - <meminitial>1024m</meminitial> - <maxmem>2048m</maxmem> - </configuration> - </plugin> - - <plugin> - <groupId>org.apache.maven.plugins</groupId> - <artifactId>maven-dependency-plugin</artifactId> - <executions> - <execution> - <id>copy-jamm</id> - <goals> - <goal>copy</goal> - </goals> - <phase>generate-test-resources</phase> - <configuration> - <artifactItems> - <artifactItem> - <groupId>com.github.jbellis</groupId> - <artifactId>jamm</artifactId> - <outputDirectory>${project.build.testOutputDirectory}</outputDirectory> - <destFileName>jamm.jar</destFileName> - </artifactItem> - </artifactItems> - </configuration> - </execution> - </executions> - </plugin> - - <plugin> - <groupId>org.jacoco</groupId> - <artifactId>jacoco-maven-plugin</artifactId> - <configuration> - <append>true</append> - <destFile> - ${sonar.jacoco.reportPath} - </destFile> - </configuration> - <executions> - <execution> - <id>pre-test</id> - <goals> - <goal>prepare-agent</goal> - </goals> - <configuration> - <propertyName>surefireArgLine</propertyName> - </configuration> - </execution> - <execution> - <id>post-test</id> - <phase>test</phase> - <goals> - <goal>report</goal> - </goals> - </execution> - </executions> - </plugin> - - <plugin> - <groupId>org.apache.maven.plugins</groupId> - <artifactId>maven-surefire-plugin</artifactId> - <version>2.19.1</version> - <configuration> - <reportsDirectory>${project.basedir}/../target/surefire-reports</reportsDirectory> - <excludes> - <exclude>**/IT*.java</exclude> - </excludes> - <systemProperties> - <property> - <name>buildCubeUsingProvidedData</name> - <value>false</value> - </property> - <property> - <name>log4j.configuration</name> - <value>file:${project.basedir}/../build/conf/kylin-tools-log4j.properties</value> - </property> - </systemProperties> - <argLine>-javaagent:${project.build.testOutputDirectory}/jamm.jar ${argLine} ${surefireArgLine}</argLine> - </configuration> - </plugin> - </plugins> - </build> - </profile> - <profile> <!-- This profile adds/overrides few features of the 'apache-release' profile in the parent pom. 
--> <id>apache-release</id> http://git-wip-us.apache.org/repos/asf/kylin/blob/960eeb1f/server-base/src/main/java/org/apache/kylin/rest/security/AclHBaseStorage.java ---------------------------------------------------------------------- diff --git a/server-base/src/main/java/org/apache/kylin/rest/security/AclHBaseStorage.java b/server-base/src/main/java/org/apache/kylin/rest/security/AclHBaseStorage.java index 8095bf8..ea68855 100644 --- a/server-base/src/main/java/org/apache/kylin/rest/security/AclHBaseStorage.java +++ b/server-base/src/main/java/org/apache/kylin/rest/security/AclHBaseStorage.java @@ -20,7 +20,7 @@ package org.apache.kylin.rest.security; import java.io.IOException; -import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.HTableInterface; /** */ @@ -36,6 +36,6 @@ public interface AclHBaseStorage { String prepareHBaseTable(Class<?> clazz) throws IOException; - Table getTable(String tableName) throws IOException; + HTableInterface getTable(String tableName) throws IOException; } http://git-wip-us.apache.org/repos/asf/kylin/blob/960eeb1f/server-base/src/main/java/org/apache/kylin/rest/security/MockAclHBaseStorage.java ---------------------------------------------------------------------- diff --git a/server-base/src/main/java/org/apache/kylin/rest/security/MockAclHBaseStorage.java b/server-base/src/main/java/org/apache/kylin/rest/security/MockAclHBaseStorage.java index cc76b87..d9326f5 100644 --- a/server-base/src/main/java/org/apache/kylin/rest/security/MockAclHBaseStorage.java +++ b/server-base/src/main/java/org/apache/kylin/rest/security/MockAclHBaseStorage.java @@ -21,7 +21,7 @@ package org.apache.kylin.rest.security; import java.io.IOException; import org.apache.commons.lang.StringUtils; -import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.kylin.common.KylinConfig; import org.apache.kylin.rest.service.AclService; import org.apache.kylin.rest.service.QueryService; @@ -34,8 +34,8 @@ public class MockAclHBaseStorage implements AclHBaseStorage { private static final String aclTableName = "MOCK-ACL-TABLE"; private static final String userTableName = "MOCK-USER-TABLE"; - private Table mockedAclTable; - private Table mockedUserTable; + private HTableInterface mockedAclTable; + private HTableInterface mockedUserTable; private RealAclHBaseStorage realAcl; public MockAclHBaseStorage() { @@ -65,7 +65,7 @@ public class MockAclHBaseStorage implements AclHBaseStorage { } @Override - public Table getTable(String tableName) throws IOException { + public HTableInterface getTable(String tableName) throws IOException { if (realAcl != null) { return realAcl.getTable(tableName); } http://git-wip-us.apache.org/repos/asf/kylin/blob/960eeb1f/server-base/src/main/java/org/apache/kylin/rest/security/MockHTable.java ---------------------------------------------------------------------- diff --git a/server-base/src/main/java/org/apache/kylin/rest/security/MockHTable.java b/server-base/src/main/java/org/apache/kylin/rest/security/MockHTable.java index 972eea9..d0aa0ed 100644 --- a/server-base/src/main/java/org/apache/kylin/rest/security/MockHTable.java +++ b/server-base/src/main/java/org/apache/kylin/rest/security/MockHTable.java @@ -51,7 +51,7 @@ import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Table; +import 
org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; @@ -91,7 +91,7 @@ import com.google.protobuf.ServiceException; * <li>remove some methods for loading data, checking values ...</li> * </ul> */ -public class MockHTable implements Table { +public class MockHTable implements HTableInterface { private final String tableName; private final List<String> columnFamilies = new ArrayList<>(); @@ -114,6 +114,14 @@ public class MockHTable implements Table { this.columnFamilies.add(columnFamily); } + /** + * {@inheritDoc} + */ + @Override + public byte[] getTableName() { + return tableName.getBytes(); + } + @Override public TableName getName() { return null; @@ -192,8 +200,8 @@ public class MockHTable implements Table { } @Override - public boolean[] existsAll(List<Get> list) throws IOException { - return new boolean[0]; + public Boolean[] exists(List<Get> gets) throws IOException { + return new Boolean[0]; } /** @@ -298,6 +306,15 @@ public class MockHTable implements Table { * {@inheritDoc} */ @Override + public Result getRowOrBefore(byte[] row, byte[] family) throws IOException { + // FIXME: implement + return null; + } + + /** + * {@inheritDoc} + */ + @Override public ResultScanner getScanner(Scan scan) throws IOException { final List<Result> ret = new ArrayList<Result>(); byte[] st = scan.getStartRow(); @@ -429,7 +446,7 @@ public class MockHTable implements Table { */ } if (filter.hasFilterRow() && !filteredOnRowKey) { - filter.filterRow(); + filter.filterRow(nkvs); } if (filter.filterRow() || filteredOnRowKey) { nkvs.clear(); @@ -518,11 +535,6 @@ public class MockHTable implements Table { return false; } - @Override - public boolean checkAndPut(byte[] bytes, byte[] bytes1, byte[] bytes2, CompareFilter.CompareOp compareOp, byte[] bytes3, Put put) throws IOException { - return false; - } - /** * {@inheritDoc} */ @@ -543,7 +555,7 @@ public class MockHTable implements Table { continue; } for (KeyValue kv : delete.getFamilyMap().get(family)) { - if (kv.isDelete()) { + if (kv.isDeleteFamily()) { data.get(row).get(kv.getFamily()).clear(); } else { data.get(row).get(kv.getFamily()).remove(kv.getQualifier()); @@ -580,11 +592,6 @@ public class MockHTable implements Table { return false; } - @Override - public boolean checkAndDelete(byte[] bytes, byte[] bytes1, byte[] bytes2, CompareFilter.CompareOp compareOp, byte[] bytes3, Delete delete) throws IOException { - return false; - } - /** * {@inheritDoc} */ @@ -598,7 +605,7 @@ public class MockHTable implements Table { */ @Override public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount) throws IOException { - return incrementColumnValue(row, family, qualifier, amount, null); + return incrementColumnValue(row, family, qualifier, amount, true); } @Override @@ -610,6 +617,37 @@ public class MockHTable implements Table { * {@inheritDoc} */ @Override + public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, boolean writeToWAL) throws IOException { + if (check(row, family, qualifier, null)) { + Put put = new Put(row); + put.add(family, qualifier, Bytes.toBytes(amount)); + put(put); + return amount; + } + long newValue = Bytes.toLong(data.get(row).get(family).get(qualifier).lastEntry().getValue()) + amount; + data.get(row).get(family).get(qualifier).put(System.currentTimeMillis(), Bytes.toBytes(newValue)); + return newValue; + } + + /** + * {@inheritDoc} + */ + 
@Override + public boolean isAutoFlush() { + return true; + } + + /** + * {@inheritDoc} + */ + @Override + public void flushCommits() throws IOException { + } + + /** + * {@inheritDoc} + */ + @Override public void close() throws IOException { } @@ -635,6 +673,29 @@ public class MockHTable implements Table { * {@inheritDoc} */ @Override + public void setAutoFlush(boolean autoFlush) { + throw new NotImplementedException(); + + } + + /** + * {@inheritDoc} + */ + @Override + public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) { + throw new NotImplementedException(); + + } + + @Override + public void setAutoFlushTo(boolean autoFlush) { + throw new NotImplementedException(); + } + + /** + * {@inheritDoc} + */ + @Override public long getWriteBufferSize() { throw new NotImplementedException(); } http://git-wip-us.apache.org/repos/asf/kylin/blob/960eeb1f/server-base/src/main/java/org/apache/kylin/rest/security/RealAclHBaseStorage.java ---------------------------------------------------------------------- diff --git a/server-base/src/main/java/org/apache/kylin/rest/security/RealAclHBaseStorage.java b/server-base/src/main/java/org/apache/kylin/rest/security/RealAclHBaseStorage.java index d1a1384..1d520c4 100644 --- a/server-base/src/main/java/org/apache/kylin/rest/security/RealAclHBaseStorage.java +++ b/server-base/src/main/java/org/apache/kylin/rest/security/RealAclHBaseStorage.java @@ -21,8 +21,7 @@ package org.apache.kylin.rest.security; import java.io.IOException; import org.apache.commons.lang.StringUtils; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.kylin.common.KylinConfig; import org.apache.kylin.rest.service.AclService; import org.apache.kylin.rest.service.QueryService; @@ -59,11 +58,11 @@ public class RealAclHBaseStorage implements AclHBaseStorage { } @Override - public Table getTable(String tableName) throws IOException { + public HTableInterface getTable(String tableName) throws IOException { if (StringUtils.equals(tableName, aclTableName)) { - return HBaseConnection.get(hbaseUrl).getTable(TableName.valueOf(aclTableName)); + return HBaseConnection.get(hbaseUrl).getTable(aclTableName); } else if (StringUtils.equals(tableName, userTableName)) { - return HBaseConnection.get(hbaseUrl).getTable(TableName.valueOf(userTableName)); + return HBaseConnection.get(hbaseUrl).getTable(userTableName); } else { throw new IllegalStateException("getTable failed" + tableName); } http://git-wip-us.apache.org/repos/asf/kylin/blob/960eeb1f/server-base/src/main/java/org/apache/kylin/rest/service/AclService.java ---------------------------------------------------------------------- diff --git a/server-base/src/main/java/org/apache/kylin/rest/service/AclService.java b/server-base/src/main/java/org/apache/kylin/rest/service/AclService.java index b80d97d..5a6e401 100644 --- a/server-base/src/main/java/org/apache/kylin/rest/service/AclService.java +++ b/server-base/src/main/java/org/apache/kylin/rest/service/AclService.java @@ -33,7 +33,7 @@ import javax.annotation.PostConstruct; import org.apache.commons.io.IOUtils; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -127,7 +127,7 @@ public class 
AclService implements MutableAclService { @Override public List<ObjectIdentity> findChildren(ObjectIdentity parentIdentity) { List<ObjectIdentity> oids = new ArrayList<ObjectIdentity>(); - Table htable = null; + HTableInterface htable = null; try { htable = aclHBaseStorage.getTable(aclTableName); @@ -176,7 +176,7 @@ public class AclService implements MutableAclService { @Override public Map<ObjectIdentity, Acl> readAclsById(List<ObjectIdentity> oids, List<Sid> sids) throws NotFoundException { Map<ObjectIdentity, Acl> aclMaps = new HashMap<ObjectIdentity, Acl>(); - Table htable = null; + HTableInterface htable = null; Result result = null; try { htable = aclHBaseStorage.getTable(aclTableName); @@ -229,16 +229,17 @@ public class AclService implements MutableAclService { Authentication auth = SecurityContextHolder.getContext().getAuthentication(); PrincipalSid sid = new PrincipalSid(auth); - Table htable = null; + HTableInterface htable = null; try { htable = aclHBaseStorage.getTable(aclTableName); Put put = new Put(Bytes.toBytes(String.valueOf(objectIdentity.getIdentifier()))); - put.addColumn(Bytes.toBytes(AclHBaseStorage.ACL_INFO_FAMILY), Bytes.toBytes(ACL_INFO_FAMILY_TYPE_COLUMN), Bytes.toBytes(objectIdentity.getType())); - put.addColumn(Bytes.toBytes(AclHBaseStorage.ACL_INFO_FAMILY), Bytes.toBytes(ACL_INFO_FAMILY_OWNER_COLUMN), sidSerializer.serialize(new SidInfo(sid))); - put.addColumn(Bytes.toBytes(AclHBaseStorage.ACL_INFO_FAMILY), Bytes.toBytes(ACL_INFO_FAMILY_ENTRY_INHERIT_COLUMN), Bytes.toBytes(true)); + put.add(Bytes.toBytes(AclHBaseStorage.ACL_INFO_FAMILY), Bytes.toBytes(ACL_INFO_FAMILY_TYPE_COLUMN), Bytes.toBytes(objectIdentity.getType())); + put.add(Bytes.toBytes(AclHBaseStorage.ACL_INFO_FAMILY), Bytes.toBytes(ACL_INFO_FAMILY_OWNER_COLUMN), sidSerializer.serialize(new SidInfo(sid))); + put.add(Bytes.toBytes(AclHBaseStorage.ACL_INFO_FAMILY), Bytes.toBytes(ACL_INFO_FAMILY_ENTRY_INHERIT_COLUMN), Bytes.toBytes(true)); htable.put(put); + htable.flushCommits(); logger.debug("ACL of " + objectIdentity + " created successfully."); } catch (IOException e) { @@ -252,7 +253,7 @@ public class AclService implements MutableAclService { @Override public void deleteAcl(ObjectIdentity objectIdentity, boolean deleteChildren) throws ChildrenExistException { - Table htable = null; + HTableInterface htable = null; try { htable = aclHBaseStorage.getTable(aclTableName); @@ -268,6 +269,7 @@ public class AclService implements MutableAclService { } htable.delete(delete); + htable.flushCommits(); logger.debug("ACL of " + objectIdentity + " deleted successfully."); } catch (IOException e) { @@ -285,7 +287,7 @@ public class AclService implements MutableAclService { throw e; } - Table htable = null; + HTableInterface htable = null; try { htable = aclHBaseStorage.getTable(aclTableName); @@ -296,7 +298,7 @@ public class AclService implements MutableAclService { Put put = new Put(Bytes.toBytes(String.valueOf(acl.getObjectIdentity().getIdentifier()))); if (null != acl.getParentAcl()) { - put.addColumn(Bytes.toBytes(AclHBaseStorage.ACL_INFO_FAMILY), Bytes.toBytes(ACL_INFO_FAMILY_PARENT_COLUMN), domainObjSerializer.serialize(new DomainObjectInfo(acl.getParentAcl().getObjectIdentity()))); + put.add(Bytes.toBytes(AclHBaseStorage.ACL_INFO_FAMILY), Bytes.toBytes(ACL_INFO_FAMILY_PARENT_COLUMN), domainObjSerializer.serialize(new DomainObjectInfo(acl.getParentAcl().getObjectIdentity()))); } for (AccessControlEntry ace : acl.getEntries()) { @@ -308,11 +310,12 @@ public class AclService implements MutableAclService { throw 
new UsernameNotFoundException("User " + userName + " does not exist. Please make sure the user has logged in before"); } AceInfo aceInfo = new AceInfo(ace); - put.addColumn(Bytes.toBytes(AclHBaseStorage.ACL_ACES_FAMILY), Bytes.toBytes(aceInfo.getSidInfo().getSid()), aceSerializer.serialize(aceInfo)); + put.add(Bytes.toBytes(AclHBaseStorage.ACL_ACES_FAMILY), Bytes.toBytes(aceInfo.getSidInfo().getSid()), aceSerializer.serialize(aceInfo)); } if (!put.isEmpty()) { htable.put(put); + htable.flushCommits(); logger.debug("ACL of " + acl.getObjectIdentity() + " updated successfully."); } http://git-wip-us.apache.org/repos/asf/kylin/blob/960eeb1f/server-base/src/main/java/org/apache/kylin/rest/service/CubeService.java ---------------------------------------------------------------------- diff --git a/server-base/src/main/java/org/apache/kylin/rest/service/CubeService.java b/server-base/src/main/java/org/apache/kylin/rest/service/CubeService.java index a6310d1..cb554c0 100644 --- a/server-base/src/main/java/org/apache/kylin/rest/service/CubeService.java +++ b/server-base/src/main/java/org/apache/kylin/rest/service/CubeService.java @@ -27,7 +27,9 @@ import java.util.List; import java.util.Map; import java.util.WeakHashMap; -import org.apache.hadoop.hbase.client.Connection; +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.client.HTable; import org.apache.kylin.common.KylinConfig; import org.apache.kylin.common.util.Pair; import org.apache.kylin.cube.CubeInstance; @@ -393,24 +395,33 @@ public class CubeService extends BasicService { if (htableInfoCache.containsKey(tableName)) { return htableInfoCache.get(tableName); } - Connection conn = HBaseConnection.get(this.getConfig().getStorageUrl()); + + Configuration hconf = HBaseConnection.getCurrentHBaseConfiguration(); + HTable table = null; HBaseResponse hr = null; long tableSize = 0; int regionCount = 0; - HBaseRegionSizeCalculator cal = new HBaseRegionSizeCalculator(tableName, conn); - Map<byte[], Long> sizeMap = cal.getRegionSizeMap(); + try { + table = new HTable(hconf, tableName); - for (long s : sizeMap.values()) { - tableSize += s; - } + HBaseRegionSizeCalculator cal = new HBaseRegionSizeCalculator(table); + Map<byte[], Long> sizeMap = cal.getRegionSizeMap(); - regionCount = sizeMap.size(); + for (long s : sizeMap.values()) { + tableSize += s; + } + + regionCount = sizeMap.size(); + + // Set response. + hr = new HBaseResponse(); + hr.setTableSize(tableSize); + hr.setRegionCount(regionCount); + } finally { + IOUtils.closeQuietly(table); + } - // Set response. 

http://git-wip-us.apache.org/repos/asf/kylin/blob/960eeb1f/server-base/src/main/java/org/apache/kylin/rest/service/CubeService.java
----------------------------------------------------------------------
diff --git a/server-base/src/main/java/org/apache/kylin/rest/service/CubeService.java b/server-base/src/main/java/org/apache/kylin/rest/service/CubeService.java
index a6310d1..cb554c0 100644
--- a/server-base/src/main/java/org/apache/kylin/rest/service/CubeService.java
+++ b/server-base/src/main/java/org/apache/kylin/rest/service/CubeService.java
@@ -27,7 +27,9 @@ import java.util.List;
 import java.util.Map;
 import java.util.WeakHashMap;
 
-import org.apache.hadoop.hbase.client.Connection;
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.HTable;
 import org.apache.kylin.common.KylinConfig;
 import org.apache.kylin.common.util.Pair;
 import org.apache.kylin.cube.CubeInstance;
@@ -393,24 +395,33 @@ public class CubeService extends BasicService {
         if (htableInfoCache.containsKey(tableName)) {
             return htableInfoCache.get(tableName);
         }
-        Connection conn = HBaseConnection.get(this.getConfig().getStorageUrl());
+
+        Configuration hconf = HBaseConnection.getCurrentHBaseConfiguration();
+        HTable table = null;
         HBaseResponse hr = null;
         long tableSize = 0;
         int regionCount = 0;
 
-        HBaseRegionSizeCalculator cal = new HBaseRegionSizeCalculator(tableName, conn);
-        Map<byte[], Long> sizeMap = cal.getRegionSizeMap();
+        try {
+            table = new HTable(hconf, tableName);
 
-        for (long s : sizeMap.values()) {
-            tableSize += s;
-        }
+            HBaseRegionSizeCalculator cal = new HBaseRegionSizeCalculator(table);
+            Map<byte[], Long> sizeMap = cal.getRegionSizeMap();
 
-        regionCount = sizeMap.size();
+            for (long s : sizeMap.values()) {
+                tableSize += s;
+            }
+
+            regionCount = sizeMap.size();
+
+            // Set response.
+            hr = new HBaseResponse();
+            hr.setTableSize(tableSize);
+            hr.setRegionCount(regionCount);
+        } finally {
+            IOUtils.closeQuietly(table);
+        }
 
-        // Set response.
-        hr = new HBaseResponse();
-        hr.setTableSize(tableSize);
-        hr.setRegionCount(regionCount);
         htableInfoCache.put(tableName, hr);
 
         return hr;

http://git-wip-us.apache.org/repos/asf/kylin/blob/960eeb1f/server-base/src/main/java/org/apache/kylin/rest/service/QueryService.java
----------------------------------------------------------------------
diff --git a/server-base/src/main/java/org/apache/kylin/rest/service/QueryService.java b/server-base/src/main/java/org/apache/kylin/rest/service/QueryService.java
index 6a38638..8972456 100644
--- a/server-base/src/main/java/org/apache/kylin/rest/service/QueryService.java
+++ b/server-base/src/main/java/org/apache/kylin/rest/service/QueryService.java
@@ -48,11 +48,11 @@ import javax.sql.DataSource;
 import org.apache.calcite.avatica.ColumnMetaData.Rep;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Table;
 import org.apache.kylin.common.KylinConfig;
 import org.apache.kylin.common.QueryContext;
 import org.apache.kylin.common.debug.BackdoorToggles;
@@ -163,13 +163,14 @@ public class QueryService extends BasicService {
         Query[] queryArray = new Query[queries.size()];
 
         byte[] bytes = querySerializer.serialize(queries.toArray(queryArray));
-        Table htable = null;
+        HTableInterface htable = null;
         try {
-            htable = HBaseConnection.get(hbaseUrl).getTable(TableName.valueOf(userTableName));
+            htable = HBaseConnection.get(hbaseUrl).getTable(userTableName);
             Put put = new Put(Bytes.toBytes(creator));
-            put.addColumn(Bytes.toBytes(USER_QUERY_FAMILY), Bytes.toBytes(USER_QUERY_COLUMN), bytes);
+            put.add(Bytes.toBytes(USER_QUERY_FAMILY), Bytes.toBytes(USER_QUERY_COLUMN), bytes);
 
             htable.put(put);
+            htable.flushCommits();
         } finally {
             IOUtils.closeQuietly(htable);
         }
@@ -195,13 +196,14 @@ public class QueryService extends BasicService {
         Query[] queryArray = new Query[queries.size()];
 
         byte[] bytes = querySerializer.serialize(queries.toArray(queryArray));
-        Table htable = null;
+        HTableInterface htable = null;
         try {
-            htable = HBaseConnection.get(hbaseUrl).getTable(TableName.valueOf(userTableName));
+            htable = HBaseConnection.get(hbaseUrl).getTable(userTableName);
             Put put = new Put(Bytes.toBytes(creator));
-            put.addColumn(Bytes.toBytes(USER_QUERY_FAMILY), Bytes.toBytes(USER_QUERY_COLUMN), bytes);
+            put.add(Bytes.toBytes(USER_QUERY_FAMILY), Bytes.toBytes(USER_QUERY_COLUMN), bytes);
 
             htable.put(put);
+            htable.flushCommits();
         } finally {
             IOUtils.closeQuietly(htable);
         }
@@ -213,12 +215,12 @@ public class QueryService extends BasicService {
         }
 
         List<Query> queries = new ArrayList<Query>();
-        Table htable = null;
+        HTableInterface htable = null;
         try {
-            org.apache.hadoop.hbase.client.Connection conn = HBaseConnection.get(hbaseUrl);
+            HConnection conn = HBaseConnection.get(hbaseUrl);
             HBaseConnection.createHTableIfNeeded(conn, userTableName, USER_QUERY_FAMILY);
-            htable = HBaseConnection.get(hbaseUrl).getTable(TableName.valueOf(userTableName));
+            htable = conn.getTable(userTableName);
 
             Get get = new Get(Bytes.toBytes(creator));
             get.addFamily(Bytes.toBytes(USER_QUERY_FAMILY));
             Result result = htable.get(get);
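
The CubeService hunk above sizes a cube's HTable by opening an HTable handle from the current HBase configuration and passing it to Kylin's HBaseRegionSizeCalculator, whose 0.98 variant takes an HTable rather than a table name plus Connection. A rough sketch of the same computation, assuming Kylin's storage-hbase module (for HBaseRegionSizeCalculator) and an HBase 0.98 client are on the classpath; the table name is illustrative:

    import java.util.Map;

    import org.apache.commons.io.IOUtils;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.kylin.storage.hbase.util.HBaseRegionSizeCalculator;

    public class HTableSizeSketch {
        public static void main(String[] args) throws Exception {
            Configuration hconf = HBaseConfiguration.create(); // stand-in for HBaseConnection.getCurrentHBaseConfiguration()
            HTable table = null;
            long tableSize = 0;
            int regionCount = 0;
            try {
                table = new HTable(hconf, "KYLIN_EXAMPLE_HTABLE"); // illustrative table name
                HBaseRegionSizeCalculator cal = new HBaseRegionSizeCalculator(table);
                Map<byte[], Long> sizeMap = cal.getRegionSizeMap();
                for (long s : sizeMap.values()) {
                    tableSize += s; // sum the per-region sizes to get the total table size
                }
                regionCount = sizeMap.size();
            } finally {
                IOUtils.closeQuietly(table); // release the HTable even if the calculation fails
            }
            System.out.println(regionCount + " regions, " + tableSize + " bytes");
        }
    }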

http://git-wip-us.apache.org/repos/asf/kylin/blob/960eeb1f/server-base/src/main/java/org/apache/kylin/rest/service/UserService.java
----------------------------------------------------------------------
diff --git a/server-base/src/main/java/org/apache/kylin/rest/service/UserService.java b/server-base/src/main/java/org/apache/kylin/rest/service/UserService.java
index 9d94de1..0aefd08 100644
--- a/server-base/src/main/java/org/apache/kylin/rest/service/UserService.java
+++ b/server-base/src/main/java/org/apache/kylin/rest/service/UserService.java
@@ -30,11 +30,11 @@ import javax.annotation.PostConstruct;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Table;
 import org.apache.kylin.common.util.Bytes;
 import org.apache.kylin.common.util.Pair;
 import org.apache.kylin.rest.constant.Constant;
@@ -74,7 +74,7 @@ public class UserService implements UserDetailsManager {
 
     @Override
     public UserDetails loadUserByUsername(String username) throws UsernameNotFoundException {
-        Table htable = null;
+        HTableInterface htable = null;
         try {
             htable = aclHBaseStorage.getTable(userTableName);
 
@@ -148,16 +148,16 @@ public class UserService implements UserDetailsManager {
     @Override
     @PreAuthorize(Constant.ACCESS_HAS_ROLE_ADMIN)
     public void updateUser(UserDetails user) {
-        Table htable = null;
+        HTableInterface htable = null;
         try {
             htable = aclHBaseStorage.getTable(userTableName);
 
             Pair<byte[], byte[]> pair = userToHBaseRow(user);
             Put put = new Put(pair.getKey());
-
-            put.addColumn(Bytes.toBytes(AclHBaseStorage.USER_AUTHORITY_FAMILY), Bytes.toBytes(AclHBaseStorage.USER_AUTHORITY_COLUMN), pair.getSecond());
+            put.add(Bytes.toBytes(AclHBaseStorage.USER_AUTHORITY_FAMILY), Bytes.toBytes(AclHBaseStorage.USER_AUTHORITY_COLUMN), pair.getSecond());
 
             htable.put(put);
+            htable.flushCommits();
         } catch (IOException e) {
             throw new RuntimeException(e.getMessage(), e);
         } finally {
@@ -168,13 +168,14 @@ public class UserService implements UserDetailsManager {
     @Override
     @PreAuthorize(Constant.ACCESS_HAS_ROLE_ADMIN)
     public void deleteUser(String username) {
-        Table htable = null;
+        HTableInterface htable = null;
         try {
             htable = aclHBaseStorage.getTable(userTableName);
 
             Delete delete = new Delete(Bytes.toBytes(username));
 
             htable.delete(delete);
+            htable.flushCommits();
         } catch (IOException e) {
             throw new RuntimeException(e.getMessage(), e);
         } finally {
@@ -189,7 +190,7 @@ public class UserService implements UserDetailsManager {
 
     @Override
     public boolean userExists(String username) {
-        Table htable = null;
+        HTableInterface htable = null;
         try {
             htable = aclHBaseStorage.getTable(userTableName);
 
@@ -220,7 +221,7 @@ public class UserService implements UserDetailsManager {
         s.addColumn(Bytes.toBytes(AclHBaseStorage.USER_AUTHORITY_FAMILY), Bytes.toBytes(AclHBaseStorage.USER_AUTHORITY_COLUMN));
 
         List<UserDetails> all = new ArrayList<UserDetails>();
-        Table htable = null;
+        HTableInterface htable = null;
         ResultScanner scanner = null;
         try {
             htable = aclHBaseStorage.getTable(userTableName);
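
The HBaseConnection changes below make the same swap at the connection layer: the 1.x Connection/ConnectionFactory/Admin entry points become the 0.98 HConnection, HConnectionManager, and HBaseAdmin, with the admin constructed from the shared connection instead of obtained from it. A minimal sketch of that bootstrap, assuming an HBase 0.98 client on the classpath (the table name is illustrative):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.HBaseAdmin;
    import org.apache.hadoop.hbase.client.HConnection;
    import org.apache.hadoop.hbase.client.HConnectionManager;

    public class Hbase098AdminSketch {
        public static void main(String[] args) throws IOException {
            Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
            // 0.98 equivalent of ConnectionFactory.createConnection(conf)
            HConnection conn = HConnectionManager.createConnection(conf);
            // 0.98 equivalent of conn.getAdmin()
            HBaseAdmin admin = new HBaseAdmin(conn);
            try {
                boolean exists = admin.tableExists(TableName.valueOf("example_table")); // illustrative name
                System.out.println("table exists: " + exists);
            } finally {
                admin.close();
                conn.close();
            }
        }
    }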

http://git-wip-us.apache.org/repos/asf/kylin/blob/960eeb1f/storage-hbase/src/main/java/org/apache/kylin/storage/hbase/HBaseConnection.java
----------------------------------------------------------------------
diff --git a/storage-hbase/src/main/java/org/apache/kylin/storage/hbase/HBaseConnection.java b/storage-hbase/src/main/java/org/apache/kylin/storage/hbase/HBaseConnection.java
index 7e2cefc..606a820 100644
--- a/storage-hbase/src/main/java/org/apache/kylin/storage/hbase/HBaseConnection.java
+++ b/storage-hbase/src/main/java/org/apache/kylin/storage/hbase/HBaseConnection.java
@@ -39,9 +39,9 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.kylin.common.KylinConfig;
@@ -63,7 +63,7 @@ public class HBaseConnection {
     private static final Logger logger = LoggerFactory.getLogger(HBaseConnection.class);
 
     private static final Map<String, Configuration> configCache = new ConcurrentHashMap<String, Configuration>();
-    private static final Map<String, Connection> connPool = new ConcurrentHashMap<String, Connection>();
+    private static final Map<String, HConnection> connPool = new ConcurrentHashMap<String, HConnection>();
     private static final ThreadLocal<Configuration> configThreadLocal = new ThreadLocal<>();
 
     private static ExecutorService coprocessorPool = null;
@@ -74,7 +74,7 @@ public class HBaseConnection {
             public void run() {
                 closeCoprocessorPool();
 
-                for (Connection conn : connPool.values()) {
+                for (HConnection conn : connPool.values()) {
                     try {
                         conn.close();
                     } catch (IOException e) {
@@ -143,7 +143,7 @@ public class HBaseConnection {
         // using a hbase:xxx URL is deprecated, instead hbase config is always loaded from hbase-site.xml in classpath
         if (!(StringUtils.isEmpty(url) || "hbase".equals(url)))
             throw new IllegalArgumentException("to use hbase storage, pls set 'kylin.storage.url=hbase' in kylin.properties");
-
+        
         Configuration conf = HBaseConfiguration.create(HadoopUtil.getCurrentConfiguration());
         addHBaseClusterNNHAConfiguration(conf);
 
@@ -210,9 +210,9 @@ public class HBaseConnection {
 
     // ============================================================================
 
-    // returned Connection can be shared by multiple threads and does not require close()
+    // returned HConnection can be shared by multiple threads and does not require close()
     @SuppressWarnings("resource")
-    public static Connection get(String url) {
+    public static HConnection get(String url) {
         // find configuration
         Configuration conf = configCache.get(url);
         if (conf == null) {
@@ -220,13 +220,13 @@ public class HBaseConnection {
             configCache.put(url, conf);
         }
 
-        Connection connection = connPool.get(url);
+        HConnection connection = connPool.get(url);
         try {
             while (true) {
                 // I don't use DCL since recreate a connection is not a big issue.
                 if (connection == null || connection.isClosed()) {
                     logger.info("connection is null or closed, creating a new one");
-                    connection = ConnectionFactory.createConnection(conf);
+                    connection = HConnectionManager.createConnection(conf);
                     connPool.put(url, connection);
                 }
 
@@ -245,8 +245,8 @@ public class HBaseConnection {
         return connection;
     }
 
-    public static boolean tableExists(Connection conn, String tableName) throws IOException {
-        Admin hbase = conn.getAdmin();
+    public static boolean tableExists(HConnection conn, String tableName) throws IOException {
+        HBaseAdmin hbase = new HBaseAdmin(conn);
         try {
             return hbase.tableExists(TableName.valueOf(tableName));
         } finally {
@@ -266,18 +266,18 @@ public class HBaseConnection {
         deleteTable(HBaseConnection.get(hbaseUrl), tableName);
     }
 
-    public static void createHTableIfNeeded(Connection conn, String table, String... families) throws IOException {
-        Admin hbase = conn.getAdmin();
-        TableName tableName = TableName.valueOf(table);
+    public static void createHTableIfNeeded(HConnection conn, String table, String... families) throws IOException {
+        HBaseAdmin hbase = new HBaseAdmin(conn);
+
         try {
             if (tableExists(conn, table)) {
                 logger.debug("HTable '" + table + "' already exists");
-                Set<String> existingFamilies = getFamilyNames(hbase.getTableDescriptor(tableName));
+                Set<String> existingFamilies = getFamilyNames(hbase.getTableDescriptor(TableName.valueOf(table)));
                 boolean wait = false;
                 for (String family : families) {
                     if (existingFamilies.contains(family) == false) {
                         logger.debug("Adding family '" + family + "' to HTable '" + table + "'");
-                        hbase.addColumn(tableName, newFamilyDescriptor(family));
+                        hbase.addColumn(table, newFamilyDescriptor(family));
                         // addColumn() is async, is there a way to wait it finish?
                         wait = true;
                     }
@@ -330,8 +330,8 @@ public class HBaseConnection {
         return fd;
     }
 
-    public static void deleteTable(Connection conn, String tableName) throws IOException {
-        Admin hbase = conn.getAdmin();
+    public static void deleteTable(HConnection conn, String tableName) throws IOException {
+        HBaseAdmin hbase = new HBaseAdmin(conn);
         try {
             if (!tableExists(conn, tableName)) {
@@ -341,10 +341,10 @@ public class HBaseConnection {
 
             logger.debug("delete HTable '" + tableName + "'");
 
-            if (hbase.isTableEnabled(TableName.valueOf(tableName))) {
-                hbase.disableTable(TableName.valueOf(tableName));
+            if (hbase.isTableEnabled(tableName)) {
+                hbase.disableTable(tableName);
             }
-            hbase.deleteTable(TableName.valueOf(tableName));
+            hbase.deleteTable(tableName);
 
             logger.debug("HTable '" + tableName + "' deleted");
         } finally {