HBase Distributed Installation and Configuration
Copyright notice: please credit the source when reposting: https://blog.csdn.net/seashouwang/article/details/77869735

1. HBase Distributed Installation

1.1. HBase 0.94.7 Distributed Installation

1.1.1. Installation Environment

Prerequisites: the JDK and the Hadoop cluster are already installed.

HBase download locations:

http://www.apache.org/dyn/closer.cgi/hbase/

http://apache.fayea.com/hbase/

[root@localhost85 ~]# java -version

java version "1.7.0_15"

Java(TM) SE Runtime Environment (build 1.7.0_15-b03)

Java HotSpot(TM) 64-Bit Server VM (build 23.7-b01, mixed mode)

[root@localhost85 ~]#

[root@localhost85 ~]# hadoop version

Hadoop 1.1.2

Subversion https://svn.apache.org/repos/asf/hadoop/common/branches/branch-1.1 -r 1440782

Compiled by hortonfo on Thu Jan 31 02:03:24 UTC 2013

From source with checksum c720ddcf4b926991de7467d253a79b8b

[root@localhost85 ~]#

1.1.2. Extract to the Current Directory

[root@localhost85 local]# tar -xvf /root/download/hbase-0.94.7-security.tar.gz -C .

[root@localhost85 local]# ls

bin    hadoop-1.1.2           lib      share

etc    hbase-0.94.7-security  lib64    src

games  include                libexec  VMwareTools-9.6.2-1688356.tar.gz

hadoop jdk1.7.0_15            sbin     vmware-tools-distrib

1.1.3. Rename the HBase Directory

[root@localhost85 local]# mv hbase-0.94.7-security/ hbase-0.94.7    # rename

[root@localhost85 local]# ls

bin    hadoop-1.1.2  lib      share

etc    hbase-0.94.7  lib64    src

games  include       libexec  VMwareTools-9.6.2-1688356.tar.gz

hadoop jdk1.7.0_15   sbin     vmware-tools-distrib

[root@localhost85 local]#

1.1.4. Edit hbase-env.sh

[root@localhost85 hbase-0.94.7]# ls

bin          docs                             hbase-webapps  NOTICE.txt  sbin

CHANGES.txt  hbase-0.94.7-security.jar        lib            pom.xml     security

conf         hbase-0.94.7-security-tests.jar  LICENSE.txt    README.txt  src

[root@localhost85 hbase-0.94.7]# cd conf/

[root@localhost85 conf]# ls

hadoop-metrics.properties  hbase-env.sh~     hbase-site.xml  regionservers

hbase-env.sh               hbase-policy.xml  log4j.properties

[root@localhost85 conf]# vim hbase-env.sh

# Add the JDK runtime environment

# The java implementation to use.  Java 1.6 required.

export JAVA_HOME=/usr/local/jdk1.7.0_15

# Add the Hadoop runtime environment (point HBase at Hadoop's conf directory)

# Extra Java CLASSPATH elements.  Optional.

export HBASE_CLASSPATH=/usr/local/hadoop-1.1.2/conf

# Tell HBase whether it should manage its own instance of ZooKeeper or not.

export HBASE_MANAGES_ZK=true

[root@localhost85 conf]#

1.1.5. Edit hbase-site.xml

[root@localhost85 conf]# vim hbase-site.xml

<?xml version="1.0"?>

<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- (standard Apache license header omitted) -->

<configuration>

<property>

<name>hbase.rootdir</name>

<value>hdfs://192.168.1.85:9000/hbase</value>

</property>

<property>

<name>hbase.cluster.distributed</name>

<value>true</value>

</property>

<property>

<name>hbase.tmp.dir</name>

<value>/usr/local/hbase/tmp</value>

</property>

<property>

<name>hbase.zookeeper.quorum</name>

<value>localhost85,localhost86</value>

</property>

<property>

<name>hbase.zookeeper.property.dataDir</name>

<value>${hbase.tmp.dir}/zookeeper</value>

</property>

</configuration>
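
Before starting HBase it is worth confirming that the hbase.rootdir above uses exactly the same NameNode address and port as fs.default.name in Hadoop's core-site.xml, and that HDFS answers on that address (the /hbase directory itself is created automatically on first start). A quick hedged check, assuming the Hadoop 1.1.2 layout used above:

grep -A 1 fs.default.name /usr/local/hadoop-1.1.2/conf/core-site.xml   # NameNode URI that hbase.rootdir must match
hadoop fs -ls hdfs://192.168.1.85:9000/                                # HDFS should respond at that address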

1.1.6. Edit the regionservers File

[root@localhost85 conf]# vim regionservers

localhost86

[root@localhost85 conf]#

1.1.7. Copy HBase to /usr/local on localhost86

[root@localhost85 local]# scp -r hbase-0.94.7/ root@192.168.1.86:/usr/local/

[root@localhost86 local]# ls

bin  games   hadoop-1.1.2  include      lib    libexec  share  VMwareTools-9.6.2-1688356.tar.gz

etc  hadoop  hbase-0.94.7  jdk1.7.0_15  lib64  sbin     src    vmware-tools-distrib

[root@localhost86 local]#

1.1.8. Add Environment Variables

[root@localhost85 ~]#

[root@localhost85 ~]# vim /etc/profile

unset i

unset -f pathmunge

export JAVA_HOME=/usr/local/jdk1.7.0_15

export JRE_HOME=/usr/local/jdk1.7.0_15/jre

export CLASSPATH=.:$JAVA_HOME/lib:$JRE_HOME/lib

export HADOOP_HOME_WARN_SUPPRESS=1

export HADOOP_HOME=/usr/local/hadoop-1.1.2

export HBASE_HOME=/usr/local/hbase-0.94.7

export PATH=$JAVA_HOME/bin:$JRE_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/lib:$HBASE_HOME/bin:$PATH

"/etc/profile" 85L, 2144C

[root@localhost85 ~]#
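
The new variables in /etc/profile only take effect in new login shells unless the file is re-sourced. A minimal check on localhost85, using the variable names defined above:

source /etc/profile
echo $JAVA_HOME $HADOOP_HOME $HBASE_HOME
which hbase          # should resolve to /usr/local/hbase-0.94.7/bin/hbase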

1.1.9. Copy the Configuration to localhost86

[root@localhost85 ~]# scp /etc/profile root@localhost86:/etc/

[root@localhost86 local]#

[root@localhost86 local]# source /etc/profile

[root@localhost86 local]# jps

2456 DataNode

3888 Jps

2531 TaskTracker

[root@localhost86 local]#

[root@localhost86 local]# cat /etc/profile

# /etc/profile

unset i

unset -f pathmunge

export JAVA_HOME=/usr/local/jdk1.7.0_15

export JRE_HOME=/usr/local/jdk1.7.0_15/jre

export CLASSPATH=.:$JAVA_HOME/lib:$JRE_HOME/lib

export HADOOP_HOME_WARN_SUPPRESS=1

export HADOOP_HOME=/usr/local/hadoop-1.1.2

export HBASE_HOME=/usr/local/hbase-0.94.7

export PATH=$JAVA_HOME/bin:$JRE_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/lib:$HBASE_HOME/bin:$PATH

[root@localhost86 local]#

1.1.10. Start the HBase Service

[root@localhost85 logs]#

[root@localhost85 logs]# hadoop dfs -ls hdfs://localhost85:9000/hbase

[root@localhost85 logs]#

[root@localhost85 logs]# stop-hbase.sh

stopping hbase

192.168.1.85: no zookeeper to stop because no pid file /tmp/hbase-root-zookeeper.pid

192.168.1.86: stopping zookeeper.

[root@localhost85 logs]#

[root@localhost85 logs]#

[root@localhost85 logs]# start-hbase.sh

192.168.1.85: starting zookeeper, logging to /usr/local/hbase-0.94.7/bin/../logs/hbase-root-zookeeper-localhost85.out

192.168.1.86: starting zookeeper, logging to /usr/local/hbase-0.94.7/bin/../logs/hbase-root-zookeeper-localhost86.out

starting master, logging to /usr/local/hbase-0.94.7/logs/hbase-root-master-localhost85.out

localhost86: starting regionserver, logging to /usr/local/hbase-0.94.7/bin/../logs/hbase-root-regionserver-localhost86.out

[root@localhost85 logs]#

1.1.11. Verify That HBase Is Working

[root@localhost85 logs]#

[root@localhost85 logs]# hbase shell

HBase Shell; enter 'help<RETURN>' for list of supported commands.

Type "exit<RETURN>" to leave the HBase Shell

Version 0.94.7, r1471806, Wed Apr 24 18:44:36 PDT 2013

hbase(main):001:0> list

TABLE

0 row(s) in 1.6050 seconds

hbase(main):002:0>

The output above shows that HBase is configured successfully.
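
Beyond list, a quick smoke test is to create a throwaway table, write and read one cell, and then drop it. The table and column-family names below are made up for illustration; run them from the same HBase shell:

create 'smoke_test', 'cf'
put 'smoke_test', 'row1', 'cf:msg', 'hello'
scan 'smoke_test'
disable 'smoke_test'
drop 'smoke_test'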

1.1.12. Stop the HBase Service

[root@localhost85 logs]#

[root@localhost85 logs]# stop-hbase.sh

stopping hbase............

192.168.1.86: stopping zookeeper.

192.168.1.85: stopping zookeeper.

[root@localhost85 logs]#

1.2. HBase Startup Errors

1.2.1. org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper: The identifier of this process is 4818@localhost85

[root@localhost86 local]# hbase shell

HBase Shell; enter 'help<RETURN>' for list of supported commands.

Type "exit<RETURN>" to leave the HBase Shell

Version 0.94.7, r1471806, Wed Apr 24 18:44:36 PDT 2013

hbase(main):001:0> list

TABLE

17/04/01 19:15:06 ERROR zookeeper.RecoverableZooKeeper: ZooKeeper exists failed after 3 retries

17/04/01 19:15:06 WARN zookeeper.ZKUtil: hconnection Unable to set watcher on znode (/hbase/hbaseid)

org.apache.zookeeper.KeeperException$ConnectionLossException: KeeperErrorCode = ConnectionLoss for /hbase/hbaseid

at org.apache.zookeeper.KeeperException.create(KeeperException.java:99)

at org.apache.zookeeper.KeeperException.create(KeeperException.java:51)

at org.apache.zookeeper.ZooKeeper.exists(ZooKeeper.java:1041)

at org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper.exists(RecoverableZooKeeper.java:172)

at org.apache.hadoop.hbase.zookeeper.ZKUtil.checkExists(ZKUtil.java:450)

at org.apache.hadoop.hbase.zookeeper.ClusterId.readClusterIdZNode(ClusterId.java:61)

at org.apache.hadoop.hbase.zookeeper.ClusterId.getId(ClusterId.java:50)

at org.apache.hadoop.hbase.zookeeper.ClusterId.hasId(ClusterId.java:44)

at ...

Analysis based on the log files:

[root@localhost85 hbase-0.94.7]#

[root@localhost85 hbase-0.94.7]# ls

bin          docs                             hbase-webapps  logs        README.txt  src

CHANGES.txt  hbase-0.94.7-security.jar        lib            NOTICE.txt  sbin

conf         hbase-0.94.7-security-tests.jar  LICENSE.txt    pom.xml     security

[root@localhost85 hbase-0.94.7]# cd logs/

[root@localhost85 logs]# ls

hbase-root-master-localhost85.log    hbase-root-master-localhost85.out.2   SecurityAuth.audit

hbase-root-master-localhost85.out    hbase-root-zookeeper-localhost85.log

hbase-root-master-localhost85.out.1  hbase-root-zookeeper-localhost85.out

[root@localhost85 logs]# vim hbase-root-master-localhost85.log

2017-04-01 18:57:30,503 INFO org.apache.zookeeper.ZooKeeper: Client environment:user.home=/root

2017-04-01 18:57:30,503 INFO org.apache.zookeeper.ZooKeeper: Client environment:user.dir=/usr/local

2017-04-01 18:57:30,504 INFO org.apache.zookeeper.ZooKeeper: Initiating client connection, connectString=localhost86:2181,localhost85:2181 sessionTimeout=180000 watcher=master:60000

2017-04-01 18:57:30,566 INFO org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper: The identifier of this process is 4818@localhost85

2017-04-01 18:57:30,585 INFO org.apache.zookeeper.ClientCnxn: Opening socket connection to server localhost85/192.168.1.85:2181. Will not attempt to authenticate using SASL (unknown error)

2017-04-01 18:57:30,597 WARN org.apache.zookeeper.ClientCnxn: Session 0x0 for server null, unexpected error, closing socket connection and attempting reconnect

java.net.ConnectException: Connection refused

at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)

at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:692)

at org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:350)

at org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1068)

2017-04-01 18:57:30,724 INFO org.apache.zookeeper.ClientCnxn: Opening socket connection to server localhost86/192.168.1.86:2181. Will not attempt to authenticate using SASL (unknown error)

2017-04-01 18:57:30,725 INFO org.apache.zookeeper.ClientCnxn: Socket connection established to localhost86/192.168.1.86:2181, initiating session

2017-04-01 18:57:30,742 WARN org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper: Possibly transient ZooKeeper exception: org.apache.zookeeper.KeeperException$ConnectionLossException: KeeperErrorCode = ConnectionLoss for /hbase

2017-04-01 18:57:30,742 INFO org.apache.zookeeper.ClientCnxn: Unable to read additional data from server sessionid 0x0, likely server has closed socket, closing socket connection and attempting reconnect

2017-04-01 18:57:30,743 INFO org.apache.hadoop.hbase.util.RetryCounter: Sleeping 2000ms before retry #1...

2017-04-01 18:57:32,616 INFO org.apache.zookeeper.ClientCnxn: Opening socket connection to server localhost85/192.168.1.85:2181. Will not attempt to authenticate using SASL (unknown error)

2017-04-01 18:57:32,617 WARN org.apache.zookeeper.ClientCnxn: Session 0x0 for server null, unexpected error, closing socket connection and attempting reconnect

java.net.ConnectException: Connection refused

at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)

Cause: the value of hbase.zookeeper.quorum in hbase-site.xml was configured incorrectly.

<property>

<name>hbase.zookeeper.quorum</name>

<!--<value>localhost85,localhost86</value>-->

Change it to the following:

<value>192.168.1.85,192.168.1.86</value>

</property>
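
After correcting the quorum and restarting HBase (with HBASE_MANAGES_ZK=true, start-hbase.sh also starts the ZooKeeper processes), it can help to confirm that something is actually listening on port 2181 on each quorum host. A hedged check using ZooKeeper's standard four-letter command, assuming nc is installed:

echo ruok | nc 192.168.1.85 2181     # expect the reply: imok
echo ruok | nc 192.168.1.86 2181     # expect the reply: imok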

1.2.2. org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet

hbase(main):003:0* list

TABLE

ERROR: org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet

at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:2445)

at org.apache.hadoop.hbase.master.MasterRpcServices.isMasterRunning(MasterRpcServices.java:946)

at org.apache.hadoop.hbase.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java:58521)

at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2339)

at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:123)

at org.apache.hadoop.hbase.ipc.RpcExecutor$Handler.run(RpcExecutor.java:188)

at org.apache.hadoop.hbase.ipc.RpcExecutor$Handler.run(RpcExecutor.java:168)

Here is some help for this command:

List all tables in hbase. Optional regular expression parameter could

be used to filter the output. Examples:

hbase> list

hbase> list 'abc.*'

hbase> list 'ns:abc.*'

hbase> list 'ns:.*'

Cause:

HDFS is in safe mode, so HBase cannot modify its directories; take HDFS out of safe mode.

Solution:

From the Hadoop installation directory, run: bin/hdfs dfsadmin -safemode leave
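
A hedged sketch of the full check-and-leave sequence (in Hadoop 1.x the same subcommands live under bin/hadoop dfsadmin instead of bin/hdfs dfsadmin):

hdfs dfsadmin -safemode get       # reports whether the NameNode is currently in safe mode
hdfs dfsadmin -safemode leave     # force the NameNode out of safe mode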

1.2.3. localhosti65: ssh: Could not resolve hostname localhosti65: Name or service not known

[root@localhost65 hbase-1.3.1]# bin/start-hbase.sh

localhost65: starting zookeeper, logging to /usr/local/hbase-1.3.1/bin/../logs/hbase-root-zookeeper-localhost65.out

starting master, logging to /usr/local/hbase-1.3.1/logs/hbase-root-master-localhost65.out

Java HotSpot(TM) 64-Bit Server VM warning: ignoring option PermSize=128m; support was removed in 8.0

Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=128m; support was removed in 8.0

localhosti65: ssh: Could not resolve hostname localhosti65: Name or service not known

[root@localhost65 hbase-1.3.1]#

Cause:

The ZooKeeper/regionserver host configuration in HBase contains a misspelled hostname (localhosti65 instead of localhost65).

Solution:

1. Check that the ZooKeeper hostname/IP and port configured in hbase-site.xml are correct, for example:

<property>

<name>hbase.zookeeper.quorum</name>

<value>localhost65</value>

</property>

<!-- Port on which clients connect to ZooKeeper -->

<property>

<name>hbase.zookeeper.property.clientPort</name>

<value>2181</value>

</property>

2. Check that the hostnames in the regionservers file are spelled correctly (a resolution check is sketched below), for example:

[root@localhost65 hbase-1.3.1]# vim conf/regionservers

localhost65    # hostname

[root@localhost65 hbase-1.3.1]#
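
Since the error complains that localhosti65 cannot be resolved, also confirm on the node that runs start-hbase.sh that every hostname used in the configuration actually resolves, for example:

getent hosts localhost65          # the correct hostname should print this node's IP; the misspelled localhosti65 should not resolve
grep localhost65 /etc/hosts       # in this kind of setup the mapping normally lives in /etc/hosts
ssh localhost65 hostname          # passwordless ssh to the correct hostname should also work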

1.2.4. org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.fs.PathIsNotEmptyDirectoryException): `/hbase/WALs/localhost65,16201,1503546750714-splitting is non empty': Directory is not empty

2017-08-29 09:57:42,398 WARN [ProcedureExecutor-0] master.SplitLogManager: Returning success without actually splitting and deleting all the log files in path hdfs://192.168.3.65:9000/hbase/WALs/localhost65,16201,1503546750714-splitting: [FileStatus{path=hdfs://192.168.3.65:9000/hbase/WALs/localhost65,16201,1503546750714-splitting/localhost65%2C16201%2C1503546750714.meta.1503568364024.meta; isDirectory=false; length=83; replication=1; blocksize=134217728; modification_time=1503568364030; access_time=1503568364030; owner=root; group=supergroup; permission=rw-r--r--; isSymlink=false}]

org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.fs.PathIsNotEmptyDirectoryException): `/hbase/WALs/localhost65,16201,1503546750714-splitting is non empty': Directory is not empty

at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.deleteInternal(FSNamesystem.java:4012)

at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.deleteInt(FSNamesystem.java:3968)

at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.delete(FSNamesystem.java:3952)

at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.delete(NameNodeRpcServer.java:825)

at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.delete(ClientNamenodeProtocolServerSideTranslatorPB.java:589)

at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)

at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:619)

at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:962)

at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2040)

at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2036)

at java.security.AccessController.doPrivileged(Native Method)

at javax.security.auth.Subject.doAs(Subject.java:422)

at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1656)

at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2034)

This problem can have several causes; check the following (a sketch for inspecting the stale -splitting directory follows the list):

1. Whether the system / HDFS still has free space
2. Whether the expected number of DataNodes is alive
3. Whether HDFS is in safe mode
4. Whether the firewall is disabled
5. Configuration problems
6. As a last resort, clear the NameNode tmp directory and reformat the NameNode (this wipes all HDFS data)
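
If the checks above all come back clean, this message usually points at a stale write-ahead-log splitting directory left behind by an earlier crash. A heavily hedged sketch for inspecting it, using the path from the log line above; only delete it after confirming the WAL files inside are obsolete, because removing live WALs can lose un-flushed data:

hdfs dfs -ls /hbase/WALs/localhost65,16201,1503546750714-splitting      # see what is left in the stale directory
hdfs dfs -rm -r /hbase/WALs/localhost65,16201,1503546750714-splitting   # CAUTION: only once the contents are confirmed obsolete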

1.3. HBase 1.x Installation

References:

http://blog.csdn.net/it_taojingzhan/article/details/51789739

http://blog.csdn.net/blackenn/article/details/52234420

http://blog.csdn.net/wtq1993/article/details/53088968

HBase releases: http://apache.fayea.com/hbase/

http://archive.apache.org/dist/hbase/1.1.5/hbase-1.1.5-bin.tar.gz

1.3.1. Extract to /usr/local

[root@localhost65 download]# tar -zxf hbase-1.1.5-bin.tar.gz -C /usr/local/

[root@localhost65 download]#

[root@localhost65 download]# cd /usr/local/

[root@localhost65 local]# ls

apache-hive-1.2.2-bin              bin           hbase        jdk1.8.0_65  sbin   vmware-tools-distrib

apache-hive-2.1.1-bin              etc           hbase-1.1.5  lib          share  zookeeper-3.4.6

apache-kylin-1.5.0-HBase1.1.3-bin  games         hbase-1.3.1  lib64        src

apache-kylin-1.6.0-bin             hadoop-2.6.2  include      libexec      VMwareTools-9.6.1-1378637.tar.gz

[root@localhost65 local]#

1.3.2. Set Environment Variables

[root@localhost65 local]# vim /etc/profile

export HBASE_HOME=/usr/local/hbase-1.1.5

export HBASE_CONF_DIR=$HBASE_HOME/conf

export PATH=$HBASE_HOME/bin:$PATH

[root@localhost65 local]#

1.3.3. Edit hbase-env.sh

[root@localhost65 ~]# cd /usr/local/

[root@localhost65 local]# ls

apache-hive-1.2.2-bin              bin           hbase        jdk1.8.0_65  sbin   vmware-tools-distrib

apache-hive-2.1.1-bin              etc           hbase-1.1.5  lib          share  zookeeper-3.4.6

apache-kylin-1.5.0-HBase1.1.3-bin  games         hbase-1.3.1  lib64        src

apache-kylin-1.6.0-bin             hadoop-2.6.2  include      libexec      VMwareTools-9.6.1-1378637.tar.gz

[root@localhost65 local]# cd hbase

hbase/        hbase-1.1.5/  hbase-1.3.1/

[root@localhost65 local]# cd hbase-1.1.5/

[root@localhost65 hbase-1.1.5]# ls

bin  CHANGES.txt  conf  docs  hbase-webapps  LEGAL  lib  LICENSE.txt  logs  NOTICE.txt  README.txt

[root@localhost65 hbase-1.1.5]# cd conf/

[root@localhost65 conf]# ls

hadoop-metrics2-hbase.properties  hbase-env.cmd  hbase-env.sh  hbase-policy.xml  hbase-site.xml  log4j.properties  regionservers

Edit hbase-env.sh:

[root@localhost65 conf]# vim hbase-env.sh

# Add the following:

# The java implementation to use. Java 1.7+ required.

export JAVA_HOME=/usr/local/jdk1.8.0_65

# Extra Java CLASSPATH elements. Optional.

# Point this at Hadoop's etc/hadoop directory so that HBase can find the Hadoop configuration, i.e. link HBase to Hadoop

export HBASE_CLASSPATH=/usr/local/hadoop-2.6.2/etc/hadoop

export HBASE_MANAGES_ZK=false    # defaults to true (HBase manages its own ZooKeeper); when set to false you must run and configure ZooKeeper yourself

[root@localhost65 conf]#
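
Because HBASE_MANAGES_ZK=false, start-hbase.sh no longer starts ZooKeeper for you; the external ZooKeeper (the zookeeper-3.4.6 installation under /usr/local in this setup) must already be running before HBase is started. A minimal check, assuming that install path:

/usr/local/zookeeper-3.4.6/bin/zkServer.sh status    # should report a running server (Mode: standalone, leader, or follower)
/usr/local/zookeeper-3.4.6/bin/zkServer.sh start     # start it first if the status command says it is not running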

1.3.4. Edit hbase-site.xml

[root@localhost65 conf]#

[root@localhost65 conf]# vim hbase-site.xml

<?xml version="1.0"?>

<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- (standard Apache license header omitted) -->

<configuration>

<!-- The scheme, host and port of hbase.rootdir must match fs.defaultFS in Hadoop's core-site.xml -->

<property>

<name>hbase.rootdir</name>

<value>hdfs://192.168.3.65:9000/hbase</value>

</property>

<property>

<name>hbase.cluster.distributed</name>

<value>true</value>

</property>

<!-- Temporary directory on the local filesystem. Change it to a more persistent location (/tmp is cleared on reboot) -->

<property>

<name>hbase.tmp.dir</name>

<value>/home/hbase/tmp</value>

</property>

<!-- With a single HMaster, the hbase.master property should be set to hostname:60000 (e.g. master5:60000) -->

<!-- With multiple HMasters, only the port 60000 needs to be given, because ZooKeeper handles electing the active master -->

<property>

<name>hbase.master</name>

<value>60000</value>

</property>

<!-- This parameter sets where ZooKeeper stores its snapshots; the default is /tmp, which is cleared on reboot. Since ZooKeeper is installed separately here, this path points to the dataDir configured in $ZOOKEEPER_HOME/conf/zoo.cfg -->

<property>

<name>hbase.zookeeper.property.dataDir</name>

<value>/home/zookeeper/data</value>

</property>

<property>

<name>hbase.zookeeper.quorum</name>

<value>localhost65</value>

</property>

<!-- Port on which clients connect to ZooKeeper -->

<property>

<name>hbase.zookeeper.property.clientPort</name>

<value>2181</value>

</property>

<!-- ZooKeeper session timeout. HBase passes this value to the ZK cluster as the recommended maximum session timeout -->

<property>

<name>zookeeper.session.timeout</name>

<value>120000</value>

</property>

<!-- When a regionserver hits a ZooKeeper session expiry, it will restart instead of abort -->

<property>

<name>hbase.regionserver.restart.on.zk.expire</name>

<value>true</value>

</property>

</configuration>
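
hbase.tmp.dir and hbase.zookeeper.property.dataDir above point at /home/hbase/tmp and /home/zookeeper/data. A small hedged preparation step (assuming the external zookeeper-3.4.6 install shown later) is to create the HBase temp directory and confirm that the dataDir really matches zoo.cfg:

mkdir -p /home/hbase/tmp                                  # local temp dir referenced by hbase.tmp.dir
grep dataDir /usr/local/zookeeper-3.4.6/conf/zoo.cfg      # should print dataDir=/home/zookeeper/data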

1.3.5. Edit the regionservers File

[root@localhost65 conf]#

[root@localhost65 conf]# vim regionservers

localhost65    # this host's hostname

[root@localhost65 conf]#

1.3.6. Start the HBase Service

[root@localhost65 bin]# pwd

/usr/local/hbase-1.1.5/bin

[root@localhost65 bin]# ls

draining_servers.rb hbase.cmd hbase-daemons.sh master-backup.sh rolling-restart.sh stop-hbase.sh

get-active-master.rb hbase-common.sh hbase-jruby region_mover.rb shutdown_regionserver.rb test

graceful_stop.sh hbase-config.cmd hirb.rb regionservers.sh start-hbase.cmd thread-pool.rb

hbase hbase-config.sh local-master-backup.sh region_status.rb start-hbase.sh zookeepers.sh

hbase-cleanup.sh hbase-daemon.sh local-regionservers.sh replication stop-hbase.cmd

[root@localhost65 bin]#

[root@localhost65 bin]# start-hbase.sh

starting master, logging to /usr/local/hbase-1.1.5/logs/hbase-root-master-localhost65.out

Java HotSpot(TM) 64-Bit Server VM warning: ignoring option PermSize=128m; support was removed in 8.0

Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=128m; support was removed in 8.0

localhost65: starting regionserver, logging to /usr/local/hbase-1.1.5/bin/../logs/hbase-root-regionserver-localhost65.out

localhost65: Java HotSpot(TM) 64-Bit Server VM warning: ignoring option PermSize=128m; support was removed in 8.0

localhost65: Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=128m; support was removed in 8.0

[root@localhost65 bin]#

1.3.7. Check Whether the HBase Service Has Started

[root@localhost65 bin]# jps

24272 SecondaryNameNode

5219 Main

24534 NodeManager

24439 ResourceManager

24152 DataNode

25400 HMaster    # HMaster is running: HBase has started successfully

17960 RunJar

12841 RunJar

25867 Jps

25516 HRegionServer

4335 QuorumPeerMain

24063 NameNode

[root@localhost65 bin]#

1.3.8. Log In to ZooKeeper and Inspect HBase

[root@localhost65 local]# cd zookeeper-3.4.6/

[root@localhost65 zookeeper-3.4.6]# ls

bin          conf        docs             lib          README_packaging.txt  src                  zookeeper-3.4.6.jar.md5

build.xml    contrib     ivysettings.xml  LICENSE.txt  README.txt            zookeeper-3.4.6.jar  zookeeper-3.4.6.jar.sha1

CHANGES.txt  dist-maven  ivy.xml          NOTICE.txt   recipes               zookeeper-3.4.6.jar.asc

[root@localhost65 zookeeper-3.4.6]# bin/zk

zkCleanup.sh  zkCli.cmd  zkCli.sh  zkEnv.cmd  zkEnv.sh  zkServer.cmd  zkServer.sh

[root@localhost65 zookeeper-3.4.6]# bin/zkCli.sh

Connecting to localhost:2181

2017-08-25 16:11:38,751 [myid:] - INFO [main:Environment@100] - Client environment:zookeeper.version=3.4.6-1569965, built on 02/20/2014 09:09 GMT

2017-08-25 16:11:38,757 [myid:] - INFO [main:Environment@100] - Client environment:host.name=localhost65

2017-08-25 16:11:38,757 [myid:] - INFO [main:Environment@100] - Client environment:java.version=1.8.0_65

2017-08-25 16:11:38,760 [myid:] - INFO [main:Environment@100] - Client environment:java.vendor=Oracle Corporation

2017-08-25 16:11:38,760 [myid:] - INFO [main:Environment@100] - Client environment:java.home=/usr/local/jdk1.8.0_65/jre

2017-08-25 16:11:38,761 [myid:] - INFO [main:Environment@100] - Client environment:java.class.path=/usr/local/zookeeper-3.4.6/bin/../build/classes:/usr/local/zookeeper-3.4.6/bin/../build/lib/*.jar:/usr/local/zookeeper-3.4.6/bin/../lib/slf4j-log4j12-1.6.1.jar:/usr/local/zookeeper-3.4.6/bin/../lib/slf4j-api-1.6.1.jar:/usr/local/zookeeper-3.4.6/bin/../lib/netty-3.7.0.Final.jar:/usr/local/zookeeper-3.4.6/bin/../lib/log4j-1.2.16.jar:/usr/local/zookeeper-3.4.6/bin/../lib/jline-0.9.94.jar:/usr/local/zookeeper-3.4.6/bin/../zookeeper-3.4.6.jar:/usr/local/zookeeper-3.4.6/bin/../src/java/lib/*.jar:/usr/local/zookeeper-3.4.6/bin/../conf:.:/usr/local/jdk1.8.0_65/lib

2017-08-25 16:11:38,761 [myid:] - INFO [main:Environment@100] - Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib

2017-08-25 16:11:38,762 [myid:] - INFO [main:Environment@100] - Client environment:java.io.tmpdir=/tmp

2017-08-25 16:11:38,764 [myid:] - INFO [main:Environment@100] - Client environment:java.compiler=<NA>

2017-08-25 16:11:38,765 [myid:] - INFO [main:Environment@100] - Client environment:os.name=Linux

2017-08-25 16:11:38,765 [myid:] - INFO [main:Environment@100] - Client environment:os.arch=amd64

2017-08-25 16:11:38,765 [myid:] - INFO [main:Environment@100] - Client environment:os.version=2.6.32-431.el6.x86_64

2017-08-25 16:11:38,765 [myid:] - INFO [main:Environment@100] - Client environment:user.name=root

2017-08-25 16:11:38,765 [myid:] - INFO [main:Environment@100] - Client environment:user.home=/root

2017-08-25 16:11:38,765 [myid:] - INFO [main:Environment@100] - Client environment:user.dir=/usr/local/zookeeper-3.4.6

2017-08-25 16:11:38,769 [myid:] - INFO [main:ZooKeeper@438] - Initiating client connection, connectString=localhost:2181 sessionTimeout=30000 watcher=org.apache.zookeeper.ZooKeeperMain$MyWatcher@67424e82

Welcome to ZooKeeper!

2017-08-25 16:11:38,835 [myid:] - INFO [main-SendThread(localhost:2181):ClientCnxn$SendThread@975] - Opening socket connection to server localhost/0:0:0:0:0:0:0:1:2181. Will not attempt to authenticate using SASL (unknown error)

JLine support is enabled

2017-08-25 16:11:38,961 [myid:] - INFO [main-SendThread(localhost:2181):ClientCnxn$SendThread@852] - Socket connection established to localhost/0:0:0:0:0:0:0:1:2181, initiating session

2017-08-25 16:11:38,981 [myid:] - INFO [main-SendThread(localhost:2181):ClientCnxn$SendThread@1235] - Session establishment complete on server localhost/0:0:0:0:0:0:0:1:2181, sessionid = 0x15e17152f380013, negotiated timeout = 30000

WATCHER::

WatchedEvent state:SyncConnected type:None path:null

[zk: localhost:2181(CONNECTED) 0] ls /

[zookeeper, hbase]

[zk: localhost:2181(CONNECTED) 1] ls /hbase

[replication, meta-region-server, rs, splitWAL, backup-masters, table-lock, flush-table-proc, region-in-transition, online-snapshot, switch, master, running, recovering-regions, draining, namespace, hbaseid, table]

[zk: localhost:2181(CONNECTED) 2]
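
From the same zkCli session, the registered regionservers can also be listed from the rs znode shown above, e.g.:

ls /hbase/rs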

1.3.9. Stop the HBase Service

[root@localhost65 bin]#

[root@localhost65 bin]# stop-hbase.sh

stopping hbase.....................

[root@localhost65 bin]#

[root@localhost65 bin]# jps

24272 SecondaryNameNode

5219 Main

24534 NodeManager

24439 ResourceManager

24152 DataNode

17960 RunJar

12841 RunJar

26220 Jps

4335 QuorumPeerMain

24063 NameNode

[root@localhost65 bin]#

1.3.10. Test the HBase Service

In a browser, open http://192.168.3.65:16010/; the default HBase web UI port is 16010.
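
If no browser is available on the server itself, a quick hedged check from the shell is to see whether the master web UI answers and to ask the HBase shell for the cluster status:

curl -sI http://192.168.3.65:16010/ | head -n 1     # any HTTP status line means the master UI is up
echo "status" | hbase shell                         # prints the number of live/dead servers and average load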
