Here we install a three-node cluster. Cluster IPs: 192.168.157.132-134
master: 192.168.157.132
slaves: 192.168.157.133-134
HBase depends on ZooKeeper; install that on your own (not covered here).
1. Configure passwordless SSH login
1> Install the SSH client on 132, 133, and 134:
yum install openssh-clients
2> Set up passwordless login on 132, 133, and 134. Do this on one node at a time, in order, so each node appends its key to the already-accumulated authorized_keys before copying the file on:
[root@localhost ~]# ssh-keygen -t rsa
Press Enter at every prompt to accept the defaults.
[root@localhost ~]# cd /root/.ssh/
[root@localhost .ssh]# cat id_rsa.pub >> authorized_keys
[root@localhost .ssh]# scp authorized_keys root@192.168.157.132:/root/.ssh
[root@localhost .ssh]# scp authorized_keys root@192.168.157.133:/root/.ssh
[root@localhost .ssh]# scp authorized_keys root@192.168.157.134:/root/.ssh
Fix the permissions (on all three machines):
[root@localhost ~]# chmod 700 .ssh/
[root@localhost ~]# chmod 600 ~/.ssh/authorized_keys
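A quick sanity check (IPs taken from the cluster list above): from any node, the following should print the remote hostname without asking for a password:
[root@localhost ~]# ssh root@192.168.157.133 hostname
[root@localhost ~]# ssh root@192.168.157.134 hostname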
2. Change the hostnames
1> Change it with the hostname command (reverts after a reboot):
[root@localhost .ssh]# hostname dev-157-132
[root@localhost .ssh]# hostname
dev-157-132
2> Edit the config file (permanent):
[root@localhost .ssh]# vim /etc/sysconfig/network
NETWORKING=yes
NETWORKING_IPV6=no
HOSTNAME=dev-157-132
GATEWAY=192.168.248.254
3> Apply the two steps above on each of the three machines in turn: dev-157-132, dev-157-133, dev-157-134
4> Edit /etc/hosts on all three machines
[root@localhost .ssh]# vim /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.157.132 dev-157-132
192.168.157.133 dev-157-133
192.168.157.134 dev-157-134
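A quick check that the new names resolve (assuming ICMP is not blocked):
[root@localhost ~]# ping -c 1 dev-157-133
[root@localhost ~]# ping -c 1 dev-157-134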
3. Disable the firewall (all three machines)
service iptables stop
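service iptables stop only lasts until the next reboot; on CentOS 6 the firewall can also be kept off across reboots with:
chkconfig iptables off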
4. Install the JDK (omitted here)...
5. Install Hadoop 2.2.0
[root@dev-157-132 servers]# tar -xf hadoop-2.2.0.tar.gz
[root@dev-157-132 servers]# cd hadoop-2.2.0/etc/hadoop
1> Edit hadoop-env.sh
[root@dev-157-132 hadoop]# vim hadoop-env.sh
export JAVA_HOME=/export/servers/jdk1.6.0_25    (point this at your JAVA_HOME)
Leave the other settings at their defaults.
2> Edit core-site.xml
[root@dev-157-132 hadoop]# vim core-site.xml
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://dev-157-132:9100</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/export/servers/hadoop-2.2.0/data/hadoop_tmp</value>
  </property>
  <property>
    <name>io.native.lib.available</name>
    <value>true</value>
  </property>
</configuration>
3> Edit mapred-site.xml
<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
</configuration>
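Note: a fresh Hadoop 2.2.0 tarball ships only a template for this file; if mapred-site.xml does not exist yet, create it from the template first:
[root@dev-157-132 hadoop]# cp mapred-site.xml.template mapred-site.xml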
4> Edit yarn-site.xml
[root@dev-157-132 hadoop]# vim yarn-site.xml
<configuration>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address</name>
    <value>dev-157-132:8031</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>dev-157-132:8030</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
  </property>
  <property>
    <name>yarn.resourcemanager.address</name>
    <value>dev-157-132:8032</value>
    <description>the host is the hostname of the ResourceManager and the port is the port on which the clients can talk to the Resource Manager.</description>
  </property>
</configuration>
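For MapReduce jobs to actually run on YARN, the NodeManagers usually also need the shuffle auxiliary service. This property is not in the original configuration; if reduce tasks hang, consider adding it inside the <configuration> block:
<property>
  <name>yarn.nodemanager.aux-services</name>
  <value>mapreduce_shuffle</value>
</property>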
5> Edit hdfs-site.xml
[root@dev-157-132 hadoop]# vim hdfs-site.xml
<configuration>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:/export/servers/hadoop-2.2.0/data/nn</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:/export/servers/hadoop-2.2.0/data/dfs</value>
  </property>
  <property>
    <name>dfs.permissions</name>
    <value>false</value>
  </property>
</configuration>
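With only two DataNodes, the default block replication factor of 3 cannot be satisfied. A common adjustment (an addition, not part of the original configuration) is to cap it at the DataNode count, inside the <configuration> block:
<property>
  <name>dfs.replication</name>
  <value>2</value>
</property>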
6> Edit slaves
[root@dev-157-132 hadoop]# vim slaves
dev-157-133
dev-157-134
7> scp the install directory to the slave machines
scp -r hadoop-2.2.0 root@192.168.157.133:/export/servers
scp -r hadoop-2.2.0 root@192.168.157.134:/export/servers
8> Set the environment variables on all three machines
[root@dev-157-132 hadoop]# vim /etc/profile
export HADOOP_HOME=/export/servers/hadoop-2.2.0
export HADOOP_CONF_DIR=/export/servers/hadoop-2.2.0/etc/hadoop
export PATH=$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
[root@dev-157-132 hadoop]# source /etc/profile
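As a quick check that the variables took effect (run it on each machine), the version command should report 2.2.0:
[root@dev-157-132 hadoop]# hadoop version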
9> Format HDFS, on the master:
hadoop namenode -format
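In Hadoop 2.x the hadoop namenode form still works but prints a deprecation warning; the current equivalent is:
hdfs namenode -format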
6. Start Hadoop, on the master:
start-all.sh
This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
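The warning is harmless; running start-dfs.sh followed by start-yarn.sh is the non-deprecated equivalent. To verify the cluster came up, jps should show roughly the following (assuming the configuration above and the default web ports):
On the master: NameNode, SecondaryNameNode, ResourceManager
On the slaves: DataNode, NodeManager
Web UIs: HDFS at http://dev-157-132:50070, YARN at http://dev-157-132:8088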