net.ipv6.conf.default.disable_ipv6=1
net.ipv6.conf.lo.disable_ipv6=1
sudo sysctl -p # applies the settings now; a reboot has the same effect
sudo /etc/init.d/networking restart
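To verify IPv6 is really off (a quick sanity check; if you still see inet6 addresses you may also need net.ipv6.conf.all.disable_ipv6=1 in addition to the two entries above):
cat /proc/sys/net/ipv6/conf/default/disable_ipv6 # should print 1
ip a | grep inet6 # should print nothing once IPv6 is fully disabled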
4. Cluster setup
Configure /opt/hadoop/etc/hadoop/{hadoop-env.sh, yarn-env.sh}: set JAVA_HOME in both files
export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
cd /opt/hadoop
mkdir -p tmp/{data,name} # on every node; name/ is used on the NameNode, data/ on the DataNodes
vi /etc/hosts # also set the hostname on each node to match (cloud1/cloud2/cloud3)
192.168.1.110 cloud1
192.168.1.112 cloud2
192.168.1.114 cloud3
vi /opt/hadoop/etc/hadoop/slaves
cloud2
cloud3
core-site.xml
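A minimal core-site.xml sketch for this layout (property names are standard Hadoop 2.x; the NameNode port 9000 and the tmp path are assumptions matching the directories created above):
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://cloud1:9000</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/opt/hadoop/tmp</value>
  </property>
</configuration>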
Reportedly dfs.datanode.data.dir (set in hdfs-site.xml below) needs to be emptied beforehand, otherwise the DataNode will not start; this is usually the stale-clusterID problem that shows up after re-running namenode -format.
hdfs-site.xml
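A matching hdfs-site.xml sketch (paths follow the tmp/{name,data} directories created above; dfs.replication=2 is an assumption for a cluster with two DataNodes):
<configuration>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:/opt/hadoop/tmp/name</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:/opt/hadoop/tmp/data</value>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>2</value>
  </property>
</configuration>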
yarn-site.xml
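A yarn-site.xml sketch with cloud1 as the ResourceManager, per the layout above:
<configuration>
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>cloud1</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
</configuration>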
mapred-site.xml
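The binary distribution usually ships only a template for this file, so copy it first; the one setting that matters here is the framework name:
cp etc/hadoop/mapred-site.xml.template etc/hadoop/mapred-site.xml
<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
</configuration>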
cd /opt/hadoop/
bin/hdfs namenode -format
sbin/start-dfs.sh # cloud1 NameNode SecondaryNameNode, cloud2 and cloud3 DataNode
sbin/start-yarn.sh # cloud1 ResourceManager, cloud2 and cloud3 NodeManager
jps
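If everything came up as described above, jps should show roughly the following (process IDs omitted):
on cloud1: NameNode, SecondaryNameNode, ResourceManager, Jps
on cloud2 and cloud3: DataNode, NodeManager, Jps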
Check cluster status: bin/hdfs dfsadmin -report
Check file and block layout: bin/hdfs fsck / -files -blocks
NameNode HDFS web UI: http://192.168.1.110:50070
ResourceManager web UI: http://192.168.1.110:8088
bin/hdfs dfs -mkdir /input
bin/hadoop jar ./share/hadoop/mapreduce/hadoop-mapreduce-examples-2.3.0.jar randomwriter input # 'input' is the job's output directory, resolved under /user/<username> in HDFS; it must not exist yet
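A typical follow-up that uses the /input directory created above (not in the original notes; wordcount is another job in the same examples jar, and its output path must not exist yet):
bin/hdfs dfs -put etc/hadoop/*.xml /input
bin/hadoop jar ./share/hadoop/mapreduce/hadoop-mapreduce-examples-2.3.0.jar wordcount /input /output
bin/hdfs dfs -cat /output/*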
5. Questions:
Q: 14/01/05 23:59:05 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
A: The shared libraries under /opt/hadoop/lib/native/ in the release are 32-bit; replace them with 64-bit builds.
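To confirm the architecture mismatch before replacing anything (assuming the usual library name shipped with the release):
file /opt/hadoop/lib/native/libhadoop.so.1.0.0 # reports 32-bit vs 64-bit ELF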
Q: SSH login prompts "Are you sure you want to continue connecting (yes/no)". How to suppress it?
A: Edit /etc/ssh/ssh_config and change "#   StrictHostKeyChecking ask" to "StrictHostKeyChecking no".
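The resulting stanza looks like this (it disables host-key prompting for all hosts; scoping it to something like "Host cloud*" is a safer variant):
Host *
    StrictHostKeyChecking no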
Q: The DataNodes on the two slaves cannot join the cluster.
A: Delete the lines containing 127.0.1.1 or localhost from /etc/hosts on each node.
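One way to strip those lines on every node (a sketch; -i.bak keeps a backup copy of /etc/hosts):
sudo sed -i.bak '/127\.0\.1\.1/d; /localhost/d' /etc/hosts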