Preface
This article collects the configuration files used for a Hadoop HA (high availability) setup.
For the installation walkthrough, see:
Installing a Windows + Linux dual-boot system: https://blog.csdn.net/IAmListening/article/details/89741854
Setting up a time-sync server and a yum repository: https://blog.csdn.net/IAmListening/article/details/89765148
Setting up Hadoop high availability: https://blog.csdn.net/IAmListening/article/details/89765843
Installing Hive and related exceptions: https://blog.csdn.net/IAmListening/article/details/89368823
Installing HBase: https://blog.csdn.net/IAmListening/article/details/89765957
vi hadoop-env.sh
export JAVA_HOME=/apps/jdk1.8.0_60
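A quick sanity check, assuming the JDK really was unpacked to /apps/jdk1.8.0_60 as above; this should print the Java version on every node before you go further:
/apps/jdk1.8.0_60/bin/java -version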
vi core-site.xml
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://bg19</value>
</property>
<property>
<name>ha.zookeeper.quorum</name>
<value>slave1:2181,slave2:2181,slave3:2181</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/software/hadoopdata/tmp</value>
</property>
<property>
<name>dfs.permissions.enabled</name>
<value>false</value>
</property>
<property>
<name>fs.trash.interval</name>
<value>36000000</value>
</property>
<property>
<name>fs.trash.checkpoint.interval</name>
<value>3600</value>
</property>
</configuration>
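Once HDFS is up (see the initialization commands after hdfs-site.xml below), a minimal check that clients resolve the logical nameservice bg19 rather than a single NameNode host; both commands should list the same root directory:
hdfs dfs -ls hdfs://bg19/
hdfs dfs -ls /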
vi hdfs-site.xml
<configuration>
<property>
<name>dfs.nameservices</name>
<value>bg19</value>
</property>
<property>
<name>dfs.ha.namenodes.bg19</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.rpc-address.bg19.nn1</name>
<value>master1:9000</value>
</property>
<property>
<name>dfs.namenode.http-address.bg19.nn1</name>
<value>master1:50070</value>
</property>
<property>
<name>dfs.namenode.rpc-address.bg19.nn2</name>
<value>master2:9000</value>
</property>
<property>
<name>dfs.namenode.http-address.bg19.nn2</name>
<value>master2:50070</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>/software/hadoop/data</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>/software/hadoop/name</value>
</property>
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://slave1:8485;slave2:8485;slave3:8485/bg19</value>
</property>
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/software/hadoop/journaldata</value>
</property>
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.bg19</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.fencing.methods</name>
<value>
sshfence
shell(/bin/true)
</value>
</property>
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/root/.ssh/id_rsa</value>
</property>
<property>
<name>dfs.ha.fencing.ssh.connect-timeout</name>
<value>30000</value>
</property>
</configuration>
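For reference, a typical first-time initialization sequence for this HA layout; this is a sketch assuming Hadoop 2.x with the sbin scripts on the PATH, and each command is run on the host named in the comment:
hadoop-daemon.sh start journalnode      # on slave1, slave2 and slave3
hdfs namenode -format                   # on master1 only, first format
hadoop-daemon.sh start namenode         # on master1
hdfs namenode -bootstrapStandby         # on master2, copies the formatted metadata from nn1
hdfs zkfc -formatZK                     # on master1, creates the HA znode in ZooKeeper
start-dfs.sh                            # on master1, starts NameNodes, DataNodes, JournalNodes and ZKFCs
hdfs haadmin -getServiceState nn1       # should report active or standby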
vi mapred-site.xml
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
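With mapreduce.framework.name set to yarn, a simple smoke test is to submit the stock pi example once YARN is running; the jar path below assumes the examples jar shipped with the Hadoop 2.x distribution and the exact file name depends on your version:
hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar pi 2 10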
vi yarn-site.xml
<configuration>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.resourcemanager.cluster-id</name>
<value>bg_hyarn</value>
</property>
<!-- per-node RM id: set to rm1 on master2 and to rm2 on master1 -->
<property>
<name>yarn.resourcemanager.ha.id</name>
<value>rm1</value>
</property>
<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rm1,rm2</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm1</name>
<value>master2</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm2</name>
<value>master1</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address.rm1</name>
<value>master2:8088</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.https.address.rm1</name>
<value>master2:8090</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address.rm2</name>
<value>master1:8088</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.https.address.rm2</name>
<value>master1:8090</value>
</property>
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>slave1:2181,slave2:2181,slave3:2181</value>
</property>
</configuration>
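After the configuration is distributed, ResourceManager HA can be started and verified roughly as follows; note that rm1 lives on master2 and rm2 on master1 per the hostnames above, and that start-yarn.sh only starts the ResourceManager on the node where it is run:
start-yarn.sh                           # on master2, starts the local ResourceManager and all NodeManagers
yarn-daemon.sh start resourcemanager    # on master1, starts the second ResourceManager
yarn rmadmin -getServiceState rm1       # should report active
yarn rmadmin -getServiceState rm2       # should report standby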
vi slaves
slave1
slave2
slave3
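Finally, the configuration directory must be identical on every node. A hedged example of pushing it out with scp, assuming Hadoop is installed at $HADOOP_HOME on all hosts and that root is used for SSH (the fencing key above is /root/.ssh/id_rsa); adjust the path and user to your environment:
for host in master2 slave1 slave2 slave3; do scp -r $HADOOP_HOME/etc/hadoop/ root@$host:$HADOOP_HOME/etc/; done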