Setting Up a Hadoop High Availability Cluster (Detailed Guide)
Node Layout
| IP | Hostname | Role | Software |
| --- | --- | --- | --- |
| 192.168.200.201 | hadoop-01 | master node | jdk, hadoop, zookeeper |
| 192.168.200.202 | hadoop-02 | standby master node | jdk, hadoop, zookeeper |
| 192.168.200.203 | hadoop-03 | data/worker node | jdk, hadoop, zookeeper |
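Everything below refers to the machines by hostname, so each node's /etc/hosts needs the hostname-to-IP mappings (append these lines on all three nodes):
192.168.200.201 hadoop-01
192.168.200.202 hadoop-02
192.168.200.203 hadoop-03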
Configure a Static IP
# the interface file may be named ifcfg-ens33 on some systems
vi /etc/sysconfig/network-scripts/ifcfg-eno16777736
# restart the network service
service network restart
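As a sketch, the static-IP configuration for hadoop-01 might look like the following; the device name, gateway, and DNS values are assumptions that must match your own network:
TYPE=Ethernet
BOOTPROTO=static
DEVICE=eno16777736
ONBOOT=yes
IPADDR=192.168.200.201
NETMASK=255.255.255.0
# gateway and DNS below are assumptions for this subnet
GATEWAY=192.168.200.2
DNS1=192.168.200.2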
Configure Passwordless SSH Login
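A typical way to set this up is to generate a key pair on each node and copy the public key to all three hosts (including the node itself):
ssh-keygen -t rsa
ssh-copy-id root@hadoop-01
ssh-copy-id root@hadoop-02
ssh-copy-id root@hadoop-03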
Deploy the JDK
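For example, assuming the JDK tarball has already been uploaded to /root/software (the archive name is an assumption; extracting it yields the jdk1.8.0_251 directory used in the environment variables below):
cd /root/software
tar -zxvf jdk-8u251-linux-x64.tar.gz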
Deploy ZooKeeper
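A minimal sketch, assuming the same tarball layout: extract ZooKeeper, create zoo.cfg from the shipped sample, point dataDir at a local directory, list the three servers, and write each node's myid. ZooKeeper must be running on all three nodes before the zkfc format step later on.
cd /root/software
tar -zxvf zookeeper-3.4.8.tar.gz
cd zookeeper-3.4.8/conf
cp zoo_sample.cfg zoo.cfg
# append to zoo.cfg (dataDir is an assumed path):
#   dataDir=/root/software/zookeeper-3.4.8/data
#   server.1=hadoop-01:2888:3888
#   server.2=hadoop-02:2888:3888
#   server.3=hadoop-03:2888:3888
mkdir -p /root/software/zookeeper-3.4.8/data
# write 1 here; use 2 on hadoop-02 and 3 on hadoop-03
echo 1 > /root/software/zookeeper-3.4.8/data/myid
# start ZooKeeper on every node
zkServer.sh start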
Deploy Hadoop
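Extract the Hadoop tarball to the same directory (the archive name is an assumption; the path matches HADOOP_HOME below). All of the configuration files edited in the following steps live under etc/hadoop:
cd /root/software
tar -zxvf hadoop-2.7.7.tar.gz
cd hadoop-2.7.7/etc/hadoop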
Edit hadoop-env.sh
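At minimum, point JAVA_HOME at the JDK installed above, since Hadoop's scripts do not reliably pick it up from the environment:
export JAVA_HOME=/root/software/jdk1.8.0_251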
Edit core-site.xml
<configuration>
    <!-- set the HDFS nameservice to ns1 -->
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://ns1/</value>
    </property>
    <!-- Hadoop temporary directory -->
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/root/software/hadoop-2.7.7/tmp</value>
    </property>
    <!-- ZooKeeper quorum addresses -->
    <property>
        <name>ha.zookeeper.quorum</name>
        <value>hadoop-01:2181,hadoop-02:2181,hadoop-03:2181</value>
    </property>
</configuration>
Edit hdfs-site.xml
<configuration>
    <!-- HDFS nameservice ns1; must match the value in core-site.xml -->
    <property>
        <name>dfs.nameservices</name>
        <value>ns1</value>
    </property>
    <!-- ns1 has two NameNodes: nn1 and nn2 -->
    <property>
        <name>dfs.ha.namenodes.ns1</name>
        <value>nn1,nn2</value>
    </property>
    <!-- RPC address of nn1 -->
    <property>
        <name>dfs.namenode.rpc-address.ns1.nn1</name>
        <value>hadoop-01:9000</value>
    </property>
    <!-- HTTP address of nn1 -->
    <property>
        <name>dfs.namenode.http-address.ns1.nn1</name>
        <value>hadoop-01:50070</value>
    </property>
    <!-- RPC address of nn2 -->
    <property>
        <name>dfs.namenode.rpc-address.ns1.nn2</name>
        <value>hadoop-02:9000</value>
    </property>
    <!-- HTTP address of nn2 -->
    <property>
        <name>dfs.namenode.http-address.ns1.nn2</name>
        <value>hadoop-02:50070</value>
    </property>
    <!-- where the NameNode metadata (shared edit log) is stored on the JournalNodes -->
    <property>
        <name>dfs.namenode.shared.edits.dir</name>
        <value>qjournal://hadoop-01:8485;hadoop-02:8485;hadoop-03:8485/ns1</value>
    </property>
    <!-- local directory where each JournalNode stores its data -->
    <property>
        <name>dfs.journalnode.edits.dir</name>
        <value>/root/software/hadoop-2.7.7/journaldata</value>
    </property>
    <!-- enable automatic NameNode failover -->
    <property>
        <name>dfs.ha.automatic-failover.enabled</name>
        <value>true</value>
    </property>
    <!-- failover proxy provider used by HDFS clients -->
    <property>
        <name>dfs.client.failover.proxy.provider.ns1</name>
        <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
    </property>
    <!-- fencing methods; multiple methods are separated by newlines, one method per line -->
    <property>
        <name>dfs.ha.fencing.methods</name>
        <value>
            sshfence
            shell(/bin/true)
        </value>
    </property>
    <!-- sshfence requires passwordless SSH -->
    <property>
        <name>dfs.ha.fencing.ssh.private-key-files</name>
        <value>/root/.ssh/id_rsa</value>
    </property>
    <!-- sshfence connection timeout (ms) -->
    <property>
        <name>dfs.ha.fencing.ssh.connect-timeout</name>
        <value>30000</value>
    </property>
</configuration>
Configure yarn-site.xml
<configuration>
    <!-- enable ResourceManager HA -->
    <property>
        <name>yarn.resourcemanager.ha.enabled</name>
        <value>true</value>
    </property>
    <!-- RM cluster id -->
    <property>
        <name>yarn.resourcemanager.cluster-id</name>
        <value>yrc</value>
    </property>
    <!-- logical ids of the ResourceManagers -->
    <property>
        <name>yarn.resourcemanager.ha.rm-ids</name>
        <value>rm1,rm2</value>
    </property>
    <!-- hostnames of the two ResourceManagers -->
    <property>
        <name>yarn.resourcemanager.hostname.rm1</name>
        <value>hadoop-01</value>
    </property>
    <property>
        <name>yarn.resourcemanager.hostname.rm2</name>
        <value>hadoop-02</value>
    </property>
    <!-- ZooKeeper cluster addresses -->
    <property>
        <name>yarn.resourcemanager.zk-address</name>
        <value>hadoop-01:2181,hadoop-02:2181,hadoop-03:2181</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
</configuration>
Modify mapred-site.xml (the file does not exist by default; create it from the template): cp mapred-site.xml.template mapred-site.xml
<configuration>
    <!-- use YARN as the MapReduce resource scheduling framework -->
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
</configuration>
Configure the slaves file
hadoop-01
hadoop-02
hadoop-03
Edit yarn-env.sh
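As with hadoop-env.sh, set JAVA_HOME explicitly:
export JAVA_HOME=/root/software/jdk1.8.0_251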
Configure environment variables (append to /etc/profile, which is distributed to the other nodes below)
export JAVA_HOME=/root/software/jdk1.8.0_251
export PATH=$PATH:$JAVA_HOME/bin
export HADOOP_HOME=/root/software/hadoop-2.7.7
export PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin
export ZOOKEEPER_HOME=/root/software/zookeeper-3.4.8
export PATH=$PATH:$ZOOKEEPER_HOME/bin
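Reload the profile so the variables take effect in the current shell:
source /etc/profile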
Distribute the files
# run on hadoop-01
scp -r /root/software/hadoop-2.7.7 root@hadoop-02:/root/software
scp -r /root/software/hadoop-2.7.7 root@hadoop-03:/root/software
scp -r /etc/profile root@hadoop-02:/etc
scp -r /etc/profile root@hadoop-03:/etc
Start Hadoop
Start the JournalNodes on all three machines (this manual step is only required the first time Hadoop is started; after that the JournalNodes no longer need to be started by hand)
hadoop-daemon.sh start journalnode
Run the format operations on hadoop-01: format the namenode and initialize the zkfc state in ZooKeeper
hdfs namenode -format
hdfs zkfc -formatZK
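Note: the bootstrapStandby step below copies the freshly formatted metadata from hadoop-01 over HTTP, so the namenode on hadoop-01 should be running before it is executed:
# on hadoop-01
hadoop-daemon.sh start namenode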
Synchronize the NameNode metadata to the standby: run the bootstrap command on hadoop-02
hdfs namenode -bootstrapStandby
Start
# on hadoop-01
start-all.sh
hadoop-daemon.sh start zkfc
# on hadoop-02
yarn-daemon.sh start resourcemanager
hadoop-daemon.sh start zkfc
Stop
# on hadoop-01
stop-all.sh
hadoop-daemon.sh stop zkfc
# on hadoop-02
yarn-daemon.sh stop resourcemanager
hadoop-daemon.sh stop zkfc
Check the processes
[root@hadoop-01 ~]# jps
3619 ResourceManager
3734 NodeManager
2599 NameNode
3351 JournalNode
3511 DFSZKFailoverController
2696 DataNode
3788 Jps
2909 QuorumPeerMain
[root@hadoop-02 ~]# jps
2705 QuorumPeerMain
2499 NameNode
3207 Jps
3065 NodeManager
2890 JournalNode
2557 DataNode
2975 DFSZKFailoverController
[root@hadoop-03 ~]# jps
2496 DataNode
2753 JournalNode
2599 QuorumPeerMain
2986 Jps
2844 NodeManager
Verification
Check the active master: open 192.168.200.201:50070 in a browser (the NameNode should report active).
Check the standby master: open 192.168.200.202:50070 (the NameNode should report standby).
Kill the namenode process on the active master and verify that hadoop-02 automatically takes over as the active namenode.
# use jps to find the namenode pid on hadoop-01, then kill it
[root@hadoop-01 ~]# jps
3619 ResourceManager
3734 NodeManager
2599 NameNode
3351 JournalNode
3511 DFSZKFailoverController
4119 Jps
2696 DataNode
2909 QuorumPeerMain
[root@hadoop-01 ~]# kill 2599
Open 192.168.200.202:50070 again; the NameNode on hadoop-02 should now be active.
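The HA state can also be queried from the command line (nn1 and nn2 are the NameNode IDs from hdfs-site.xml; the query against the killed nn1 will fail to connect until it is restarted):
hdfs haadmin -getServiceState nn1
hdfs haadmin -getServiceState nn2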
This completes the Hadoop high availability cluster setup.