设为首页 收藏本站
查看: 446|回复: 0

[经验分享] hadoop集群搭建(一)HDFS的namenode的HA搭建

[复制链接]

尚未签到

发表于 2018-10-28 14:17:47 | 显示全部楼层 |阅读模式
  HDFS的namenode的HA搭建,准备好机器
  hadoop01    IP:192.168.216.203   GATEWAY:192.168.216.2
  hadoop02    IP:192.168.216.204   GATEWAY:192.168.216.2
  hadoop03    IP:192.168.216.205   GATEWAY:192.168.216.2
  配置网卡
  [root@hadoop01 ~]# vim /etc/sysconfig/network-scripts/ifcfg-eth0
  DEVICE=eth0
  TYPE=Ethernet
  HWADDR=00:0C:29:6B:CD:B3                             网卡MAC地址
  ONBOOT=yes                                                        yes表示开机启动
  NM_CONTROLLED=yes
  BOOTPROTO=none
  IPADDR=192.168.216.203                                   IP地址
  PREFIX=24
  GATEWAY=192.168.216.2                                    网关
  DNS1=8.8.8.8                                                        域名解析服务器地址一
  DNS2=192.168.10.254 域名解析服务器地址           域名解析服务器地址二
  DEFROUTE=yes
  IPV4_FAILURE_FATAL=yes
  IPV6INIT=no
  NAME="System eth0"
  安装java JDK 并配置环境变量
  [root@hadoop01 jdk1.8.0_152]# vim /etc/profile
  #my setting
  export JAVA_HOME=/usr/local/jdk1.8.0_152/
  export PATH=$PATH:$JAVA_HOME/bin
  配置hadoop01/hadoop02/hadoop03之间互相ssh免密登陆
  [root@hadoop01 hadoop-2.7.1]# vim ./etc/hadoop/hadoop-env.sh
  # The java implementation to use.
  export JAVA_HOME=/usr/local/jdk1.8.0_152/
  [root@hadoop01 ~]# vim /usr/local/hadoop-2.7.1/etc/hadoop/core-site.xml
  
  
  <configuration>
    <property>
      <name>fs.defaultFS</name>
      <value>hdfs://qian</value>
    </property>
    <property>
      <name>ha.zookeeper.quorum</name>
      <value>hadoop01:2181,hadoop02:2181,hadoop03:2181</value>
    </property>
  </configuration>
  [root@hadoop01 ~]# vim /usr/local/hadoop-2.7.1/etc/hadoop/hdfs-site.xml
  
  <configuration>
    <property>
      <name>dfs.nameservices</name>
      <value>qian</value>
    </property>
    <property>
      <name>dfs.ha.namenodes.qian</name>
      <value>nn1,nn2</value>
    </property>
    <property>
      <name>dfs.namenode.rpc-address.qian.nn1</name>
      <value>hadoop01:9000</value>
    </property>
    <property>
      <name>dfs.namenode.rpc-address.qian.nn2</name>
      <value>hadoop02:9000</value>
    </property>
    <property>
      <name>dfs.namenode.http-address.qian.nn1</name>
      <value>hadoop01:50070</value>
    </property>
    <property>
      <name>dfs.namenode.http-address.qian.nn2</name>
      <value>hadoop02:50070</value>
    </property>
    <property>
      <name>dfs.namenode.shared.edits.dir</name>
      <value>qjournal://hadoop01:8485;hadoop02:8485;hadoop03:8485/qian</value>
    </property>
    <property>
      <name>dfs.journalnode.edits.dir</name>
      <value>/home/hadata/journalnode/data</value>
    </property>
    <property>
      <name>dfs.ha.automatic-failover.enabled</name>
      <value>true</value>
    </property>
    <property>
      <name>dfs.client.failover.proxy.provider.qian</name>
      <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
    </property>
    <property>
      <name>dfs.ha.fencing.methods</name>
      <value>sshfence</value>
    </property>
    <property>
      <name>dfs.ha.fencing.ssh.private-key-files</name>
      <value>/root/.ssh/id_rsa</value>
    </property>
    <property>
      <name>dfs.ha.fencing.ssh.connect-timeout</name>
      <value>30000</value>
    </property>
    <property>
      <name>dfs.namenode.name.dir</name>
      <value>/home/hadata/dfs/name</value>
    </property>
    <property>
      <name>dfs.datanode.data.dir</name>
      <value>/home/hadata/dfs/data</value>
    </property>
    <property>
      <name>dfs.blocksize</name>
      <value>134217728</value>
    </property>
    <property>
      <name>dfs.permissions.enabled</name>
      <value>false</value>
    </property>
    <property>
      <name>dfs.replication</name>
      <value>3</value>
    </property>
  </configuration>
  [root@hadoop01 ~]# vim /usr/local/hadoop-2.7.1/etc/hadoop/slaves
  hadoop01
  hadoop02
  hadoop03
  安装并配置zookeeper
  [root@hadoop01 zookeeper-3.4.10]# tar -zxvf /home/zookeeper-3.4.10.tar.gz -C /usr/local/
  [root@hadoop01 zookeeper-3.4.10]# cp ./conf/zoo_sample.cfg ./conf/zoo.cfg
  # The number of milliseconds of each tick
  tickTime=2000
  # The number of ticks that the initial
  # synchronization phase can take
  initLimit=5
  # The number of ticks that can pass between
  # sending a request and getting an acknowledgement
  syncLimit=2
  # the directory where the snapshot is stored.
  # do not use /tmp for storage, /tmp here is just
  # example sakes.
  dataDir=/home/zookeeperdata
  # the port at which the clients will connect
  clientPort=2181
  server.1=hadoop01:2888:3888
  server.2=hadoop02:2888:3888
  server.3=hadoop03:2888:3888
  [root@hadoop01 zookeeper-3.4.10]# scp -r /usr/local/zookeeper-3.4.10 hadoop02:/usr/local/
  [root@hadoop01 zookeeper-3.4.10]# scp -r /usr/local/zookeeper-3.4.10 hadoop03:/usr/local/
  配置三台机器的环境变量
  [root@hadoop01 zookeeper-3.4.10]# vim /etc/profile
  #my setting
  export JAVA_HOME=/usr/local/jdk1.8.0_152/
  export HADOOP_HOME=/usr/local/hadoop-2.7.1/
  export ZK_HOME=/usr/local/zookeeper-3.4.10/
  export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$ZK_HOME/bin
  [root@hadoop01 zookeeper-3.4.10]# scp -r /etc/profile hadoop02:/etc
  profile
  [root@hadoop01 zookeeper-3.4.10]# scp -r /etc/profile hadoop03:/etc
  profile
  [root@hadoop01 ~]# source /etc/profile
  [root@hadoop02 ~]# source /etc/profile
  [root@hadoop03 ~]# source /etc/profile
  [root@hadoop01 zookeeper-3.4.10]# mkdir /home/zookeeperdata
  [root@hadoop01 zookeeper-3.4.10]# vim /home/zookeeperdata/myid                         myid文件里输入          1
  1
  [root@hadoop02 ~]# mkdir /home/zookeeperdata
  [root@hadoop02 ~]# vim /home/zookeeperdata/myid                                                   myid文件里输入          2
  2
  [root@hadoop03 ~]# mkdir /home/zookeeperdata
  [root@hadoop03 ~]# vim /home/zookeeperdata/myid                                                    myid文件里输入          3
  3
  [root@hadoop01 zookeeper-3.4.10]# zkServer.sh status
  ZooKeeper JMX enabled by default
  Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg
  Mode: follower
  [root@hadoop02 ~]# zkServer.sh status
  ZooKeeper JMX enabled by default
  Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg
  Mode: follower
  [root@hadoop03 ~]# zkServer.sh status
  ZooKeeper JMX enabled by default
  Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg
  Mode: leader
  [root@hadoop01 zookeeper-3.4.10]# scp -r /usr/local/hadoop-2.7.1/ hadoop02:/usr/local/
  [root@hadoop01 zookeeper-3.4.10]# scp -r /usr/local/hadoop-2.7.1/ hadoop03:/usr/local/
  [root@hadoop01 zookeeper-3.4.10]# hadoop-daemon.sh start journalnode
  [root@hadoop02 zookeeper-3.4.10]# hadoop-daemon.sh start journalnode
  [root@hadoop03 zookeeper-3.4.10]# hadoop-daemon.sh start journalnode
  [root@hadoop01 zookeeper-3.4.10]# hadoop namenode -format
  [root@hadoop01 zookeeper-3.4.10]# hadoop-daemon.sh start namenode
  starting namenode, logging to /usr/local/hadoop-2.7.1/logs/hadoop-root-namenode-hadoop01.out
  同步已启动的namenode的元数据到未启动的namenode
  [root@hadoop02 ~]# hdfs namenode -bootstrapStandby
  确认zookeeper集群是否启动
  [root@hadoop01 zookeeper-3.4.10]# zkServer.sh status
  ZooKeeper JMX enabled by default
  Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg
  Mode: follower
  [root@hadoop02 ~]# zkServer.sh status
  ZooKeeper JMX enabled by default
  Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg
  Mode: follower
  [root@hadoop03 ~]# zkServer.sh status
  ZooKeeper JMX enabled by default
  Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg
  Mode: leader
  [root@hadoop01 zookeeper-3.4.10]# hdfs zkfc -formatZK
  .
  .
  .
  .
  ....INFO ha.ActiveStandbyElector: Successfully created /hadoop-ha/qian in ZK.
  .
  .
  .
  [root@hadoop03 ~]# zkCli.sh
  WatchedEvent state:SyncConnected type:None path:null
  [zk: localhost:2181(CONNECTED) 0] ls /
  [zookeeper, hadoop-ha]
  [zk: localhost:2181(CONNECTED) 1] ls /hadoop-ha
  [qian]
  [zk: localhost:2181(CONNECTED) 2] ls /hadoop-ha/qian
  []
  注意:退出zkCli,输入quit
  [root@hadoop01 zookeeper-3.4.10]# start-dfs.sh
  [root@hadoop01 zookeeper-3.4.10]# jps
  3281 JournalNode
  4433 Jps
  3475 NameNode
  4068 DataNode
  3110 QuorumPeerMain
  4367 DFSZKFailoverController
  [root@hadoop02 ~]# jps
  3489 DataNode
  3715 Jps
  2970 QuorumPeerMain
  3162 JournalNode
  3646 DFSZKFailoverController
  3423 NameNode
  [root@hadoop03 ~]# zkCli.sh
  zkCli.sh
  WATCHER::
  WatchedEvent state:SyncConnected type:None path:null
  [zk: localhost:2181(CONNECTED) 4] ls /hadoop-ha/qian
  [ActiveBreadCrumb, ActiveStandbyElectorLock]
  [zk: localhost:2181(CONNECTED) 2] get /hadoop-ha/qian/ActiveBreadCrumb
  qiannn1hadoop01 �F(�>
  cZxid = 0x10000000a
  ctime = Sat Jan 13 01:40:21 CST 2018
  mZxid = 0x10000000a
  mtime = Sat Jan 13 01:40:21 CST 2018
  pZxid = 0x10000000a
  cversion = 0
  dataVersion = 0
  aclVersion = 0
  ephemeralOwner = 0x0
  dataLength = 31
  numChildren = 0
  [root@hadoop01 hadoop-2.7.1]# hdfs dfs -put ./README.txt hdfs:/
  [root@hadoop01 hadoop-2.7.1]# hdfs dfs -ls /

  18/01/13 01:58:24 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java>  Found 1 items
  -rw-r--r--   3 root supergroup       1366 2018-01-13 01:57 /README.txt
  测试是否能自动故障转移(failover)
  [root@hadoop01 hadoop-2.7.1]# jps
  3281 JournalNode
  3475 NameNode
  4644 Jps
  4068 DataNode
  3110 QuorumPeerMain
  4367 DFSZKFailoverController
  [root@hadoop01 hadoop-2.7.1]# kill -9 3475
  [root@hadoop03 ~]# zkCli.sh
  ActiveBreadCrumb           ActiveStandbyElectorLock
  [zk: localhost:2181(CONNECTED) 6] get /hadoop-ha/qian/ActiveBreadCrumb
  qiannn2hadoop02 �F(�>
  cZxid = 0x10000000a
  ctime = Sat Jan 13 01:40:21 CST 2018
  mZxid = 0x100000011
  mtime = Sat Jan 13 02:01:57 CST 2018
  pZxid = 0x10000000a
  cversion = 0
  dataVersion = 1
  aclVersion = 0
  ephemeralOwner = 0x0
  dataLength = 31
  numChildren = 0
  [root@hadoop02 ~]# jps
  3489 DataNode
  3989 Jps
  2970 QuorumPeerMain
  3162 JournalNode
  3646 DFSZKFailoverController
  3423 NameNode
  注意:namenode1 宕机后会自动故障转移到 namenode2;若此后 namenode2 也宕机,则两个 namenode 都不可用,集群无法对外服务——已宕机的 namenode1 不会被自动拉起,需要手动重新启动。
  配置集群时间同步
  HA搭建完毕


运维网声明 1、欢迎大家加入本站运维交流群:群②:261659950 群⑤:202807635 群⑦870801961 群⑧679858003
2、本站所有主题由该帖子作者发表,该帖子作者与运维网享有帖子相关版权
3、所有作品的著作权均归原作者享有,请您和我们一样尊重他人的著作权等合法权益。如果您对作品感到满意,请购买正版
4、禁止制作、复制、发布和传播具有反动、淫秽、色情、暴力、凶杀等内容的信息,一经发现立即删除。若您因此触犯法律,一切后果自负,我们对此不承担任何责任
5、所有资源均系网友上传或者通过网络收集,我们仅提供一个展示、介绍、观摩学习的平台,我们不对其内容的准确性、可靠性、正当性、安全性、合法性等负责,亦不承担任何法律责任
6、所有作品仅供您个人学习、研究或欣赏,不得用于商业或者其他用途,否则,一切后果均由您自己承担,我们对此不承担任何法律责任
7、如涉及侵犯版权等问题,请您及时通知我们,我们将立即采取措施予以解决
8、联系人Email:admin@iyunv.com 网址:www.yunweiku.com

所有资源均系网友上传或者通过网络收集,我们仅提供一个展示、介绍、观摩学习的平台,我们不对其承担任何法律责任,如涉及侵犯版权等问题,请您及时通知我们,我们将立即处理,联系人Email:kefu@iyunv.com,QQ:1061981298 本贴地址:https://www.yunweiku.com/thread-627582-1-1.html 上篇帖子: CentOS 6.5 搭建Hadoop 1.2.1集群 下篇帖子: hadoop集群搭建(二)YARN高可用
您需要登录后才可以回帖 登录 | 立即注册

本版积分规则

扫码加入运维网微信交流群X

扫码加入运维网微信交流群

扫描二维码加入运维网微信交流群,最新一手资源尽在官方微信交流群!快快加入我们吧...

扫描微信二维码查看详情

客服E-mail:kefu@iyunv.com 客服QQ:1061981298


QQ群⑦:运维网交流群⑦ QQ群⑧:运维网交流群⑧ k8s群:运维网kubernetes交流群


提醒:禁止发布任何违反国家法律、法规的言论与图片等内容;本站内容均来自个人观点与网络等信息,非本站认同之观点.


本站大部分资源是网友从网上搜集分享而来,其版权均归原作者及其网站所有,我们尊重他人的合法权益,如有内容侵犯您的合法权益,请及时与我们联系进行核实删除!



合作伙伴: 青云cloud

快速回复 返回顶部 返回列表