创建相关xml配置中的目录
mkdir -p /data/hadoop/mydata/yarn
4.解压zookeeper并修改环境变量
[root@hadoop01 ~]# tar zxvf zookeeper-3.4.6.tar.gz
[root@hadoop01 ~]# mv zookeeper-3.4.6 /data/zookeeper
[root@hadoop01 ~]# vi /etc/profile
##zookeeper
export ZOOKEEPER_HOME=/data/zookeeper
export PATH=$PATH:$ZOOKEEPER_HOME/bin:$ZOOKEEPER_HOME/conf
[root@hadoop01 ~]# source /etc/profile
5.修改zookeeper配置文件
[root@hadoop01 ~]# cd /data/zookeeper/conf/
[root@hadoop01 conf]# cp zoo_sample.cfg zoo.cfg
[root@hadoop01 conf]# vi zoo.cfg
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/data/hadoop/mydata/zookeeper
dataLogDir=/data/hadoop/mydata/zookeeperlog
# the port at which the clients will connect
clientPort=2190
server.1=hadoop01:2888:3888
server.2=hadoop02:2888:3888
server.3=hadoop03:2888:3888
server.4=hadoop04:2888:3888
server.5=hadoop05:2888:3888
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
创建目录
mkdir /data/hadoop/mydata/zookeeper
mkdir /data/hadoop/mydata/zookeeperlog
6.把配置好的hadoop、zookeeper文件目录复制到其他四台机器中
[root@hadoop01 ~]# scp -r /data/hadoop hadoop02:/data/
[root@hadoop01 ~]# scp -r /data/hadoop hadoop03:/data/
[root@hadoop01 ~]# scp -r /data/hadoop hadoop04:/data/
[root@hadoop01 ~]# scp -r /data/hadoop hadoop05:/data/
[root@hadoop01 ~]# scp -r /data/zookeeper hadoop02:/data/
[root@hadoop01 ~]# scp -r /data/zookeeper hadoop03:/data/
[root@hadoop01 ~]# scp -r /data/zookeeper hadoop04:/data/
[root@hadoop01 ~]# scp -r /data/zookeeper hadoop05:/data/
在hadoop02修改yarn-site.xml
[root@hadoop02 hadoop]# cd /data/hadoop/etc/hadoop/
把rm1修改成rm2
[root@hadoop02 hadoop]# vi yarn-site.xml
yarn.resourcemanager.ha.id
rm2
[root@hadoop01 ~]# vi /data/hadoop/mydata/zookeeper/myid
1
[root@hadoop02 ~]# vi /data/hadoop/mydata/zookeeper/myid
2
[root@hadoop03 ~]# vi /data/hadoop/mydata/zookeeper/myid
3
[root@hadoop04 ~]# vi /data/hadoop/mydata/zookeeper/myid
4
[root@hadoop05 ~]# vi /data/hadoop/mydata/zookeeper/myid
5
7、启动zookeeper
在五台机器上分别执行 zkServer.sh start
[root@hadoop01 ~]# zkServer.sh start
[root@hadoop01 ~]# zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /data/zookeeper/bin/../conf/zoo.cfg
Mode: follower
[root@hadoop03 ~]# zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /data/zookeeper/bin/../conf/zoo.cfg
Mode: leader
You have new mail in /var/spool/mail/root
正常情况只有一台leader状态
8、格式化zookeeper集群
在hadoop01机器上执行命令
[root@hadoop01 ~]# hdfs zkfc -formatZK
9.启动journalnode进程
在每台启动(五台)
[root@hadoop01 ~]# cd /data/hadoop/sbin/
[root@hadoop01 sbin]# ./hadoop-daemon.sh start journalnode
10.格式化namenode
在hadoop01上执行命令
[root@hadoop01 ~]# hdfs namenode -format
11.启动namenode
在hadoop01执行命令
[root@hadoop01 ~]# cd /data/hadoop/sbin/
[root@hadoop01 sbin]# ./hadoop-daemon.sh start namenode
12.将刚才格式化的namenode信息同步到备用namenode上
在hadoop02(备用namenode)上执行命令
[root@hadoop02 ~]# hdfs namenode -bootstrapStandby
13.在hadoop02上启动namenode
[root@hadoop02 ~]# cd /data/hadoop/sbin/
[root@hadoop02 sbin]# ./hadoop-daemon.sh start namenode
14.启动所有datanode
在每台机器上执行(datanode节点列表由slaves文件决定)
[root@hadoop01 ~]# cd /data/hadoop/sbin/
[root@hadoop01 sbin]# ./hadoop-daemon.sh start datanode
15.启动yarn
在hadoop01上执行命令
[root@hadoop01 ~]# cd /data/hadoop/sbin/
[root@hadoop01 sbin]# ./start-yarn.sh
16.启动ZKFC
在hadoop01和hadoop02上启动
[root@hadoop01 ~]# cd /data/hadoop/sbin/
[root@hadoop01 sbin]# ./hadoop-daemon.sh start zkfc
17.启动成功结果
三、安装hbase HA
1.解压hbase修改配置文件
[root@hadoop01 ~]# tar zxvf hbase-1.0.2-bin.tar.gz
[root@hadoop01 ~]# mv hbase-1.0.2 /data/hbase
配置环境变量
[root@hadoop01 ~]# vi /etc/profile
##hbase
export HBASE_HOME=/data/hbase
export PATH=$PATH:$HBASE_HOME/bin
[root@hadoop01 ~]# source /etc/profile
[root@hadoop01 ~]# cd /data/hbase/conf/
[root@hadoop01 conf]# vi hbase-env.sh
# The java implementation to use. Java 1.7+ required.
export JAVA_HOME="/opt/jdk"
# Extra Java CLASSPATH elements. Optional. #记得以下一定要配置,否则HMaster会启动不了
export HBASE_CLASSPATH=/data/hadoop/etc/hadoop
# Where log files are stored. $HBASE_HOME/logs by default.
export HBASE_LOG_DIR=/data/hbase/logs
# Tell HBase whether it should manage it's own instance of Zookeeper or not.
export HBASE_MANAGES_ZK=false
修改hbase-site.xml
[root@hadoop01 conf]# vi hbase-site.xml