della0887 posted on 2018-10-30 08:35:22

hadoop ha+zookeeper+hbase-Tootoo

  I. Environment

  1. OS: Red Hat Enterprise Linux Server
  2. Required packages
  hadoop-2.2.0.tar.gz
  hbase-0.98.2-hadoop2-bin.tar.gz
  jdk-7u67-linux-x64.tar.gz
  zookeeper-3.4.6.tar.gz
  3. Services running on each machine
  192.168.10.40 master1 namenode resourcemanager ZKFC hmaster
  192.168.10.41 master2 namenode                 ZKFC hmaster(backup)
  192.168.10.42 slave1  datanode nodemanager journalnode hregionserver zookeeper
  192.168.10.43 slave2  datanode nodemanager journalnode hregionserver zookeeper
  192.168.10.44 slave3  datanode nodemanager journalnode hregionserver zookeeper
  II. Installation steps (to keep files in sync, operations are generally performed on master1)
  1. Passwordless SSH login
  (mkdir -m700 .ssh)
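  A minimal sketch of the key exchange (assuming the richmail user exists on every host; ssh-copy-id appends the public key to the remote authorized_keys):
  ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
  for host in master2 slave1 slave2 slave3; do
    ssh-copy-id richmail@$host
  done
  # .ssh must not be group/world writable, hence mkdir -m700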
  2. JDK installation (on every machine)
  1) Unpack
  tar zxf jdk-7u67-linux-x64.tar.gz
  ln -sf jdk1.7.0_67 jdk
  2) Configure
  sudo vim /etc/profile
  export JAVA_HOME=/home/richmail/jdk
  export PATH=$JAVA_HOME/bin:$PATH

  3) Apply the change so it takes effect
  source /etc/profile
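  A quick check that the JDK now on the PATH is the one just installed:
  java -version   # should report 1.7.0_67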
  3. ZooKeeper installation
  1) Unpack
  tar zxf zookeeper-3.4.6.tar.gz
  ln -sf zookeeper-3.4.6 zookeeper
  2) Configure
  vim zookeeper/bin/zkEnv.sh
  ZOO_LOG_DIR="/home/richmail/zookeeper/logs"
  cd zookeeper/conf
  cp zoo_sample.cfg zoo.cfg
  vim zoo.cfg
  tickTime=2000
  initLimit=10
  syncLimit=5
  dataDir=/home/richmail/zookeeper/data
  dataLogDir=/home/richmail/zookeeper/logs
  clientPort=2181
  server.1=slave1:2888:3888
  server.2=slave2:2888:3888
  server.3=slave3:2888:3888
  mkdir -p /home/richmail/zookeeper/{data,logs}
  3) Copy to slave1, slave2, and slave3
  cd
  scp -rv zookeeper slave1:~/
  ssh slave1 'echo 1 > /home/richmail/zookeeper/data/myid'
  scp -rv zookeeper slave2:~/
  ssh slave2 'echo 2 > /home/richmail/zookeeper/data/myid'
  scp -rv zookeeper slave3:~/
  ssh slave3 'echo 3 > /home/richmail/zookeeper/data/myid'
  4) Start ZooKeeper
  On slave1, slave2, and slave3, start ZooKeeper:
  cd ~/zookeeper/bin
  ./zkServer.sh start
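  Once all three are up, each node can be checked; one should report Mode: leader and the other two Mode: follower:
  ./zkServer.sh status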
  4. Hadoop installation
  1) Unpack
  tar zxf hadoop-2.2.0.tar.gz
  ln -sf hadoop-2.2.0 hadoop
  2) Configure
  cd /home/richmail/hadoop/etc/hadoop
  vim core-site.xml
  <configuration>
    <property>
      <name>fs.defaultFS</name>
      <value>hdfs://cluster</value>
    </property>
    <property>
      <name>hadoop.tmp.dir</name>
      <value>/home/richmail/hadoop/storage/tmp</value>
    </property>
    <property>
      <name>ha.zookeeper.quorum</name>
      <value>slave1:2181,slave2:2181,slave3:2181</value>
    </property>
  </configuration>
  mkdir -p /home/richmail/hadoop/storage/tmp
  vim hadoop-env.sh
  export JAVA_HOME=/home/richmail/jdk
  export HADOOP_PID_DIR=/var/hadoop/pids   # default is under /tmp
  vim hdfs-site.xml
  <configuration>
    <property>
      <name>dfs.nameservices</name>
      <value>cluster</value>
    </property>
    <property>
      <name>dfs.ha.namenodes.cluster</name>
      <value>master1,master2</value>
    </property>
    <property>
      <name>dfs.namenode.rpc-address.cluster.master1</name>
      <value>master1:9000</value>
    </property>
    <property>
      <name>dfs.namenode.rpc-address.cluster.master2</name>
      <value>master2:9000</value>
    </property>
    <property>
      <name>dfs.namenode.http-address.cluster.master1</name>
      <value>master1:50070</value>
    </property>
    <property>
      <name>dfs.namenode.http-address.cluster.master2</name>
      <value>master2:50070</value>
    </property>
    <property>
      <name>dfs.namenode.shared.edits.dir</name>
      <value>qjournal://slave1:8485;slave2:8485;slave3:8485/cluster</value>
    </property>
    <property>
      <name>dfs.ha.automatic-failover.enabled</name>
      <value>true</value>
    </property>
    <property>
      <name>dfs.client.failover.proxy.provider.cluster</name>
      <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
    </property>
    <property>
      <name>dfs.ha.fencing.methods</name>
      <value>sshfence</value>
    </property>
    <property>
      <name>dfs.ha.fencing.ssh.private-key-files</name>
      <value>/home/richmail/.ssh/id_rsa</value>
    </property>
    <property>
      <name>dfs.journalnode.edits.dir</name>
      <value>/home/richmail/hadoop/storage/journal</value>
    </property>
  </configuration>
  mkdir -p /home/richmail/hadoop/storage/journal
  vim mapred-site.xml
  <configuration>
    <property>
      <name>mapreduce.framework.name</name>
      <value>yarn</value>
    </property>
  </configuration>
  vim yarn-env.sh
  export YARN_PID_DIR=/var/hadoop/pids
  vim yarn-site.xml
  <configuration>
    <property>
      <name>yarn.resourcemanager.hostname</name>
      <value>master1</value>
    </property>
    <property>
      <name>yarn.nodemanager.aux-services</name>
      <value>mapreduce_shuffle</value>
    </property>
  </configuration>
  vim slaves
  slave1
  slave2
  slave3
  3) Copy to the other machines
  cd
  scp -rv hadoop master2:~/
  scp -rv hadoop slave1:~/
  scp -rv hadoop slave2:~/
  scp -rv hadoop slave3:~/
  4) Start Hadoop
  1) On slave1, slave2, and slave3, start the JournalNode:
  cd ~/hadoop/sbin
  ./hadoop-daemon.sh start journalnode
  2) On master1, run:
  cd ~/hadoop/bin
  ./hdfs zkfc -formatZK
  ./hdfs namenode -format
  cd ../sbin
  ./hadoop-daemon.sh start namenode
  ./start-all.sh
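  Note: start-all.sh is deprecated in Hadoop 2.x; an equivalent is to start HDFS and YARN separately:
  ./start-dfs.sh
  ./start-yarn.sh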
  3) On master2, run:
  cd ~/hadoop/bin
  ./hdfs namenode -bootstrapStandby
  cd ../sbin
  ./hadoop-daemon.sh start namenode
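  As a sanity check, jps (shipped with the JDK) should show the expected daemons on each node:
  jps   # masters: NameNode, DFSZKFailoverController (plus ResourceManager on master1)
        # slaves: DataNode, NodeManager, JournalNode, QuorumPeerMain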
  5) Verify
  Visit 192.168.10.40:50070 and 192.168.10.41:50070 in a browser; both NameNodes should appear, one as active and the other as standby.
  Alternatively, run these commands on a NameNode:
  hdfs haadmin -getServiceState master1
  hdfs haadmin -getServiceState master2
  Running hdfs haadmin -failover --forceactive master1 master2 swaps the states of the two nodes.
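  Each getServiceState call prints the node's current role, for example:
  hdfs haadmin -getServiceState master1   # prints: active
  hdfs haadmin -getServiceState master2   # prints: standby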
  5. HBase installation
  1) Unpack
  tar zxf hbase-0.98.2-hadoop2-bin.tar.gz
  ln -sf hbase-0.98.2-hadoop2 hbase
  2) Configure
  cd ~/hbase/conf
  vim hbase-env.sh
  export JAVA_HOME=/home/richmail/jdk
  export HBASE_MANAGES_ZK=false
  export HBASE_PID_DIR=/var/hadoop/pids
  vim regionservers
  slave1
  slave2
  slave3
  vim hbase-site.xml
  <configuration>
    <property>
      <name>hbase.rootdir</name>
      <value>hdfs://cluster/hbase</value>
    </property>
    <property>
      <name>hbase.master</name>
      <value>60000</value>
    </property>
    <property>
      <name>hbase.zookeeper.quorum</name>
      <value>slave1,slave2,slave3</value>
    </property>
    <property>
      <name>hbase.zookeeper.property.clientPort</name>
      <value>2181</value>
    </property>
    <property>
      <name>hbase.zookeeper.property.dataDir</name>
      <value>/home/richmail/hbase/zkdata</value>
    </property>
    <property>
      <name>hbase.cluster.distributed</name>
      <value>true</value>
    </property>
    <property>
      <name>hbase.tmp.dir</name>
      <value>/home/richmail/hbase/data</value>
    </property>
  </configuration>
  mkdir ~/hbase/{zkdata,data}
  HBase hits a startup error unless Hadoop's hdfs-site.xml is copied into hbase/conf; without it HBase cannot resolve the logical nameservice "cluster" used in hbase.rootdir.
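  For example (paths as used above):
  cp ~/hadoop/etc/hadoop/hdfs-site.xml ~/hbase/conf/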
  3) Copy to the other machines
  cd
  scp -rv hbase master2:~/
  scp -rv hbase slave1:~/
  scp -rv hbase slave2:~/
  scp -rv hbase slave3:~/
  4) Start HBase
  On master1, run:
  cd ~/hbase/bin
  ./start-hbase.sh
  On master2, run:
  ./bin/hbase-daemon.sh start master --backup
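  To verify, the status command in the HBase shell should report three live region servers:
  cd ~/hbase/bin
  ./hbase shell
  status   # should report 3 servers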
  At this point, the cluster deployment is complete.
