|
1.准备zookeeper服务器
#node1,node2,node3
#安装请参考http://suyanzhu.blog.运维网.com/8050189/1946580
2.准备NameNode节点
#node1,node4
3.准备JournalNode节点
#node2,node3,node4
4.准备DataNode节点
#node2,node3,node4
#启动DataNode节点命令hadoop-daemon.sh start datanode
5.修改hadoop的hdfs-site.xml配置文件
dfs.nameservices
yunshuocluster
dfs.ha.namenodes.yunshuocluster
nn1,nn2
dfs.namenode.rpc-address.yunshuocluster.nn1
node1:8020
dfs.namenode.rpc-address.yunshuocluster.nn2
node4:8020
dfs.namenode.http-address.yunshuocluster.nn1
node1:50070
dfs.namenode.http-address.yunshuocluster.nn2
node4:50070
dfs.namenode.shared.edits.dir
qjournal://node2:8485;node3:8485;node4:8485/yunshuocluster
dfs.client.failover.proxy.provider.yunshuocluster
org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
dfs.ha.fencing.methods
sshfence
dfs.ha.fencing.ssh.private-key-files
/root/.ssh/id_dsa
dfs.journalnode.edits.dir
/opt/journalnode/
dfs.ha.automatic-failover.enabled
true
6.修改hadoop的core-site.xml配置文件
fs.defaultFS
hdfs://yunshuocluster
hadoop.tmp.dir
/opt/hadoop-2.5
ha.zookeeper.quorum
node1:2181,node2:2181,node3:2181
7.配置slaves配置文件
node2
node3
node4
8.启动zookeeper(node1,node2,node3)
zkServer.sh start
9.启动Journalnode(node2,node3,node4上分别执行下面的命令)
#启动命令见下一行;对应的停止命令为 hadoop-daemon.sh stop journalnode
hadoop-daemon.sh start journalnode
10.检查Journalnode,通过查看日志
cd /home/hadoop-2.5.1/logs
ls
tail -200 hadoop-root-journalnode-node2.log
11.格式化NameNode(两台中的一台,这里格式化node4这台NameNode节点)
hdfs namenode -format
cd /opt/hadoop-2.5
#两台NameNode同步完成
scp -r /opt/hadoop-2.5/* root@node1:/opt/hadoop-2.5/
12.初始化zkfc
hdfs zkfc -formatZK
13.启动服务
start-dfs.sh
#stop-dfs.sh表示停止服务
|
|