HBASE 0.98版本安装,二步曲:安装HADOOP到集群
1、准备4台服务器,其中一台作为namenode
192.168.137.101 hd1
三台作为datanode
192.168.137.102 hd2
192.168.137.103 hd3
192.168.137.104 hd4
2、同步所有服务器的时间
使用ntpdate
略
3、配置多机互信
略
4、解压hadoop
tar zxvf hadoop-2.2.0.tar.gz
5、移动hadoop到相应位置
mv hadoop-2.2.0 /home/hadoop/hadoop
6、在namenode建立相应目录
目录权限应设置为755(chmod 755)
mkdir /home/hadoop/hdfs
mkdir /home/hadoop/namenode
mkdir /home/hadoop/tmp
7、在datanode建立相应的目录
mkdir /home/hadoop/hdfs
mkdir /home/hadoop/tmp
8、配置hadoop
配置文件有7个:
/home/hadoop/hadoop/etc/hadoop/hadoop-env.sh
/home/hadoop/hadoop/etc/hadoop/yarn-env.sh
/home/hadoop/hadoop/etc/hadoop/slaves
/home/hadoop/hadoop/etc/hadoop/core-site.xml
/home/hadoop/hadoop/etc/hadoop/hdfs-site.xml
/home/hadoop/hadoop/etc/hadoop/mapred-site.xml
/home/hadoop/hadoop/etc/hadoop/yarn-site.xml
# /home/hadoop/hadoop/etc/hadoop/hadoop-env.sh
export JAVA_HOME=/java
export HADOOP_HOME=/home/hadoop/hadoop
# /home/hadoop/hadoop/etc/hadoop/yarn-env.sh
export JAVA_HOME=/java
# /home/hadoop/hadoop/etc/hadoop/slaves
hd2
hd3
hd4
# /home/hadoop/hadoop/etc/hadoop/core-site.xml
fs.defaultFS
hdfs://hd1:9000
io.file.buffer.size
131072
hadoop.tmp.dir
file:/home/hadoop/tmp
hadoop.proxyuser.hduser.hosts
*
hadoop.proxyuser.hduser.groups
*
# /home/hadoop/hadoop/etc/hadoop/hdfs-site.xml
dfs.namenode.http-address
hd1:50070
dfs.namenode.secondary.http-address
hd1:9001
dfs.namenode.name.dir
file:/home/hadoop/namenode
dfs.datanode.data.dir
file:/home/hadoop/hdfs
dfs.replication
3
dfs.webhdfs.enabled
true
dfs.support.append
true
dfs.support.broken.append
true
# /home/hadoop/hadoop/etc/hadoop/mapred-site.xml
mapreduce.framework.name
yarn
mapreduce.jobhistory.address
hd1:10020
mapreduce.jobhistory.webapp.address
hd1:19888
# /home/hadoop/hadoop/etc/hadoop/yarn-site.xml
yarn.nodemanager.aux-services
mapreduce_shuffle
yarn.nodemanager.aux-services.mapreduce.shuffle.class
org.apache.hadoop.mapred.ShuffleHandler
yarn.resourcemanager.address
hd1:8032
yarn.resourcemanager.scheduler.address
hd1:8030
yarn.resourcemanager.resource-tracker.address
hd1:8031
yarn.resourcemanager.admin.address
hd1:8033
yarn.resourcemanager.webapp.address
hd1:8088
9、拷贝hadoop到所有datanode
scp -r /home/hadoop/hadoop/ hd2:/home/hadoop/
scp -r /home/hadoop/hadoop/ hd3:/home/hadoop/
scp -r /home/hadoop/hadoop/ hd4:/home/hadoop/
10、格式化namenode
hdfs namenode -format
11、启动dfs
start-dfs.sh
12、查看状态
http://hd1:50070/dfsnodelist.jsp
页:
[1]