# set java environment
export JAVA_HOME=/opt/java
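Before moving on, it is worth confirming that the path really contains a JDK; a quick check in the same shell:

$ $JAVA_HOME/bin/java -version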
(3) Configure the core-site.xml file
<property>
  <name>fs.default.name</name>
  <value>hdfs://master:9000</value>
  <description>
    URI of the NameNode, in the form hdfs://hostname:port/
  </description>
</property>
<property>
  <name>fs.checkpoint.period</name>
  <value>3600</value>
  <description>
    Interval between checkpoints, in seconds
  </description>
</property>
<property>
  <name>fs.checkpoint.size</name>
  <value>67108864</value>
  <description>
    When the edit log reaches this size, a checkpoint is forced regardless
    of the period, in bytes
  </description>
</property>
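The two checkpoint settings above control when the SecondaryNameNode merges the edit log into the fsimage. A checkpoint can also be inspected or forced by hand; a sketch using the Hadoop 1.x command set, run on the SecondaryNameNode host:

# Print the current size of the edit log, in bytes
$ bin/hadoop secondarynamenode -geteditsize

# Force a checkpoint now, regardless of fs.checkpoint.period / fs.checkpoint.size
$ bin/hadoop secondarynamenode -checkpoint force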
<property>
  <name>hadoop.tmp.dir</name>
  <value>/opt/hadoop/var/tmp</value>
  <description>
    Hadoop's base temporary directory. It is best to set this explicitly. If a
    DataNode inexplicably fails to start after adding a node (or in similar
    situations), deleting this tmp directory on that machine usually fixes it.
    However, if you delete this directory on the NameNode machine, you must
    re-run the NameNode format command (see the commands below). The path given
    here does not need to be created in advance; it is generated automatically.
  </description>
</property>
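The clean-up and re-format procedure mentioned in the description is easy to get wrong, so here is a minimal sketch. It assumes the paths configured above and the Hadoop 1.x-style scripts; stop the daemons before touching any state directories:

# Stop all daemons first
$ bin/stop-all.sh

# On a DataNode that refuses to start: clear its local temporary state
$ rm -rf /opt/hadoop/var/tmp

# Only if the directory was removed on the NameNode machine: the filesystem
# metadata is gone, so the NameNode must be re-formatted (this wipes all
# HDFS metadata -- existing blocks become unreachable)
$ bin/hadoop namenode -format

# Restart the cluster
$ bin/start-all.sh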
<property>
  <name>fs.trash.interval</name>
  <value>1</value>
  <description>
    Number of minutes between trash checkpoints. If zero, the trash feature
    is disabled. Be sure to enable the trash; otherwise an accidental delete
    removes the file immediately and it cannot be recovered.
  </description>
</property>
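To see what fs.trash.interval buys you, here is a quick illustration with hypothetical file and user names; the fs subcommands are the standard Hadoop shell:

# With trash enabled, -rm moves the file into the user's .Trash directory
# instead of deleting it outright
$ bin/hadoop fs -rm /user/hadoop/report.txt

# The file waits under .Trash/Current until the next checkpoint expires it
$ bin/hadoop fs -ls /user/hadoop/.Trash/Current/user/hadoop

# Recovery is just a move back out of the trash
$ bin/hadoop fs -mv /user/hadoop/.Trash/Current/user/hadoop/report.txt /user/hadoop/

# Empty the trash immediately
$ bin/hadoop fs -expunge

Note that with the value of 1 configured above, trash checkpoints expire after about a minute, so the recovery window is very short; a larger value such as 1440 (one day) is a common choice.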
(4) Configure the hdfs-site.xml file
<property>
  <name>dfs.replication</name>
  <value>3</value>
</property>
<property>
  <name>dfs.permissions</name>
  <value>false</value>
</property>
<property>
  <name>dfs.name.dir</name>
  <value>/opt/hadoop/var/hdfs/name</value>
</property>
<property>
  <name>dfs.data.dir</name>
  <value>/opt/hadoop/var/hdfs/data</value>
</property>
<property>
  <name>dfs.hosts.exclude</name>
  <value>/opt/hadoop/conf/excludes</value>
  <description>
    Names a file that contains a list of hosts that are not permitted to
    connect to the namenode. The full pathname of the file must be
    specified. If the value is empty, no hosts are excluded.
  </description>
</property>
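dfs.hosts.exclude is what makes graceful decommissioning possible. A minimal sketch, assuming the excludes file configured above and a hypothetical node named slave3:

# List each host to decommission in the excludes file, one per line
$ echo "slave3" >> /opt/hadoop/conf/excludes

# Tell the NameNode to re-read its hosts/excludes files; blocks held on
# slave3 are then re-replicated to the remaining DataNodes
$ bin/hadoop dfsadmin -refreshNodes

# Watch until slave3 is reported as "Decommissioned" before shutting it down
$ bin/hadoop dfsadmin -report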