---------------------20080826---------------------------
安装IBM MapReduce Tools for Eclipse
1) 配置 Hadoop Home Directory,注意该目录下需要有 *core.jar 包
2) 配置运行 start Hadoop Server 时,需要以 cygwin 的目录格式指定路径,才能找到 hadoop home
<!-- Put site-specific property overrides in this file. -->
<configuration>

  <!-- Default filesystem: an hdfs:// URI pointing at the NameNode. -->
  <property>
    <name>fs.default.name</name>
    <value>hdfs://linux1:9000/</value>
    <description>The name of the default file system. Either the literal string
    "local" or a host:port for DFS.</description>
  </property>

  <!-- JobTracker address. NOTE: this property takes a bare host:port pair,
       not an hdfs:// URI -- the JobTracker is not an HDFS endpoint. -->
  <property>
    <name>mapred.job.tracker</name>
    <value>linux1:9001</value>
    <description>The host and port that the MapReduce job tracker runs at. If
    "local", then jobs are run in-process as a single map and reduce task.</description>
  </property>

  <!-- Local path where the NameNode persists the filesystem image/edits. -->
  <property>
    <name>dfs.name.dir</name>
    <value>/home/kevin/hadoopfs/name</value>
    <description>Determines where on the local filesystem the DFS name node
    should store the name table. If this is a comma-delimited list of directories
    then the name table is replicated in all of the directories,
    for redundancy. </description>
  </property>

  <!-- Local path where each DataNode stores its blocks. -->
  <property>
    <name>dfs.data.dir</name>
    <value>/home/kevin/hadoopfs/data</value>
    <description>Determines where on the local filesystem a DFS data node
    should store its blocks. If this is a comma-delimited list of directories,
    then data will be stored in all named directories, typically on different devices.
    Directories that do not exist are ignored.</description>
  </property>

  <!-- Replication factor of 2 suits this small (2-DataNode-class) cluster;
       the Hadoop default is 3. -->
  <property>
    <name>dfs.replication</name>
    <value>2</value>
    <description>Default block replication. The actual number of replications
    can be specified when the file is created. The default is used if replication
    is not specified in create time.</description>
  </property>

</configuration>