public>
public static> extends Mapper{
private final static IntWritable one =new IntWritable(1);
private Text word = new Text();
public void map(Object key,Text value,Context context) throws IOException,InterruptedIOException, InterruptedException
{
StringTokenizer itr = new StringTokenizer (value.toString());
while(itr.hasMoreTokens()) {
word.set(itr.nextToken());
context.write(word, one);
}
}
}
public static> extends Reducer {
private IntWritable result = new IntWritable();
public void reduce(Text key, Iterable values,Context context) throws IOException,InterruptedException {
int sum = 0;
for (IntWritable val : values) {
sum += val.get();
}
result.set(sum);
context.write(key, result);
}
}
//public static void main(String[] args) throws IOException,>
public static void main(String[] args) throws IOException,> {
/*
* IntWritable intwritable = new IntWritable(1);
Text text = new Text("abc");
System.out.println(text.toString());
System.out.println(text.getLength());
System.out.println(intwritable.get());
System.out.println(intwritable);
StringTokenizer itr = new StringTokenizer ("www baidu com");
while(itr.hasMoreTokens()) {
System.out.println(itr.nextToken()); hdfs://192.168.50.107:8020/input hdfs://192.168.50.107:8020/output
*/
//String path = WordCount.class.getResource("/").toString();
//System.out.println("path = " + path);
System.out.println("Connection end");
//System.setProperty("hadoop.home.dir", "file://192.168.50.107/home/hadoop-user/hadoop-2.8.0");
//String StringInput = "hdfs://192.168.50.107:8020/input/a.txt";
//String StringOutput = "hdfs://192.168.50.107:8020/output/b.txt";
Configuration conf = new Configuration();
//conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
//conf.addResource("classpath:core-site.xml");
//conf.addResource("classpath:hdfs-site.xml");
//conf.addResource("classpath:mapred-site.xml");
//conf.set("HADOOP_HOME", "/home/hadoop-user/hadoop-2.8.0");
Job job = Job.getInstance(conf,"word count");
job.setJarByClass(WordCount.class);
job.setMapperClass(TokenizerMapper.class);
job.setCombinerClass(IntSumReducer.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
//FileInputFormat.addInputPath(job, new Path(StringInput));
//FileOutputFormat.setOutputPath(job, new Path(StringOutput));
FileInputFormat.addInputPath(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));
System.exit(job.waitForCompletion(true)?0:1);
}
}
连接hadoop的配置文件位置如图。
在Eclipse中执行运行会报错: HADOOP_HOME and hadoop.home.dir are unset. 需要编译打包，放入Linux系统:
mvn clean
mvn compile
mvn package
我将打包生成的WordCount-0.0.1-SNAPSHOT.jar放到了/home/hadoop-user/work目录
在linux 运行 hadoop jar WordCount-0.0.1-SNAPSHOT.jar hadoop_mapreduce.WordCount.WordCount hdfs://192.168.50.107:8020/input hdfs://192.168.50.107:8020/output
注: 我这里如果不带类路径就会报错,找不到WordCount类。把要分析的文件放入hdfs的input目录中,Output目录不用自己创建。最后生成的分析结果会存在于output目录中