[Experience Sharing] Hadoop MapReduce Programming API Primer Series: Recommendation System (13)

Posted on 2017-12-18 10:50:50
  Without further ado, straight to the code. The program implements a simple item-based collaborative-filtering recommender as a chain of six MapReduce jobs: Step1 deduplicates the raw Tmall action log, Step2 builds the user-item preference score matrix, Step3 builds the item co-occurrence matrix, Step4 multiplies the two matrices, Step5 sums the partial products into the result matrix, and Step6 sorts each user's items by score and keeps the top 10 recommendations. StartRun chains the jobs together.

  The code (each class below is a separate source file in the package zhouls.bigdata.myMapReduce.tuijian):
  package zhouls.bigdata.myMapReduce.tuijian;
  import java.io.IOException;
  import java.util.Map;
  import java.util.StringTokenizer;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.io.IntWritable;
  import org.apache.hadoop.io.LongWritable;
  import org.apache.hadoop.io.NullWritable;
  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.mapreduce.Job;
  import org.apache.hadoop.mapreduce.Mapper;
  import org.apache.hadoop.mapreduce.Reducer;
  import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
  import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
  /**
  * Step1: deduplicate the raw log records
  * @author root
  *
  */

  public class Step1 {
  public static boolean run(Configuration config,Map<String, String> paths){
  try {
  FileSystem fs =FileSystem.get(config);
  Job job =Job.getInstance(config);
  job.setJobName("step1");
  job.setJarByClass(Step1.class);
  job.setMapperClass(Step1_Mapper.class);
  job.setReducerClass(Step1_Reducer.class);
  //
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(NullWritable.class);
  FileInputFormat.addInputPath(job, new Path(paths.get("Step1Input")));
  Path outpath=new Path(paths.get("Step1Output"));
  if(fs.exists(outpath)){
  fs.delete(outpath,true);
  }
  FileOutputFormat.setOutputPath(job, outpath);
  boolean f= job.waitForCompletion(true);
  return f;
  } catch (Exception e) {
  e.printStackTrace();
  }
  return false;
  }

  static class Step1_Mapper extends Mapper<LongWritable, Text, Text, NullWritable>{
  protected void map(LongWritable key, Text value,
  Context context)
  throws IOException, InterruptedException {
  if(key.get()!=0){//skip the record at byte offset 0 (assumed to be the CSV header line)
  context.write(value, NullWritable.get());
  }
  }
  }

  static class Step1_Reducer extends Reducer<Text, NullWritable, Text, NullWritable>{
  protected void reduce(Text key, Iterable<NullWritable> i,
  Context context)
  throws IOException, InterruptedException {
  context.write(key,NullWritable.get());
  }
  }
  }
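  Before moving on, here is a small, purely hypothetical slice of data to make the flow concrete (the column order item_id,user_id,action plus a trailing timestamp is an assumption based on how Step2_Mapper parses the file; only the first three columns are used):
  item_id,user_id,action,vtime
  i100,u14,click,2014-10-01 10:01
  i100,u14,click,2014-10-01 10:01
  i105,u14,alipay,2014-10-02 18:20
  Step1_Mapper drops the line at byte offset 0 (the header) and the reducer collapses identical lines, so the duplicate click record above appears only once in Step1's output.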
  package zhouls.bigdata.myMapReduce.tuijian;
  import java.io.IOException;
  import java.util.HashMap;
  import java.util.Map;
  import java.util.Map.Entry;
  import java.util.StringTokenizer;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.io.IntWritable;
  import org.apache.hadoop.io.LongWritable;
  import org.apache.hadoop.io.MapWritable;
  import org.apache.hadoop.io.NullWritable;
  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.mapreduce.Job;
  import org.apache.hadoop.mapreduce.Mapper;
  import org.apache.hadoop.mapreduce.Reducer;
  import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
  import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
  import org.apache.log4j.Logger;
  /**
  * Step2: group by user and aggregate the items each user acted on,
  * producing the user-item preference score matrix, e.g.:
  * u13	i160:1,
  * u14	i25:1,i223:1,
  * u16	i252:1,
  * u21	i266:1,
  * u24	i64:1,i218:1,i185:1,
  * u26	i276:1,i201:1,i348:1,i321:1,i136:1,
  * @author root
  *
  */

  public class Step2 {
  public static boolean run(Configuration config,Map<String, String> paths){
  try {
  //config.set("mapred.jar", "C:\\Users\\Administrator\\Desktop\\wc.jar");
  FileSystem fs =FileSystem.get(config);
  Job job =Job.getInstance(config);
  job.setJobName("step2");
  job.setJarByClass(StartRun.class);
  job.setMapperClass(Step2_Mapper.class);
  job.setReducerClass(Step2_Reducer.class);
  //
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);
  FileInputFormat.addInputPath(job, new Path(paths.get("Step2Input")));
  Path outpath=new Path(paths.get("Step2Output"));
  if(fs.exists(outpath)){
  fs.delete(outpath,true);
  }
  FileOutputFormat.setOutputPath(job, outpath);
  boolean f= job.waitForCompletion(true);
  return f;
  } catch (Exception e) {
  e.printStackTrace();
  }
  return false;
  }

  static class Step2_Mapper extends Mapper<LongWritable, Text, Text, Text>{
  //Note: using user+item together as the map output key would be more efficient
  protected void map(LongWritable key, Text value,
  Context context)
  throws IOException, InterruptedException {
  String[]  tokens=value.toString().split(",");
  String item=tokens[0];
  String user=tokens[1];
  String action =tokens[2];
  Text k= new Text(user);
  Integer rv =StartRun.R.get(action);
  //if(rv!=null){
  Text v =new Text(item+":"+ rv.intValue());
  context.write(k, v);
  }
  }

  static class Step2_Reducer extends Reducer<Text, Text, Text, Text>{
  protected void reduce(Text key, Iterable<Text> i,
  Context context)
  throws IOException, InterruptedException {
  Map<String, Integer> r =new HashMap<String, Integer>();
  for(Text value :i){
  String[] vs =value.toString().split(":");
  String item=vs[0];
  Integer action=Integer.parseInt(vs[1]);
  action = ((Integer) (r.get(item)==null?  0:r.get(item))).intValue() + action;
  r.put(item,action);
  }
  StringBuffer sb =new StringBuffer();
  for(Entry<String, Integer> entry :r.entrySet() ){
  sb.append(entry.getKey()+":"+entry.getValue().intValue()+",");
  }
  context.write(key,new Text(sb.toString()));
  }
  }
  }
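  As a worked example of what Step2 produces (using the action weights defined in StartRun.R: click=1, collect=2, cart=3, alipay=4), suppose the deduplicated log contains these hypothetical records for one user:
  i100,u14,click
  i100,u14,cart
  i105,u14,alipay
  The mapper emits (u14, "i100:1"), (u14, "i100:3") and (u14, "i105:4"); the reducer then sums the scores per item and writes one line per user, e.g.
  u14	i100:4,i105:4,
  (the order of items within the line is not guaranteed, because the reducer accumulates them in a HashMap).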
  package zhouls.bigdata.myMapReduce.tuijian;
  import java.io.IOException;
  import java.util.HashMap;
  import java.util.Map;
  import java.util.Map.Entry;
  import java.util.StringTokenizer;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.io.IntWritable;
  import org.apache.hadoop.io.LongWritable;
  import org.apache.hadoop.io.MapWritable;
  import org.apache.hadoop.io.NullWritable;
  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.mapreduce.Job;
  import org.apache.hadoop.mapreduce.Mapper;
  import org.apache.hadoop.mapreduce.Reducer;
  import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
  import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
  import org.apache.log4j.Logger;
  /**
  * Step3: count the item combinations to build the item co-occurrence matrix, e.g.:
  * i100:i100	3
  * i100:i105	1
  * i100:i106	1
  * i100:i109	1
  * i100:i114	1
  * i100:i124	1
  * @author root
  *
  */

  public class Step3 {
  private final static Text K = new Text();
  private final static IntWritable V = new IntWritable(1);
  public static boolean run(Configuration config,Map<String, String> paths){
  try {
  FileSystem fs =FileSystem.get(config);
  Job job =Job.getInstance(config);
  job.setJobName("step3");
  job.setJarByClass(StartRun.class);
  job.setMapperClass(Step3_Mapper.class);
  job.setReducerClass(Step3_Reducer.class);
  job.setCombinerClass(Step3_Reducer.class);
  //
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(IntWritable.class);
  FileInputFormat.addInputPath(job, new Path(paths.get("Step3Input")));
  Path outpath=new Path(paths.get("Step3Output"));
  if(fs.exists(outpath)){
  fs.delete(outpath,true);
  }
  FileOutputFormat.setOutputPath(job, outpath);
  boolean f= job.waitForCompletion(true);
  return f;
  } catch (Exception e) {
  e.printStackTrace();
  }
  return false;
  }

  static class Step3_Mapper extends Mapper<LongWritable, Text, Text, IntWritable>{
  protected void map(LongWritable key, Text value,
  Context context)
  throws IOException, InterruptedException {
  String[]  tokens=value.toString().split("\t");
  String[] items =tokens[1].split(",");
  for (int i = 0; i < items.length; i++) {
  String itemA = items[i].split(":")[0];
  for (int j = 0; j < items.length; j++) {
  String itemB = items[j].split(":")[0];
  K.set(itemA+":"+itemB);
  context.write(K, V);
  }
  }
  }
  }

  static class Step3_Reducer extends Reducer<Text, IntWritable, Text, IntWritable>{
  protected void reduce(Text key, Iterable<IntWritable> i,
  Context context)
  throws IOException, InterruptedException {
  int sum =0;
  for(IntWritable v :i ){
  sum =sum+v.get();
  }
  V.set(sum);
  context.write(key, V);
  }
  }
  }
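  Continuing the example, Step3 reads each user line from Step2 and, for every ordered pair of items that user touched (including an item paired with itself), emits the pair with a count of 1; the combiner and reducer then sum these counts over all users. For the single line
  u14	i100:4,i105:4,
  the mapper emits i100:i100 1, i100:i105 1, i105:i100 1 and i105:i105 1 (the ":4" scores are ignored here, only the item IDs matter). After summing, a cell such as i100:i105 with value 3 means three users interacted with both i100 and i105.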
  package zhouls.bigdata.myMapReduce.tuijian;
  import java.io.IOException;
  import java.util.HashMap;
  import java.util.Iterator;
  import java.util.Map;
  import java.util.Map.Entry;
  import java.util.StringTokenizer;
  import java.util.regex.Pattern;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.io.IntWritable;
  import org.apache.hadoop.io.LongWritable;
  import org.apache.hadoop.io.MapWritable;
  import org.apache.hadoop.io.NullWritable;
  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.mapreduce.Job;
  import org.apache.hadoop.mapreduce.Mapper;
  import org.apache.hadoop.mapreduce.Reducer;
  import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
  import org.apache.hadoop.mapreduce.lib.input.FileSplit;
  import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
  import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;
  import org.apache.log4j.Logger;
  /**
  *
  * Step4: multiply the co-occurrence matrix by the preference score matrix
  * @author root
  *
  */

  public class Step4 {
  public static boolean run(Configuration config, Map<String, String> paths) {
  try {
  FileSystem fs = FileSystem.get(config);
  Job job = Job.getInstance(config);
  job.setJobName("step4");
  job.setJarByClass(StartRun.class);
  job.setMapperClass(Step4_Mapper.class);
  job.setReducerClass(Step4_Reducer.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);
  // FileInputFormat.addInputPath(job, new
  // Path(paths.get("Step4Input")));
  FileInputFormat.setInputPaths(job,
  new Path[] { new Path(paths.get("Step4Input1")),
  new Path(paths.get("Step4Input2")) });
  Path outpath = new Path(paths.get("Step4Output"));
  if (fs.exists(outpath)) {
  fs.delete(outpath, true);
  }
  FileOutputFormat.setOutputPath(job, outpath);
  boolean f = job.waitForCompletion(true);
  return f;
  } catch (Exception e) {
  e.printStackTrace();
  }
  return false;
  }

  static class Step4_Mapper extends Mapper<LongWritable, Text, Text, Text> {
  private String flag;// which input this split comes from: A (co-occurrence matrix) or B (preference score matrix)
  //setup() is called once per map task, at initialization
  protected void setup(Context context) throws IOException,
  InterruptedException {
  FileSplit split = (FileSplit) context.getInputSplit();
  flag = split.getPath().getParent().getName();// the parent directory name ("step3" or "step2") tells us which dataset this split belongs to
  System.out.println(flag + "**********************");
  }
  protected void map(LongWritable key, Text value, Context context)
  throws IOException, InterruptedException {
  String[] tokens = Pattern.compile("[\t,]").split(value.toString());
  if (flag.equals("step3")) {// co-occurrence matrix
  String[] v1 = tokens[0].split(":");
  String itemID1 = v1[0];
  String itemID2 = v1[1];
  String num = tokens[1];
  Text k = new Text(itemID1);// key on the first item of the pair, e.g. i100
  Text v = new Text("A:" + itemID2 + "," + num);// A:i109,1
  context.write(k, v);
  } else if (flag.equals("step2")) {// user-item preference score matrix
  String userID = tokens[0];
  for (int i = 1; i < tokens.length; i++) {
  String[] vector = tokens[i].split(":");
  String itemID = vector[0];// item id
  String pref = vector[1];// preference score
  Text k = new Text(itemID); // key on the item, e.g. i100
  Text v = new Text("B:" + userID + "," + pref); // B:u401,2
  context.write(k, v);
  }
  }
  }
  }

  static class Step4_Reducer extends Reducer<Text, Text, Text, Text> {
  protected void reduce(Text key, Iterable<Text> values, Context context)
  throws IOException, InterruptedException {
  // A: co-occurrence matrix or B: preference score matrix
  //for the current item, its co-occurrence counts with all other items are collected in mapA
  Map<String, Integer> mapA = new HashMap<String, Integer>();// other items co-occurring with this item (the itemID in key): other itemID -> co-occurrence count
  Map<String, Integer> mapB = new HashMap<String, Integer>();// all users' preference scores for this item (the itemID in key): userID -> score
  for (Text line : values) {
  String val = line.toString();
  if (val.startsWith("A:")) {// item co-occurrence count
  String[] kv = Pattern.compile("[\t,]").split(
  val.substring(2));
  try {
  mapA.put(kv[0], Integer.parseInt(kv[1]));
  } catch (Exception e) {
  e.printStackTrace();
  }
  } else if (val.startsWith("B:")) {
  String[] kv = Pattern.compile("[\t,]").split(
  val.substring(2));
  try {
  mapB.put(kv[0], Integer.parseInt(kv[1]));
  } catch (Exception e) {
  e.printStackTrace();
  }
  }
  }
  double result = 0;
  Iterator<String> iter = mapA.keySet().iterator();
  while (iter.hasNext()) {
  String mapk = iter.next();// itemID
  int num = mapA.get(mapk).intValue();
  Iterator<String> iterb = mapB.keySet().iterator();
  while (iterb.hasNext()) {
  String mapkb = iterb.next();// userID
  int pref = mapB.get(mapkb).intValue();
  result = num * pref;// multiply step of the matrix multiplication (partial product)
  Text k = new Text(mapkb);
  Text v = new Text(mapk + "," + result);
  context.write(k, v);
  }
  }
  }
  }
  }
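  Step4 performs the actual multiplication. Both the Step3 output (tagged "A:") and the Step2 output (tagged "B:") are re-keyed by item ID, so for a given item the reducer sees that item's whole co-occurrence row and every user's score for it, and writes one partial product per (user, co-occurring item) pair. A small hypothetical case: for key i100, if mapA = {i105=3} (i100 and i105 co-occurred 3 times) and mapB = {u14=4} (user u14's score for i100 is 4), the reducer writes
  u14	i105,12.0
  which is item i100's contribution to u14's predicted score for i105.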
  package zhouls.bigdata.myMapReduce.tuijian;
  import java.io.IOException;
  import java.util.HashMap;
  import java.util.Iterator;
  import java.util.Map;
  import java.util.Map.Entry;
  import java.util.StringTokenizer;
  import java.util.regex.Pattern;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.io.IntWritable;
  import org.apache.hadoop.io.LongWritable;
  import org.apache.hadoop.io.MapWritable;
  import org.apache.hadoop.io.NullWritable;
  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.mapreduce.Job;
  import org.apache.hadoop.mapreduce.Mapper;
  import org.apache.hadoop.mapreduce.Reducer;
  import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
  import org.apache.hadoop.mapreduce.lib.input.FileSplit;
  import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
  import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;
  import org.apache.log4j.Logger;
  /**
  *
  * Step5: sum the partial products from the multiplication to obtain the result matrix
  *
  * @author root
  *
  */

  public class Step5 {
  private final static Text K = new Text();
  private final static Text V = new Text();
  public static boolean run(Configuration config, Map<String, String> paths) {
  try {
  FileSystem fs = FileSystem.get(config);
  Job job = Job.getInstance(config);
  job.setJobName("step5");
  job.setJarByClass(StartRun.class);
  job.setMapperClass(Step5_Mapper.class);
  job.setReducerClass(Step5_Reducer.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);
  FileInputFormat
  .addInputPath(job, new Path(paths.get("Step5Input")));
  Path outpath = new Path(paths.get("Step5Output"));
  if (fs.exists(outpath)) {
  fs.delete(outpath, true);
  }
  FileOutputFormat.setOutputPath(job, outpath);
  boolean f = job.waitForCompletion(true);
  return f;
  } catch (Exception e) {
  e.printStackTrace();
  }
  return false;
  }

  static class Step5_Mapper extends Mapper<LongWritable, Text, Text, Text> {
  /**
  * Pass records through unchanged (re-keyed by user)
  */
  protected void map(LongWritable key, Text value, Context context)
  throws IOException, InterruptedException {
  String[] tokens = Pattern.compile("[\t,]").split(value.toString());
  Text k = new Text(tokens[0]);// user as key
  Text v = new Text(tokens[1] + "," + tokens[2]);
  context.write(k, v);
  }
  }

  static class Step5_Reducer extends Reducer<Text, Text, Text, Text> {
  protected void reduce(Text key, Iterable<Text> values, Context context)
  throws IOException, InterruptedException {
  Map<String, Double> map = new HashMap<String, Double>();// accumulated result: itemID -> summed score
  for (Text line : values) {// i9,4.0
  String[] tokens = line.toString().split(",");
  String itemID = tokens[0];
  Double score = Double.parseDouble(tokens[1]);
  if (map.containsKey(itemID)) {
  map.put(itemID, map.get(itemID) + score);// sum step of the matrix multiplication
  } else {
  map.put(itemID, score);
  }
  }
  Iterator<String> iter = map.keySet().iterator();
  while (iter.hasNext()) {
  String itemID = iter.next();
  double score = map.get(itemID);
  Text v = new Text(itemID + "," + score);
  context.write(key, v);
  }
  }
  }
  }
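  Step5 then finishes the multiplication: the mapper re-keys every partial product by user, and the reducer adds up all contributions for the same (user, item) pair. If u14 has the hypothetical partial products i105,12.0 (contributed by i100) and i105,8.0 (contributed by some other co-occurring item), Step5 writes
  u14	i105,20.0
  which is u14's final recommendation score for i105.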
  package zhouls.bigdata.myMapReduce.tuijian;
  import java.io.DataInput;
  import java.io.DataOutput;
  import java.io.IOException;
  import java.util.HashMap;
  import java.util.Map;
  import java.util.regex.Pattern;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.io.LongWritable;
  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.io.WritableComparable;
  import org.apache.hadoop.io.WritableComparator;
  import org.apache.hadoop.mapreduce.Job;
  import org.apache.hadoop.mapreduce.Mapper;
  import org.apache.hadoop.mapreduce.Reducer;
  import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
  import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
  /**
  *
  * Step6: sort by recommendation score in descending order and list the top 10 recommended items for each user
  *
  * @author root
  *
  */

  public class Step6 {
  private final static Text K = new Text();
  private final static Text V = new Text();
  public static boolean run(Configuration config, Map<String, String> paths) {
  try {
  FileSystem fs = FileSystem.get(config);
  Job job = Job.getInstance(config);
  job.setJobName("step6");
  job.setJarByClass(StartRun.class);
  job.setMapperClass(Step6_Mapper.class);
  job.setReducerClass(Step6_Reducer.class);
  job.setSortComparatorClass(NumSort.class);
  job.setGroupingComparatorClass(UserGroup.class);
  job.setMapOutputKeyClass(PairWritable.class);
  job.setMapOutputValueClass(Text.class);
  FileInputFormat
  .addInputPath(job, new Path(paths.get("Step6Input")));
  Path outpath = new Path(paths.get("Step6Output"));
  if (fs.exists(outpath)) {
  fs.delete(outpath, true);
  }
  FileOutputFormat.setOutputPath(job, outpath);
  boolean f = job.waitForCompletion(true);
  return f;
  } catch (Exception e) {
  e.printStackTrace();
  }
  return false;
  }

  static class Step6_Mapper extends Mapper<LongWritable, Text, PairWritable, Text> {
  protected void map(LongWritable key, Text value, Context context)
  throws IOException, InterruptedException {
  String[] tokens = Pattern.compile("[\t,]").split(value.toString());
  String u = tokens[0];
  String item = tokens[1];
  String num = tokens[2];
  PairWritable k =new PairWritable();
  k.setUid(u);
  k.setNum(Double.parseDouble(num));
  V.set(item+":"+num);
  context.write(k, V);
  }
  }

  static class Step6_Reducer extends Reducer<PairWritable, Text, Text, Text> {
  protected void reduce(PairWritable key, Iterable<Text> values, Context context)
  throws IOException, InterruptedException {
  int i=0;
  StringBuffer sb =new StringBuffer();
  for(Text v :values){
  if(i==10)
  break;
  sb.append(v.toString()+",");
  i++;
  }
  K.set(key.getUid());
  V.set(sb.toString());
  context.write(K, V);
  }
  }

  static class PairWritable implements WritableComparable<PairWritable> {
  //private String itemId;
  private String uid;
  private double num;
  public void write(DataOutput out) throws IOException {
  out.writeUTF(uid);
  //out.writeUTF(itemId);
  out.writeDouble(num);
  }
  public void readFields(DataInput in) throws IOException {
  this.uid=in.readUTF();
  //this.itemId=in.readUTF();
  this.num=in.readDouble();
  }
  public int compareTo(PairWritable o) {
  int r =this.uid.compareTo(o.getUid());
  if(r==0){
  return Double.compare(this.num, o.getNum());
  }
  return r;
  }
  public String getUid() {
  return uid;
  }
  public void setUid(String uid) {
  this.uid = uid;
  }
  public double getNum() {
  return num;
  }
  public void setNum(double num) {
  this.num = num;
  }
  }

  static class NumSort extends WritableComparator {
  public NumSort(){
  super(PairWritable.class,true);
  }
  public int compare(WritableComparable a, WritableComparable b) {
  PairWritable o1 =(PairWritable) a;
  PairWritable o2 =(PairWritable) b;
  int r =o1.getUid().compareTo(o2.getUid());
  if(r==0){
  return -Double.compare(o1.getNum(), o2.getNum());
  }
  return r;
  }
  }

  static class UserGroup extends WritableComparator {
  public UserGroup(){
  super(PairWritable.class,true);
  }
  public int compare(WritableComparable a, WritableComparable b) {
  PairWritable o1 =(PairWritable) a;
  PairWritable o2 =(PairWritable) b;
  return o1.getUid().compareTo(o2.getUid());
  }
  }
  }
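  Step6 is a standard secondary-sort job: the composite map output key PairWritable holds (uid, score), NumSort orders keys by uid ascending and then by score descending (note the leading minus sign in its compare method), and UserGroup groups keys by uid only. Each reduce call therefore receives all of one user's item:score values already sorted from highest to lowest score and simply keeps the first 10. With the numbers from the running example the final output would contain a line roughly like
  u14	i105:20.0,...
  with at most ten comma-separated item:score entries per user.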
  package zhouls.bigdata.myMapReduce.tuijian;
  import java.util.HashMap;
  import java.util.Map;
  import org.apache.hadoop.conf.Configuration;

  public class StartRun {
  public static void main(String[] args) {
  Configuration config = new Configuration();
  //config.set("fs.defaultFS", "hdfs://HadoopMaster:9000");
  //config.set("yarn.resourcemanager.hostname", "HadoopMaster");
  //all MR job input and output directories are defined in this paths map
  Map<String, String> paths = new HashMap<String, String>();
  //paths.put("Step1Input", "hdfs:/HadoopMaster:9000/tuijian/sam_tianchi_2014002_rec_tmall_log.csv");
  //paths.put("Step1Output", "hdfs:/HadoopMaster:9000/out/tuijian/step1");
  //paths.put("Step2Input", paths.get("Step1Output"));
  //paths.put("Step2Output", "hdfs:/HadoopMaster:9000/out/tuijian/step2");
  //paths.put("Step3Input", paths.get("Step2Output"));
  //paths.put("Step3Output", "hdfs:/HadoopMaster:9000/out/tuijian/step3");
  //paths.put("Step4Input1", paths.get("Step2Output"));
  //paths.put("Step4Input2", paths.get("Step3Output"));
  //paths.put("Step4Output", "hdfs:/HadoopMaster:9000/out/tuijian/step4");
  //paths.put("Step5Input", paths.get("Step4Output"));
  //paths.put("Step5Output", "hdfs:/HadoopMaster:9000/out/tuijian/step5");
  //paths.put("Step6Input", paths.get("Step5Output"));
  //paths.put("Step6Output", "hdfs:/HadoopMaster:9000/out/tuijian/step6");
  paths.put("Step1Input", "./data/tuijian/sam_tianchi_2014002_rec_tmall_log.csv");
  paths.put("Step1Output", "./out/tuijian/step1");
  paths.put("Step2Input", paths.get("Step1Output"));
  paths.put("Step2Output", "./out/tuijian/step2");
  paths.put("Step3Input", paths.get("Step2Output"));
  paths.put("Step3Output", "./out/tuijian/step3");
  paths.put("Step4Input1", paths.get("Step2Output"));
  paths.put("Step4Input2", paths.get("Step3Output"));
  paths.put("Step4Output", "./out/tuijian/step4");
  paths.put("Step5Input", paths.get("Step4Output"));
  paths.put("Step5Output", "./out/tuijian/step5");
  paths.put("Step6Input", paths.get("Step5Output"));
  paths.put("Step6Output", "./out/tuijian/step6");
  Step1.run(config, paths);
  Step2.run(config, paths);
  Step3.run(config, paths);
  Step4.run(config, paths);
  Step5.run(config, paths);
  Step6.run(config, paths);
  }
  public static Map<String, Integer> R = new HashMap<String, Integer>();// action -> preference weight used by Step2
  static {
  R.put("click", 1);
  R.put("collect", 2);
  R.put("cart", 3);
  R.put("alipay", 4);
  }
  }
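  A note on running it: as written, StartRun uses local paths (./data and ./out), so the whole chain can be launched directly from the IDE against the local file system for a quick test. To run it on a cluster you would instead uncomment the fs.defaultFS / yarn.resourcemanager.hostname settings and the hdfs:// paths, package the classes into a jar, and submit it with something along the lines of (the jar name here is only illustrative):
  hadoop jar tuijian.jar zhouls.bigdata.myMapReduce.tuijian.StartRun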
