Writing a Custom Hadoop Serializable Class
In plain Java, making a custom class serializable is easy: the class simply implements the java.io.Serializable interface. In the Hadoop framework, however, a custom class becomes serializable by implementing the WritableComparable interface and providing the write(), readFields(), and compareTo() methods. (A no-argument constructor is also needed so the framework can instantiate the class before calling readFields().) Below is such a custom serializable class:
package com.charles.writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;

/**
 * Description: a custom Hadoop serializable class; Hadoop can serialize and
 * deserialize instances of it.
 *
 * @author charles.wang
 * @created Jun 2, 2012 11:19:25 AM
 */
public class PersonWritable implements WritableComparable<PersonWritable> {

    private Text name;
    private IntWritable age;
    private Text title;

    // A no-argument constructor is required so that Hadoop can create an
    // instance before calling readFields().
    public PersonWritable() {
        set("someperson", 0, "sometitle");
    }

    public PersonWritable(String name, int age, String title) {
        set(name, age, title);
    }

    public void set(String name, int age, String title) {
        this.name = new Text(name);

        // Guard against non-positive ages.
        age = (age > 0) ? age : 1;
        this.age = new IntWritable(age);

        this.title = new Text(title);
    }

    /**
     * Defines the serialization process: each field of this object is written
     * to the output in turn.
     */
    @Override
    public void write(DataOutput out) throws IOException {
        name.write(out);
        age.write(out);
        title.write(out);
    }

    /**
     * Defines the deserialization process: the fields are restored from the
     * serialized DataInput in the same order they were written.
     */
    @Override
    public void readFields(DataInput in) throws IOException {
        name.readFields(in);
        age.readFields(in);
        title.readFields(in);
    }

    /**
     * Compares two serializable objects, field by field.
     */
    @Override
    public int compareTo(PersonWritable pO) {
        int cmp1 = name.compareTo(pO.name);
        if (cmp1 != 0) {
            return cmp1;
        }

        int cmp2 = age.compareTo(pO.age);
        if (cmp2 != 0) {
            return cmp2;
        }

        return title.compareTo(pO.title);
    }

    /**
     * Defining hashCode() is good practice; here we use the common approach of
     * multiplying each field's hash code by a different prime and summing.
     */
    @Override
    public int hashCode() {
        return name.hashCode() * 71 + age.hashCode() * 73 + title.hashCode() * 127;
    }

    @Override
    public boolean equals(Object o) {
        if (o instanceof PersonWritable) {
            PersonWritable pw = (PersonWritable) o;
            return name.equals(pw.name) && age.equals(pw.age) && title.equals(pw.title);
        }
        return false;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("[");
        sb.append("Name: ").append(name).append(",");
        sb.append("Age: ").append(age).append(",");
        sb.append("Title: ").append(title);
        sb.append("]");
        return sb.toString();
    }
}
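Because PersonWritable implements WritableComparable, it can be used wherever Hadoop expects a key type, for example as the key class of a SequenceFile or as a map output key in a MapReduce job, where compareTo() drives the sort. As a quick illustration, here is a minimal sketch (not part of the original code) that writes a few PersonWritable keys into a SequenceFile and reads them back; the class name PersonWritableSequenceFileDemo, the /tmp/person.seq path, and the sample records are made up for the example:

package com.charles.writable;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;

public class PersonWritableSequenceFileDemo {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // Hypothetical output location; adjust to your environment.
        Path path = new Path("/tmp/person.seq");

        // Write a few PersonWritable keys (with IntWritable values) into a SequenceFile.
        SequenceFile.Writer writer = SequenceFile.createWriter(
                fs, conf, path, PersonWritable.class, IntWritable.class);
        try {
            writer.append(new PersonWritable("Charles Wang", 26, "Technical Lead"), new IntWritable(1));
            writer.append(new PersonWritable("John Doe", 30, "Engineer"), new IntWritable(2));
        } finally {
            writer.close();
        }

        // Read them back; readFields() restores each key and value in turn.
        SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);
        try {
            PersonWritable key = new PersonWritable();
            IntWritable value = new IntWritable();
            while (reader.next(key, value)) {
                System.out.println(key + " -> " + value);
            }
        } finally {
            reader.close();
        }
    }
}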
To make it easy to show the data before and after serialization, we define a utility class whose methods expose the intermediate products of serialization and deserialization:
package com.charles.writable;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.Writable;

/**
 * Description: utility methods for tracing serialization.
 * In Hadoop, serialization and deserialization go through the Writable
 * interface (a Writable is a serializable Hadoop object), so we write the
 * serialized output into a byte array in order to capture its contents.
 *
 * @author charles.wang
 * @created Jun 2, 2012 9:32:41 AM
 */
public class HadoopSerializationUtil {

    // Serializes a Hadoop object (Writable marks it as serializable) into a
    // byte array and returns the array's contents.
    // Parameter: the object to serialize.
    // Return value: the serialized bytes.
    public static byte[] serialize(Writable writable) throws IOException {
        // Create a byte array output stream.
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        // Wrap it in a DataOutputStream to hold the serialized byte stream.
        DataOutputStream dataout = new DataOutputStream(out);
        // Let the Hadoop object serialize itself into the byte stream.
        writable.write(dataout);
        dataout.close();
        // Return the serialized bytes.
        return out.toByteArray();
    }

    // Deserializes a byte array back into a Hadoop Writable object.
    // Parameter 1: the Writable that receives the deserialized state.
    // Parameter 2: the byte array to deserialize.
    public static void deserialize(Writable writable, byte[] bytes) throws Exception {
        // Open a byte array input stream over the bytes to be processed (second parameter).
        ByteArrayInputStream in = new ByteArrayInputStream(bytes);
        // Wrap it in a DataInputStream.
        DataInputStream datain = new DataInputStream(in);
        // Let the Hadoop framework deserialize the bytes; the restored state
        // ends up in the first parameter.
        writable.readFields(datain);
        datain.close();
    }
}
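As a side note, Hadoop ships its own in-memory buffer classes, org.apache.hadoop.io.DataOutputBuffer and DataInputBuffer, which can stand in for the ByteArrayOutputStream/ByteArrayInputStream plumbing above. Below is a minimal sketch of the same round trip using those classes; the class name BufferRoundTrip is made up for the example, and this is an alternative to, not part of, the utility shown above:

package com.charles.writable;

import java.io.IOException;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Writable;

public class BufferRoundTrip {

    public static byte[] serialize(Writable writable) throws IOException {
        DataOutputBuffer out = new DataOutputBuffer();
        writable.write(out);
        // getData() returns the internal array; only the first getLength() bytes are valid.
        byte[] bytes = new byte[out.getLength()];
        System.arraycopy(out.getData(), 0, bytes, 0, out.getLength());
        return bytes;
    }

    public static void deserialize(Writable writable, byte[] bytes) throws IOException {
        DataInputBuffer in = new DataInputBuffer();
        in.reset(bytes, bytes.length);
        writable.readFields(in);
    }
}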
Finally, a demo program serializes and then deserializes an instance of our custom class:
package com.charles.writable;

import org.apache.hadoop.util.StringUtils;

/**
 * Description: demonstrates that the custom Hadoop serializable class works correctly.
 *
 * @author charles.wang
 * @created Jun 2, 2012 11:40:01 AM
 */
public class HadoopObjectSerializationDemo {

    public static void main(String[] args) throws Exception {

        // Experiment 1: serialize an instance of our custom Hadoop serializable class.
        System.out.println("Experiment 1: serialization");
        PersonWritable originalPersonWritable = new PersonWritable("Charles Wang", 26, "Technical Lead");
        String typeInfo = "Custom Hadoop serializable class under test: " + originalPersonWritable.getClass().getName() + "\n";
        String primaryPersonWritableInfo = "Object before serialization: " + originalPersonWritable.toString() + "\n";
        // Perform the serialization.
        byte[] serializedHadoopValue = HadoopSerializationUtil.serialize(originalPersonWritable);
        String lengthInfo = "Length of serialized byte array: " + serializedHadoopValue.length + "\n";
        String serializeValueInfo = "Serialized value: " + StringUtils.byteToHexString(serializedHadoopValue) + "\n";

        System.out.println(typeInfo + primaryPersonWritableInfo + lengthInfo + serializeValueInfo + "\n");

        System.out.println();

        // Experiment 2: deserialize the byte array back into the original Hadoop object.
        System.out.println("Experiment 2: deserialization");
        PersonWritable restoredPersonWritable = new PersonWritable();
        String originalByteArrayInfo = "Byte array to deserialize: " + StringUtils.byteToHexString(serializedHadoopValue) + "\n";
        // Perform the deserialization.
        HadoopSerializationUtil.deserialize(restoredPersonWritable, serializedHadoopValue);
        String restoredValueInfo = "Writable object after deserialization: " + restoredPersonWritable.toString();
        System.out.println(originalByteArrayInfo + restoredValueInfo + "\n");
    }
}
The output is shown below, confirming that our custom Hadoop serializable class works correctly:
Experiment 1: serialization
Custom Hadoop serializable class under test: com.charles.writable.PersonWritable
Object before serialization: [Name: Charles Wang,Age: 26,Title: Technical Lead]
Length of serialized byte array: 32
Serialized value: 0c436861726c65732057616e670000001a0e546563686e6963616c204c656164



Experiment 2: deserialization
Byte array to deserialize: 0c436861726c65732057616e670000001a0e546563686e6963616c204c656164
Writable object after deserialization: [Name: Charles Wang,Age: 26,Title: Technical Lead]
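The 32 bytes are easy to read against the Writable formats involved: Text writes a variable-length (VInt) byte count followed by the string's UTF-8 bytes, and IntWritable writes a 4-byte big-endian integer, so the hex string breaks down as:

- 0c = 12, the length of "Charles Wang", followed by 436861726c65732057616e67, its UTF-8 bytes (13 bytes for the name field);
- 0000001a = 26, the 4-byte age;
- 0e = 14, the length of "Technical Lead", followed by 546563686e6963616c204c656164, its UTF-8 bytes (15 bytes for the title field).

13 + 4 + 15 = 32 bytes, which matches the reported length.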