[Experience Sharing] HDFS Throughput and IOPS Testing

  1. Test environment:
  1 NameNode + 1 SecondaryNameNode + 4 DataNodes
  OS: Red Hat 4.1.2-46
  CPU: 16 × Intel(R) Xeon(R) CPU E5620 @ 2.40GHz
  MEM: 12 GB
  NIC: 1000 Mb/s (Gigabit Ethernet)
  2. Test preparation:
  ulimit -n 655350 (effective for the current session only)
  ulimit -u 65535 (effective for the current session only)
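  To make these limits survive logout instead of applying only to the current session, they can be set in /etc/security/limits.conf. A minimal sketch, assuming the tests run as a user named hadoop (adjust the user name to your setup):

# /etc/security/limits.conf — persistent equivalents of the ulimit calls above
hadoop  soft  nofile  655350
hadoop  hard  nofile  655350
hadoop  soft  nproc   65535
hadoop  hard  nproc   65535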
  3. IOPS test:
  3.1 Test code

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import java.util.concurrent.CountDownLatch;

// Each worker issues `count` metadata operations against HDFS: open() for the
// read test, create() for the write test. Note that the operations happen in
// the constructor, so main() times object construction; run() only releases
// the latch (size is always 0 in the IOPS test, so its loop never executes).
class single_thread implements Runnable {
    private final CountDownLatch doneSignal;
    private int name;   // worker index, used to build unique file names
    private int size;   // file size in MB; always 0 for the IOPS test
    private Configuration conf;
    private FileSystem hdfs;
    private FSDataOutputStream out;
    private FSDataInputStream in;
    private long i;

    public single_thread(int na, String src, String dst, int numOfM,
                         CountDownLatch doneSignal_, int count) {
        this.doneSignal = doneSignal_;
        try {
            name = na;
            conf = new Configuration();
            hdfs = FileSystem.get(conf);
            // Read test: paths containing "dev" (/dev/zero, /dev/null) are
            // skipped; otherwise open and immediately close `count` files.
            if (!src.contains("dev")) {
                for (int i1 = 0; i1 < count; i1++) {
                    in = hdfs.open(new Path(src + "zero" + name + "." + i1));
                    in.close();
                }
            }
            // Write test: create and immediately close `count` empty files.
            if (!dst.contains("dev")) {
                for (int i1 = 0; i1 < count; i1++) {
                    out = hdfs.create(new Path(dst + "zero" + name + "." + i1));
                    out.close();
                }
            }
            i = 0;
            size = numOfM;
        } catch (Exception e) {
            System.err.println("error:" + e.toString());
        }
    }

    public void run() {
        while (i < 1024 * size) {   // size == 0, so this is a no-op
            i++;
        }
        doneSignal.countDown();
    }
}

public class hdfs_iops {
    public static void main(String[] args) throws InterruptedException {
        int fileSize = 0;                           // empty files: pure metadata load
        int count = Integer.parseInt(args[2]);      // operations per thread
        int threadNum = Integer.parseInt(args[3]);  // number of threads
        CountDownLatch doneSignal = new CountDownLatch(threadNum);
        Thread t[] = new Thread[threadNum];
        // The constructors below perform the actual opens/creates, so this
        // interval covers threadNum * count metadata operations in total.
        long startTime = System.nanoTime();
        for (int num = 0; num < threadNum; num++) {
            t[num] = new Thread(new single_thread(num, args[0], args[1],
                                                  fileSize, doneSignal, count));
        }
        long endTime = System.nanoTime();
        long runTime = (endTime - startTime) / count;  // ns per round of threadNum ops
        for (int num = 0; num < threadNum; num++) {
            t[num].start();
        }
        doneSignal.await();
        System.out.println("thread: " + threadNum + "  count: " + count
                + "  iops: " + threadNum / ((double) runTime / 1000000000.0));
    }
}
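  A note on the reported figure: the open/create calls run inside the single_thread constructors, between the two System.nanoTime() calls, so the printed value equals (threadNum × count) operations divided by the elapsed seconds. For example (hypothetical numbers): 100 threads × 10 creates finishing in 2 s gives runTime = 2 s / 10 = 0.2 s, and the program reports 100 / 0.2 = 500 IOPS.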

  3.2 Compilation
  javac -cp hadoop-core-1.0.3.jar hdfs_iops.java
  3.3 Execution:
  On a machine other than the namenode.
  Write:
  Method 1: hadoop hdfs_iops /dev/zero hdfs://ns:9000/ 10 (I/O operations per thread) 100 (number of threads)
  Method 2:
  java -cp :/home/hadoop/hadoop-1.0.3/lib/asm-3.2.jar:/home/hadoop/hadoop-1.0.3/lib/aspectjrt-1.6.5.jar:/home/hadoop/hadoop-1.0.3/lib/aspectjtools-1.6.5.jar:/home/hadoop/hadoop-1.0.3/lib/commons-beanutils-1.7.0.jar:/home/hadoop/hadoop-1.0.3/lib/commons-beanutils-core-1.8.0.jar:/home/hadoop/hadoop-1.0.3/lib/commons-cli-1.2.jar:/home/hadoop/hadoop-1.0.3/lib/commons-codec-1.4.jar:/home/hadoop/hadoop-1.0.3/lib/commons-collections-3.2.1.jar:/home/hadoop/hadoop-1.0.3/lib/commons-configuration-1.6.jar:/home/hadoop/hadoop-1.0.3/lib/commons-daemon-1.0.1.jar:/home/hadoop/hadoop-1.0.3/lib/commons-digester-1.8.jar:/home/hadoop/hadoop-1.0.3/lib/commons-el-1.0.jar:/home/hadoop/hadoop-1.0.3/lib/commons-httpclient-3.0.1.jar:/home/hadoop/hadoop-1.0.3/lib/commons-io-2.1.jar:/home/hadoop/hadoop-1.0.3/lib/commons-lang-2.4.jar:/home/hadoop/hadoop-1.0.3/lib/commons-logging-1.1.1.jar:/home/hadoop/hadoop-1.0.3/lib/commons-logging-api-1.0.4.jar:/home/hadoop/hadoop-1.0.3/lib/commons-math-2.1.jar:/home/hadoop/hadoop-1.0.3/lib/commons-net-1.4.1.jar:/home/hadoop/hadoop-1.0.3/lib/core-3.1.1.jar:/home/hadoop/hadoop-1.0.3/lib/hadoop-capacity-scheduler-1.0.3.jar:/home/hadoop/hadoop-1.0.3/lib/hadoop-fairscheduler-1.0.3.jar:/home/hadoop/hadoop-1.0.3/lib/hadoop-thriftfs-1.0.3.jar:/home/hadoop/hadoop-1.0.3/lib/hsqldb-1.8.0.10.jar:/home/hadoop/hadoop-1.0.3/lib/jackson-core-asl-1.8.8.jar:/home/hadoop/hadoop-1.0.3/lib/jackson-mapper-asl-1.8.8.jar:/home/hadoop/hadoop-1.0.3/lib/jasper-compiler-5.5.12.jar:/home/hadoop/hadoop-1.0.3/lib/jasper-runtime-5.5.12.jar:/home/hadoop/hadoop-1.0.3/lib/jdeb-0.8.jar:/home/hadoop/hadoop-1.0.3/lib/jersey-core-1.8.jar:/home/hadoop/hadoop-1.0.3/lib/jersey-json-1.8.jar:/home/hadoop/hadoop-1.0.3/lib/jersey-server-1.8.jar:/home/hadoop/hadoop-1.0.3/lib/jets3t-0.6.1.jar:/home/hadoop/hadoop-1.0.3/lib/jetty-6.1.26.jar:/home/hadoop/hadoop-1.0.3/lib/jetty-util-6.1.26.jar:/home/hadoop/hadoop-1.0.3/lib/jsch-0.1.42.jar:/home/hadoop/hadoop-1.0.3/lib/junit-4.5.jar:/home/hadoop/hadoop-1.0.3/lib/kfs-0.2.2.jar:/home/hadoop/hadoop-1.0.3/lib/log4j-1.2.15.jar:/home/hadoop/hadoop-1.0.3/lib/mockito-all-1.8.5.jar:/home/hadoop/hadoop-1.0.3/lib/oro-2.0.8.jar:/home/hadoop/hadoop-1.0.3/lib/servlet-api-2.5-20081211.jar:/home/hadoop/hadoop-1.0.3/lib/slf4j-api-1.4.3.jar:/home/hadoop/hadoop-1.0.3/lib/slf4j-log4j12-1.4.3.jar:/home/hadoop/hadoop-1.0.3/lib/xmlenc-0.52.jar  hdfs_iops  /dev/zero hdfs://ns:9000/  10  100
  Read:
  Method 1:
  hadoop hdfs_iops hdfs://ns:9000/ /dev/null 10 (I/O operations per thread) 100 (number of threads)
  Method 2: same as Method 2 for the write test.
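  The read test opens files named zero<thread>.<index> under the source path; running the write test first with the same operation-count and thread arguments creates exactly those files. To pre-create them by hand instead, a minimal sketch (hypothetical parameters: 2 threads, 3 operations per thread):

for t in 0 1; do
  for i in 0 1 2; do
    hadoop fs -touchz hdfs://ns:9000/zero$t.$i
  done
done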
  IOPS test results:

  4. Throughput test
  Test code

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import java.util.concurrent.CountDownLatch;

// Each worker streams `size` MB between src and dst in 4 KB chunks. Paths
// containing "dev" are treated as local devices (/dev/zero as a source,
// /dev/null as a sink); anything else is an HDFS path, to which the file
// name "pero<thread index>" is appended.
class single_thread implements Runnable {
    private final CountDownLatch doneSignal;
    private int name;    // worker index
    private int size;    // file size in MB
    private Configuration conf;
    private FileSystem hdfs;
    private FileSystem local;
    private FSDataOutputStream out;
    private FSDataInputStream in;
    private byte buffer[];
    private long i;
    private int b;

    public single_thread(int na, String src, String dst, int numOfM,
                         CountDownLatch doneSignal_) {
        this.doneSignal = doneSignal_;
        try {
            name = na;
            conf = new Configuration();
            hdfs = FileSystem.get(conf);
            local = FileSystem.getLocal(conf);
            if (src.contains("dev")) {
                in = local.open(new Path(src));      // e.g. /dev/zero
            } else {
                in = hdfs.open(new Path(src + "pero" + name));
            }
            if (dst.contains("dev")) {
                out = local.create(new Path(dst));   // e.g. /dev/null
            } else {
                out = hdfs.create(new Path(dst + "pero" + name));
            }
            buffer = new byte[4096];
            i = 0;
            size = numOfM;
        } catch (Exception e) {
            System.err.println("error:" + e.toString());
        }
    }

    public void run() {
        try {
            // 256 iterations * 4 KB = 1 MB per unit of size. read() may return
            // fewer than 4096 bytes, so the copied volume is approximate; stop
            // cleanly on end-of-stream instead of writing a negative length.
            while (i < 256 * size) {
                b = in.read(buffer);
                if (b < 0) break;
                out.write(buffer, 0, b);
                i++;
            }
        } catch (Exception e) {
            System.err.println("error:" + e.toString());
        } finally {
            try {
                in.close();
                out.close();
            } catch (Exception e) {
                System.err.println("error:" + e.toString());
            }
            doneSignal.countDown();
        }
    }
}

public class hdfs_test {
    public static void main(String[] args) throws InterruptedException {
        int fileSize = Integer.parseInt(args[2]);    // MB per thread
        int threadNum = Integer.parseInt(args[3]);   // number of threads
        double totalSize = fileSize * threadNum * 1024.0 * 1024.0;  // bytes
        CountDownLatch doneSignal = new CountDownLatch(threadNum);
        Thread t[] = new Thread[threadNum];
        long startTime = System.nanoTime();
        for (int num = 0; num < threadNum; num++) {
            t[num] = new Thread(new single_thread(num, args[0], args[1],
                                                  fileSize, doneSignal));
            t[num].start();
        }
        doneSignal.await();
        long endTime = System.nanoTime();
        long runTime = endTime - startTime;
        System.out.println("totalSize:" + fileSize * threadNum + "MB   "
                + "totalTime:" + runTime / 1000000 + "ms");
        if (fileSize == 0)
            System.out.println("iops :" + threadNum / ((double) runTime / 1000000000.0));
        else
            // bytes * 1000 / ns = bytes per microsecond, i.e. decimal MB/s
            System.out.println("speed: " + totalSize * 1000.0 / (double) runTime
                    + "  totalsize: " + totalSize + "   runtime: " + runTime);
    }
}
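  Reading the output (hypothetical numbers): 10 threads × 64 MB each is 640 MB in total; if the copy takes 6,400 ms, the program prints speed ≈ 104.86, i.e. about 105 MB/s in decimal megabytes (671,088,640 bytes × 1000 / 6.4×10⁹ ns).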

  Compile and run in the same way as the IOPS test.
  (The last two numeric arguments are the test file size in MB and the number of threads; an example invocation follows.)
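  For example (hypothetical argument values, using the hdfs_test class above):

hadoop hdfs_test /dev/zero hdfs://ns:9000/ 64 10    # write: 10 threads, 64 MB each
hadoop hdfs_test hdfs://ns:9000/ /dev/null 64 10    # read back the pero0..pero9 files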
  Throughput test results (incomplete in places, due to the data scale involved):
