[Experience Sharing] nodejs+kafka+storm+hbase development

  1. Environment overview
  As shown in the figure below, Node.js acts as the data-source producer: it generates messages and sends them to a Kafka queue. The red line marks how data flows in the local development environment (during local development the Storm topology runs in local mode).
(Figure: data-flow diagram, Node.js → Kafka → Storm → HBase)

  2. Setting up the environment; I use Eclipse + Maven
  1. Create a Maven project, then change the pom file to the following:
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <groupId>com.h3c.storm</groupId>
  <artifactId>storm-samples</artifactId>
  <packaging>jar</packaging>
  <version>1.0-SNAPSHOT</version>
  <name>storm-kafka-test</name>
  <url>http://maven.apache.org</url>
  <dependencies>
    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <version>3.8.1</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>jdk.tools</groupId>
      <artifactId>jdk.tools</artifactId>
      <version>1.7</version>
      <scope>system</scope>
      <systemPath>${JAVA_HOME}/lib/tools.jar</systemPath>
    </dependency>
    <dependency>
      <groupId>org.apache.storm</groupId>
      <artifactId>storm-core</artifactId>
      <version>0.10.0</version>
      <!-- keep storm out of the jar-with-dependencies -->
      <scope>provided</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>kafka_2.9.2</artifactId>
      <version>0.8.1.1</version>
      <exclusions>
        <exclusion>
          <groupId>org.apache.zookeeper</groupId>
          <artifactId>zookeeper</artifactId>
        </exclusion>
        <exclusion>
          <groupId>log4j</groupId>
          <artifactId>log4j</artifactId>
        </exclusion>
      </exclusions>
    </dependency>
    <dependency>
      <groupId>org.apache.storm</groupId>
      <artifactId>storm-kafka</artifactId>
      <version>0.9.2-incubating</version>
    </dependency>
    <dependency>
      <groupId>org.apache.storm</groupId>
      <artifactId>storm-hbase</artifactId>
      <version>0.10.0</version>
    </dependency>
  </dependencies>
</project>
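  With this pom in place the project builds directly in Eclipse; it can also be built from the command line (a sketch, assuming Maven is installed and on the PATH). Note that, as written, the pom produces a plain jar; bundling the dependencies for cluster submission would additionally need something like the shade or assembly plugin.

mvn clean package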
  2. Sample Node.js code for producing messages. Of course, you first need to create a Kafka topic by hand that matches the topic used in the code; the topic I created here is "historyclients".
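  The topic can be created with the kafka-topics.sh script that ships with Kafka 0.8.x; the command below is only a sketch (run it from the Kafka installation directory on one of the brokers, and adjust the ZooKeeper address, partition count, and replication factor to your cluster):

# create the topic used by both the Node.js producer and the Storm spout
bin/kafka-topics.sh --create --zookeeper 172.27.8.111:2181 --replication-factor 1 --partitions 1 --topic historyclients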
var kafka = require('kafka-node');
var Producer = kafka.Producer;
var KeyedMessage = kafka.KeyedMessage;

var conf = '172.27.8.111:2181,172.27.8.112:2181,172.27.8.119:2181';
var client = new kafka.Client(conf);
var producer = new Producer(client);

var clientOnlineInfo = {
    "clientMAC": "0000-0000-0002",
    "acSN": "210235A1AMB159000008",
    "onLineTime": "2016-06-27 10:00:00"
};
var clientOnlineInfoStr = JSON.stringify(clientOnlineInfo);

var msg = [
    { topic: 'historyclients', messages: clientOnlineInfoStr, partition: 0 }
];

producer.on('ready', function () {
    producer.send(msg, function (err, data) {
        console.log("done!");
        console.log(data);
    });
});

producer.on('error', function (err) {
    console.error(err);
});
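  Before wiring up Storm it is worth checking that the message really lands in the topic. One way (again a sketch, assuming Kafka 0.8.x and the same ZooKeeper address as above) is the console consumer bundled with Kafka:

# print every message in the topic from the beginning
bin/kafka-console-consumer.sh --zookeeper 172.27.8.111:2181 --topic historyclients --from-beginning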
  3. Spout code
package com.h3c.storm;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

public class KafkaSpout extends BaseRichSpout {

    private SpoutOutputCollector collector;
    private ConsumerConnector consumer;
    private String topic;
    private Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap;

    private static ConsumerConfig createConsumerConfig() {
        Properties props = new Properties();
        props.put("zookeeper.connect", "172.27.8.111:2181,172.27.8.112:2181,172.27.8.119:2181");
        props.put("group.id", "group1");
        props.put("zookeeper.session.timeout.ms", "40000");
        props.put("zookeeper.sync.time.ms", "200");
        props.put("auto.commit.interval.ms", "1000");
        return new ConsumerConfig(props);
    }

    @Override
    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
        System.err.println("open!!!!!!!!!!!!!!!");
        this.collector = collector;
        /* create the high-level Kafka consumer */
        this.topic = "historyclients";
        this.consumer = kafka.consumer.Consumer.createJavaConsumerConnector(createConsumerConfig());
        /* the topic map may contain multiple topics; here we read a single stream from "historyclients" */
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topic, new Integer(1));
        this.consumerMap = consumer.createMessageStreams(topicCountMap);
    }

    @Override
    public void nextTuple() {
        // Blocking read from the Kafka stream: it.hasNext() waits until a message arrives,
        // then every message is emitted as a one-field tuple.
        KafkaStream<byte[], byte[]> stream = consumerMap.get(topic).get(0);
        ConsumerIterator<byte[], byte[]> it = stream.iterator();
        String toSay = "";
        while (it.hasNext()) {
            toSay = new String(it.next().message());
            System.err.println("receive:" + toSay);
            this.collector.emit(new Values(toSay));
        }
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("clientInfo"));
    }
}
  4. The mapper that the storm-hbase API requires you to implement
package com.h3c.storm;

import org.apache.storm.hbase.bolt.mapper.HBaseMapper;
import org.apache.storm.hbase.common.ColumnList;
import backtype.storm.tuple.Tuple;

public class MyHBaseMapper implements HBaseMapper {

    public ColumnList columns(Tuple tuple) {
        ColumnList cols = new ColumnList();
        // the arguments are, in order: column family, column qualifier, value
        cols.addColumn("f1".getBytes(), "colMAC".getBytes(), tuple.getStringByField("clientInfo").getBytes());
        //System.err.println("BOLT + " + tuple.getStringByField("clientInfo"));
        //cols.addColumn("f1".getBytes(), "hhhhhhh".getBytes(), "0000-0000-0001".getBytes());
        return cols;
    }

    public byte[] rowKey(Tuple tuple) {
        //return tuple.getStringByField("clientInfo").getBytes();
        return "newRowKey".getBytes();
    }
}
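  Note that rowKey() above returns the constant "newRowKey", so every tuple overwrites the same HBase row; that is fine for a first smoke test but not for real data. A minimal alternative sketch (my own addition, assuming the "clientInfo" field carries the JSON string produced by the Node.js example, and using a naive regex where a JSON library would be more robust) derives the row key from the clientMAC field:

// hypothetical drop-in replacement for rowKey() in MyHBaseMapper:
// key each row by the clientMAC contained in the JSON message
public byte[] rowKey(Tuple tuple) {
    String json = tuple.getStringByField("clientInfo");
    java.util.regex.Matcher m = java.util.regex.Pattern
            .compile("\"clientMAC\"\\s*:\\s*\"([^\"]+)\"").matcher(json);
    // fall back to a fixed key if the field cannot be found
    return (m.find() ? m.group(1) : "unknownClient").getBytes();
}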
  5. Topology code
package com.h3c.storm;

import java.util.HashMap;
import java.util.Map;

import org.apache.storm.hbase.bolt.HBaseBolt;
import org.apache.storm.hbase.bolt.mapper.HBaseMapper;

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.StormSubmitter;
import backtype.storm.topology.TopologyBuilder;
import backtype.storm.utils.Utils;

public class PersistTopology {

    private static final String KAFKA_SPOUT = "KAFKA_SPOUT";
    private static final String HBASE_BOLT = "HBASE_BOLT";

    public static void main(String[] args) throws Exception {
        /* define the spout */
        KafkaSpout kafkaSpout = new KafkaSpout();
        System.setProperty("hadoop.home.dir", "E:\\eclipse\\");

        /* define the HBase bolt */
        HBaseMapper mapper = new MyHBaseMapper();
        HBaseBolt hbaseBolt = new HBaseBolt("historyclients", mapper).withConfigKey("hbase.conf");

        /* define the topology */
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout(KAFKA_SPOUT, kafkaSpout);
        builder.setBolt(HBASE_BOLT, hbaseBolt).shuffleGrouping(KAFKA_SPOUT);

        Config conf = new Config();
        conf.setDebug(true);
        Map<String, Object> hbConf = new HashMap<String, Object>();
        //if (args.length > 0) {
        //    hbConf.put("hbase.rootdir", args[0]);
        //}
        //hbConf.put("hbase.rootdir", "hdfs://172.27.8.111:8020/apps/hbase/data");
        conf.put("hbase.conf", hbConf);

        if (args != null && args.length > 0) {
            // a topology name was given on the command line: submit to the cluster
            conf.setNumWorkers(3);
            StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
        } else {
            // local mode: run for 10 minutes, then shut down
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("test", conf, builder.createTopology());
            Utils.sleep(600000);
            cluster.killTopology("test");
            cluster.shutdown();
        }
    }
}
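  In local mode (the else branch) the topology runs straight from Eclipse once steps 6-8 below are in place. Submitting to a real cluster instead would look roughly like the following sketch (the jar path and topology name are placeholders; it assumes the jar is built with its dependencies bundled, e.g. via the shade or assembly plugin, and that the storm client is installed on the submitting machine):

storm jar target/storm-samples-1.0-SNAPSHOT.jar com.h3c.storm.PersistTopology historyClientsTopology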
  6. Fetch the hbase-site.xml file from the cluster and add it to the project; this can be configured via the project's build path.
(Screenshot: adding hbase-site.xml to the project's build path in Eclipse)
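  For reference, the settings storm-hbase actually relies on are roughly the following (a sketch only, with values assumed from the cluster addresses used in this post; always prefer the real file copied from the cluster, since details such as the znode parent differ between distributions):

<configuration>
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>node1.hde.h3c.com,node2.hde.h3c.com,node3.hde.h3c.com</value>
  </property>
  <property>
    <name>hbase.zookeeper.property.clientPort</name>
    <value>2181</value>
  </property>
  <property>
    <name>zookeeper.znode.parent</name>
    <value>/hbase</value>
  </property>
</configuration>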

  7. In the hosts file under C:\Windows\System32\drivers\etc, add the IP-to-hostname mappings for the cluster:
  172.27.8.111  node1.hde.h3c.com  node1
  172.27.8.112  node2.hde.h3c.com  node2
  172.27.8.119  node3.hde.h3c.com  node3
  8. Fix for the error java.io.IOException: Could not locate executable null\bin\winutils.exe in the Hadoop binaries
  Download winutils.exe from the web and put it somewhere on disk; I placed it under E:\eclipse\bin (the directory holding it must be named "bin").
  Then add the following line to the code, pointing hadoop.home.dir at the parent of that bin directory:



System.setProperty("hadoop.home.dir", "E:\\eclipse\\");
  Reference article:
http://www.tuicool.com/articles/r6ZZBjU
