设为首页 加入收藏

TOP

大数据系列之实时处理Storm(五)Storm与Kafka集成
2019-03-19 14:13:14 】 浏览:62
Tags:数据 系列 实时 处理 Storm Kafka 集成
版权声明:本文为博主原创文章,未经博主允许不得转载。 https://blog.csdn.net/u011444062/article/details/81271867

我们最常用的或许就是Storm从Kafka中读取数据转换成Tuple了,现在我们就将Storm与Kafka来进行整合。

1.pom.xml

<!-- Storm/Kafka integration spout (SpoutConfig, ZkHosts, KafkaSpout). -->
<dependency>
  <groupId>org.apache.storm</groupId>
  <artifactId>storm-kafka</artifactId>
  <version>1.1.0</version>
</dependency>
<!-- Kafka client/broker classes pulled in for the old ZooKeeper-based consumer.
     NOTE(review): kafka_2.10 0.8.1.1 is quite old relative to storm-kafka 1.1.0 —
     presumably matches the cluster's broker version; verify before upgrading. -->
<dependency>
  <groupId>org.apache.kafka</groupId>
  <artifactId>kafka_2.10</artifactId>
  <version>0.8.1.1</version>
  <exclusions>
    <!-- Storm already ships its own ZooKeeper and logging; excluding these
         avoids classpath conflicts at runtime. -->
    <exclusion>
      <groupId>org.apache.zookeeper</groupId>
      <artifactId>zookeeper</artifactId>
    </exclusion>
    <exclusion>
      <groupId>log4j</groupId>
      <artifactId>log4j</artifactId>
    </exclusion>
  </exclusions>
</dependency>

2.代码:

2.1Bolt:

package com.storm.kafka;

import java.util.Map;

import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.IRichBolt;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;

/**
 * Bolt that splits each line received from the KafkaSpout into words.
 *
 * <p>For every input tuple it reads the first field as a line of text,
 * prints it, emits one tuple per whitespace-separated word on the declared
 * {@code "word"} field, and finally acks the input tuple.
 *
 * @author 邹培贤 (original), reviewed 2018/7/29 15:27
 */
public class SplitBolt implements IRichBolt {
    private TopologyContext context;
    private OutputCollector collector;

    @Override
    public void prepare(Map map, TopologyContext topologyContext, OutputCollector outputCollector) {
        this.context = topologyContext;
        this.collector = outputCollector;
    }

    @Override
    public void execute(Tuple tuple) {
        String line = tuple.getString(0);
        System.out.println(line);
        // Fix: the original declared an output field "word" but never emitted
        // anything — actually split the line and emit each word. Emitting
        // anchored to the input tuple keeps Storm's reliability tracking intact.
        for (String word : line.split("\\s+")) {
            if (!word.isEmpty()) {
                collector.emit(tuple, new Values(word));
            }
        }
        // Fix: the original never acked. Without an ack the KafkaSpout
        // considers the tuple failed after the message timeout and replays it
        // indefinitely.
        collector.ack(tuple);
    }

    @Override
    public void cleanup() {
        // No resources to release.
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
        // Downstream bolts receive single-field tuples named "word".
        outputFieldsDeclarer.declare(new Fields("word"));
    }

    @Override
    public Map<String, Object> getComponentConfiguration() {
        // No per-component configuration overrides.
        return null;
    }
}

2.2APP:

package com.storm.kafka;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.kafka.*;
import org.apache.storm.spout.SchemeAsMultiScheme;
import org.apache.storm.topology.TopologyBuilder;

import java.util.UUID;

/**
 * Entry point wiring a KafkaSpout to {@link SplitBolt} and running the
 * topology in a local in-process Storm cluster.
 *
 * <p>Consumes the Kafka topic {@code "test"} via the ZooKeeper ensemble
 * s10/s11/s12, decodes messages as UTF-8 strings, and shuffle-groups them
 * to two SplitBolt tasks.
 *
 * @author 邹培贤 (original), reviewed 2018/7/29 15:30
 */
public class APP {
    public static void main(String[] args) throws InterruptedException {
        TopologyBuilder builder = new TopologyBuilder();
        // ZooKeeper ensemble the Kafka brokers register with.
        String zkConnString = "s10:2181,s11:2181,s12:2181";
        BrokerHosts hosts = new ZkHosts(zkConnString);
        // Topic "test"; consumer offsets stored under ZK root "/test" with a
        // random consumer id, so each run starts from a fresh offset path.
        SpoutConfig spoutConfig = new SpoutConfig(hosts, "test", "/test", UUID.randomUUID().toString());
        // Decode raw Kafka message bytes into single-field String tuples.
        spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
        KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);

        builder.setSpout("kafkaspout", kafkaSpout).setNumTasks(2);
        builder.setBolt("split-bolt", new SplitBolt(), 2)
               .shuffleGrouping("kafkaspout")
               .setNumTasks(2);

        Config conf = new Config();
        conf.setNumWorkers(2);
        conf.setDebug(true);

        // Local mode: run the topology for a bounded time, then shut down.
        // Fix: the original never killed the topology or shut the cluster
        // down, leaking LocalCluster's non-daemon threads so the JVM never
        // exits cleanly.
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("wc", conf, builder.createTopology());
        try {
            Thread.sleep(60_000); // let the topology consume for one minute
        } finally {
            cluster.killTopology("wc");
            cluster.shutdown();
        }
    }
}

3.运行结果:



】【打印繁体】【投稿】【收藏】 【推荐】【举报】【评论】 【关闭】 【返回顶部
上一篇kafka的partition如何分布到不同.. 下一篇zookeeper集群和kafka集群及nginx..

最新文章

热门文章

Hot 文章

Python

C 语言

C++基础

大数据基础

linux编程基础

C/C++面试题目