Syncing data from MySQL into Flume, then from Flume into Kafka

Download for the custom plugin package: https://download.csdn.net/download/a1213353721/10935724

Below is the Flume configuration. It was set up on a CDH cluster and has been tested working. The custom source plugin above must be on the agent's classpath (for example via Flume's plugins.d mechanism) before the agent starts.

tier1.sources = s-1
tier1.channels = c-1
tier1.sinks = k-1 k-2
#The key to configuring failover: a sink group is required
tier1.sinkgroups = g-1
tier1.sinkgroups.g-1.sinks = k-1 k-2
#The processor type is failover
tier1.sinkgroups.g-1.processor.type = failover
#Priority: the larger the number, the higher the priority; every sink must have a distinct priority
tier1.sinkgroups.g-1.processor.priority.k-1 = 5
tier1.sinkgroups.g-1.processor.priority.k-2 = 10
#Maximum backoff for a failed sink, set to 10 seconds here (value in milliseconds); tune it to your environment
tier1.sinkgroups.g-1.processor.maxpenalty = 10000
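#Alternative (not used here): a load_balance processor runs both sinks in parallel
#instead of keeping one on hot standby. A minimal sketch using stock Flume settings:
#tier1.sinkgroups.g-1.processor.type = load_balance
#tier1.sinkgroups.g-1.processor.selector = round_robin
#tier1.sinkgroups.g-1.processor.backoff = true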
########## Channel definition
#Data volume is small, so a memory channel is used; JDBC, Kafka, or on-disk file
#channels are alternatives (see the commented file channel sketch below)
tier1.channels.c-1.type = memory
#Maximum number of events held in the channel queue
tier1.channels.c-1.capacity = 100000
#Maximum length of the putList/takeList queues. Per transaction a sink takes up to
#batchSize events from the channel into this queue, so this value should be smaller
#than capacity and larger than the sink's batchSize.
#Official definition: The maximum number of events the channel will take from a source or give to a sink per transaction.
tier1.channels.c-1.transactionCapacity = 1000
tier1.channels.c-1.byteCapacityBufferPercentage = 20
###Defaults to 80% of the JVM's maximum available memory, so it can be left unset
#tier1.channels.c-1.byteCapacity = 800000
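#Alternative (not used here): a file channel is slower but survives agent restarts.
#A minimal sketch; the directories are placeholders:
#tier1.channels.c-1.type = file
#tier1.channels.c-1.checkpointDir = /var/lib/flume-ng/checkpoint
#tier1.channels.c-1.dataDirs = /var/lib/flume-ng/data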

#########sql source#################
#Channel used by source s-1; it must match the sinks' channel, or nothing gets through
tier1.sources.s-1.channels=c-1
######### For each one of the sources, the type is defined
tier1.sources.s-1.type = org.keedio.flume.source.SQLSource
tier1.sources.s-1.hibernate.connection.url = jdbc:mysql://10.20.1.18:3306/rdms
######### Hibernate Database connection properties
tier1.sources.s-1.hibernate.connection.user = root
tier1.sources.s-1.hibernate.connection.password = bigdata
tier1.sources.s-1.hibernate.connection.autocommit = true
tier1.sources.s-1.hibernate.dialect = org.hibernate.dialect.MySQL5Dialect
tier1.sources.s-1.hibernate.connection.driver_class = com.mysql.jdbc.Driver
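#Milliseconds to wait between successive polls of the table (semantics as in the
#stock keedio flume-ng-sql-source, of which this plugin is a custom build)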
tier1.sources.s-1.run.query.delay=10000
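#The status file persists the last processed index so the source resumes where it
#left off after an agent restart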
tier1.sources.s-1.status.file.path = /usr/lib/flume-ng/
#tier1.sources.s-1.status.file.name = SQLSource.status
tier1.sources.s-1.status.file.name.prefix = SQLSource
tier1.sources.s-1.status.file.name.suffix = .status
######## Custom query
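#Initial value of the incremental column (the @@ placeholder) when no status file exists yet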
tier1.sources.s-1.start.from = 0
#tier1.sources.s-1.table = imsi_test
tier1.sources.s-1.table.prefix = imsi
#When a full custom query is used, none of the prefix/suffix settings here take effect.
#If the 'id' column is aliased, order.by must match the column name or its alias exactly.
#tier1.sources.s-1.custom.query = select id as 'tid',area as 'fee' from imsi_test where id > @@ order by id asc
#When using the prefix/suffix query form below, the order.by property must stay commented out
#tier1.sources.s-1.order.by = tid
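#With the prefix/suffix form, the plugin presumably assembles the statement as
#custom.query.prefix + <table name> + custom.query.suffix, i.e. roughly
#select * from <table> where id > @@ order by id asc (fork-specific; not verified)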
tier1.sources.s-1.custom.query.prefix = select * from
tier1.sources.s-1.custom.query.suffix = where id > @@ order by id asc
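#batch.size: events written to the channel per transaction; max.rows: rows fetched
#per query (semantics as in the stock keedio source)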
tier1.sources.s-1.batch.size = 100
tier1.sources.s-1.max.rows = 100
tier1.sources.s-1.hibernate.connection.provider_class = org.hibernate.connection.C3P0ConnectionProvider
tier1.sources.s-1.hibernate.c3p0.min_size=5
tier1.sources.s-1.hibernate.c3p0.max_size=20

######### sinks 1
#Channel used by sink k-1; it must match the source's channel, or the sink gets no data
tier1.sinks.k-1.channel = c-1
tier1.sinks.k-1.type = org.apache.flume.sink.kafka.KafkaSink
tier1.sinks.k-1.kafka.topic = topic_kafkajdbc
tier1.sinks.k-1.kafka.bootstrap.servers = admin:9092,dn02:9092,nn01:9092
tier1.sinks.k-1.kafka.producer.acks = 1

#Number of events processed per batch
tier1.sinks.k-1.batchSize = 100

######### sinks 2
#Channel used by sink k-2; it must match the source's channel, or the sink gets no data
tier1.sinks.k-2.channel = c-1
tier1.sinks.k-2.type = org.apache.flume.sink.kafka.KafkaSink
tier1.sinks.k-2.kafka.topic = topic_kafkajdbc
tier1.sinks.k-2.kafka.bootstrap.servers = admin:9092,dn02:9092,nn01:9092
tier1.sinks.k-2.kafka.producer.acks = 1
tier1.sinks.k-2.batchSize = 100
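
#Note: as written, k-1 and k-2 publish to the same topic on the same broker list, so
#the failover only covers a sink-level failure. To also survive a broker outage, the
#standby sink would point at a second cluster, e.g. (hypothetical hostnames):
#tier1.sinks.k-2.kafka.bootstrap.servers = backup01:9092,backup02:9092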
