a1.sources = r1
a1.sinks = k1
a1.channels = c1

# Use the built-in Kafka source
a1.sources.r1.type = org.apache.flume.source.kafka.KafkaSource
# ZooKeeper quorum that the Kafka cluster is registered with
a1.sources.r1.zookeeperConnect = localhost:2181
a1.sources.r1.topic = kkt-test-topic
a1.sources.r1.batchSize = 100

# Write the events to HDFS
a1.sinks.k1.type = hdfs
a1.sinks.k1.hdfs.path = hdfs://iz94rak63uyz/user/flume
a1.sinks.k1.hdfs.writeFormat = Text
a1.sinks.k1.hdfs.fileType = DataStream
a1.sinks.k1.hdfs.rollInterval = 0
a1.sinks.k1.hdfs.rollSize = 1000000
a1.sinks.k1.hdfs.rollCount = 0
a1.sinks.k1.hdfs.batchSize = 1000
a1.sinks.k1.hdfs.txnEventMax = 1000
a1.sinks.k1.hdfs.callTimeout = 60000
a1.sinks.k1.hdfs.appendTimeout = 60000

# Use a channel which buffers events in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 1000

# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
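Before starting the agent, make sure the topic kkt-test-topic exists. A minimal sketch using the Kafka command-line tools, assuming an older Kafka release whose scripts still accept a --zookeeper flag (consistent with the zookeeperConnect setting above); the replication factor and partition count are placeholders for a single-node test:

# Create the topic the Flume source will read from (adjust for your cluster)
kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic kkt-test-topic
# Confirm the topic was created
kafka-topics.sh --list --zookeeper localhost:2181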
Start the Flume agent with the following command:
flume-ng agent --conf conf --conf-file flume.conf --name a1 -Dflume.root.logger=INFO,console
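Once the agent is running, you can push a few test events into the topic and check that files show up under the HDFS path configured above. A rough sketch, assuming the Kafka broker listens on localhost:9092 (the configuration only names the ZooKeeper address, so the broker address is an assumption) and that the HDFS client is on the PATH; FlumeData is the HDFS sink's default file prefix:

# Type a few test lines into the topic (Ctrl+C to stop)
kafka-console-producer.sh --broker-list localhost:9092 --topic kkt-test-topic
# After the sink flushes, list and inspect the files written by Flume
hdfs dfs -ls /user/flume
hdfs dfs -cat /user/flume/FlumeData.*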