This example uses kafka_2.10-0.10.0.0; note that the Kafka Java API differs somewhat between versions!
Add the Maven dependency
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.10</artifactId>
    <version>0.10.0.0</version>
</dependency>
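The kafka_2.10 artifact bundles the broker classes plus the old Scala high-level consumer used in the consumer example below. If you only use the new Java clients under org.apache.kafka.clients, the lighter kafka-clients artifact (which kafka_2.10 pulls in transitively) would be enough; this alternative dependency is an optional sketch, not part of the original setup:

<!-- Optional: sufficient only when using the new Java producer/consumer exclusively -->
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>0.10.0.0</version>
</dependency>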
Producer
package com.zns.kafka;

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class KafkaProducerTest {
    public static String topicName = "test";

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092"); // 9092 is Kafka's default port
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
        for (int i = 1; i <= 10; i++) {
            ProducerRecord<String, String> message =
                    new ProducerRecord<String, String>(topicName, "hello world " + i);
            producer.send(message);
        }
        // Flush buffered records and release resources before the JVM exits
        producer.flush();
        producer.close();
    }
}
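send() above is asynchronous and its outcome is never checked. Below is a minimal sketch, not from the original post, of the Callback overload of send() available in the 0.10 producer API, which logs whether each record was delivered; the class name KafkaProducerCallbackTest is made up for illustration:

package com.zns.kafka;

import java.util.Properties;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

// Sketch only: demonstrates send() with a delivery callback; hypothetical class name.
public class KafkaProducerCallbackTest {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
        producer.send(new ProducerRecord<String, String>("test", "hello with callback"),
                new Callback() {
                    public void onCompletion(RecordMetadata metadata, Exception exception) {
                        if (exception != null) {
                            exception.printStackTrace(); // delivery failed
                        } else {
                            System.out.println("Sent to partition " + metadata.partition()
                                    + " at offset " + metadata.offset());
                        }
                    }
                });
        producer.close(); // close() waits for in-flight requests to complete
    }
}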
Consumer
package com.zns.kafka;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.serializer.StringDecoder;
import kafka.utils.VerifiableProperties;

public class KafkaConsumerTest {
    public static String topicName = "test";

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("zookeeper.connect", "127.0.0.1:2181"); // 2181 is ZooKeeper's default port
        props.put("group.id", "test-group");
        props.put("zookeeper.session.timeout.ms", "100000");
        props.put("zookeeper.sync.time.ms", "200");
        props.put("auto.commit.interval.ms", "1000");
        props.put("auto.offset.reset", "smallest"); // start from the earliest offset if none is committed

        // The old high-level consumer takes decoders directly in createMessageStreams,
        // so no serializer properties are needed here.
        ConsumerConfig config = new ConsumerConfig(props);
        ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(config);

        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topicName, new Integer(1)); // one stream (thread) for this topic

        StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
        StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());

        Map<String, List<KafkaStream<String, String>>> consumerMap =
                consumer.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);
        KafkaStream<String, String> stream = consumerMap.get(topicName).get(0);
        ConsumerIterator<String, String> it = stream.iterator();
        while (it.hasNext()) {
            System.out.println("Received message: " + it.next().message());
        }
    }
}
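The consumer above uses the old ZooKeeper-based high-level consumer. Kafka 0.10 also ships the new Java consumer (org.apache.kafka.clients.consumer.KafkaConsumer), which talks to the brokers directly. Below is a minimal sketch of the same consumption loop with it, assuming the same broker address, topic, and group id as above; the class name NewApiConsumerTest is made up for illustration:

package com.zns.kafka;

import java.util.Arrays;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

// Sketch only: the new consumer API; hypothetical class name.
public class NewApiConsumerTest {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "127.0.0.1:9092"); // broker address, not ZooKeeper
        props.put("group.id", "test-group");
        props.put("enable.auto.commit", "true");
        props.put("auto.offset.reset", "earliest"); // "earliest" replaces the old "smallest"
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        consumer.subscribe(Arrays.asList("test"));
        while (true) {
            // poll(long timeout) blocks up to the given milliseconds waiting for records
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records) {
                System.out.println("Received message: " + record.value());
            }
        }
    }
}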
Make sure ZooKeeper and Kafka are up and running.
Start the producer and then the consumer; you will see the consumer print the received messages.
Original article: https://www.cnblogs.com/zengnansheng/p/10389745.html