kafka 0.8.2 message producer KafkaProducer

package com.hashleaf.kafka;

import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

/**
 * Message producer built on the new KafkaProducer client
 * @author xiaojf [email protected]
 * @since 2015-7-15 10:50:01 PM
 */
public class NewProducer {
    //topic the messages are sent to
    public static final String HASHLEAF_KAFKA_TOPIC = "hashleaf_topic";
    private final KafkaProducer<String, String> producer;

    public NewProducer() {
        Properties props = new Properties();
        //broker addresses; the new KafkaProducer reads bootstrap.servers (metadata.broker.list belongs to the old Scala producer)
        props.put("bootstrap.servers", "192.168.66.2:9092,192.168.66.3:9092,192.168.66.4:9092");

        //key serializer class
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        //value serializer class
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        //partitioner.class is only read by the old Scala producer; the new client hashes the record key instead (see MyPartitioner below)
        props.put("partitioner.class", "com.hashleaf.kafka.MyPartitioner");
        //num.partitions is a broker-side setting (default partition count for auto-created topics), kept here for reference only
        props.put("num.partitions", "4");

        //acks (request.required.acks in the old Scala producer)
        //0, which means that the producer never waits for an acknowledgement from the broker (the same behavior as 0.7). This option provides the lowest latency but the weakest durability guarantees (some data will be lost when a server fails).
        //1, which means that the producer gets an acknowledgement after the leader replica has received the data. This option provides better durability as the client waits until the server acknowledges the request as successful (only messages that were written to the now-dead leader but not yet replicated will be lost).
        //-1, which means that the producer gets an acknowledgement after all in-sync replicas have received the data. This option provides the best durability, we guarantee that no messages will be lost as long as at least one in sync replica remains.
        props.put("request.required.acks","-1");

        //create the producer
        //old Scala API equivalent: producer = new Producer<String, String>(new ProducerConfig(props));

        producer = new KafkaProducer<String, String>(props);

    }

    public void produce() {
        int count = 10000;
        ExecutorService executor = Executors.newFixedThreadPool(4);
        for (int i = 0; i < count; i++) {
            final String key = String.valueOf(i);
            final String message = "hashleaf-" + i;
            executor.submit(new Runnable() {

                @Override
                public void run() {
                    //send with an explicit key so records are spread over partitions by the key hash
                    producer.send(new ProducerRecord<String, String>(HASHLEAF_KAFKA_TOPIC, key, message));
                }
            });
        }
        executor.shutdown();
        try {
            //wait for the queued sends to finish before flushing and releasing the producer
            executor.awaitTermination(1, TimeUnit.MINUTES);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
        producer.close();
    }

    public static void main(String[] args) {
        new NewProducer().produce();
    }
}
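
The send() call above is asynchronous: it returns a Future<RecordMetadata> and can also take a Callback, so the acks setting only pays off if the result is actually checked. Below is a minimal sketch of both styles against the same topic and broker list; AckCheckExample is a name invented here for illustration, not part of the original project.

package com.hashleaf.kafka;

import java.util.Properties;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

/**
 * Sketch: checking the result of an asynchronous send (hypothetical example class).
 */
public class AckCheckExample {

    public static void main(String[] args) throws ExecutionException, InterruptedException {
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.66.2:9092,192.168.66.3:9092,192.168.66.4:9092");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("acks", "-1");

        KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);

        //blocking style: get() waits until the broker has acknowledged the record
        RecordMetadata meta = producer.send(
                new ProducerRecord<String, String>(NewProducer.HASHLEAF_KAFKA_TOPIC, "0", "hashleaf-sync")).get();
        System.out.println("written to partition " + meta.partition() + " at offset " + meta.offset());

        //non-blocking style: the callback fires when the send completes or fails
        producer.send(
                new ProducerRecord<String, String>(NewProducer.HASHLEAF_KAFKA_TOPIC, "1", "hashleaf-async"),
                new Callback() {
                    @Override
                    public void onCompletion(RecordMetadata metadata, Exception exception) {
                        if (exception != null) {
                            exception.printStackTrace();
                        } else {
                            System.out.println("async write at offset " + metadata.offset());
                        }
                    }
                });

        //close() flushes anything still buffered
        producer.close();
    }
}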

Custom partitioner

package com.hashleaf.kafka;

import kafka.producer.Partitioner;
import kafka.utils.VerifiableProperties;

/**
 * Custom partitioning rule for the old Scala producer (loaded via partitioner.class)
 * @author xiaojf [email protected]
 * @since 2015-7-15 11:57:23 PM
 */
public class MyPartitioner implements Partitioner {

    public MyPartitioner(VerifiableProperties props) {
        //the old producer instantiates the partitioner reflectively and passes its VerifiableProperties
    }

    @Override
    public int partition(Object obj, int numPartitions) {
        System.out.println(obj);
        //keys in this example are numeric strings, so route by key value modulo the partition count
        return Integer.parseInt(obj + "") % numPartitions;
    }

}
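
The Partitioner above plugs into the old Scala producer through the partitioner.class setting. The new KafkaProducer in 0.8.2 does not read that setting; it hashes the record key, or uses a partition given explicitly in the ProducerRecord. Below is a minimal sketch of the explicit-partition route, reusing the topic constant and the 4-partition assumption from above; ExplicitPartitionExample is a name invented here for illustration.

package com.hashleaf.kafka;

import org.apache.kafka.clients.producer.ProducerRecord;

/**
 * Sketch: emulating MyPartitioner's modulo rule with the new producer,
 * which has no partitioner.class setting in 0.8.2.
 */
public class ExplicitPartitionExample {

    //same partition count the old configuration assumed
    private static final int NUM_PARTITIONS = 4;

    public static ProducerRecord<String, String> recordFor(int i) {
        String key = String.valueOf(i);
        String message = "hashleaf-" + i;
        //pick the partition up front, exactly like MyPartitioner.partition() would
        int partition = i % NUM_PARTITIONS;
        //ProducerRecord(topic, partition, key, value) pins the record to that partition
        return new ProducerRecord<String, String>(NewProducer.HASHLEAF_KAFKA_TOPIC, partition, key, message);
    }
}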

maven

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>com.hashleaf</groupId>
    <artifactId>kafka</artifactId>
    <version>0.0.1-SNAPSHOT</version>

    <dependencies>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.9.2</artifactId>
            <version>0.8.2.1</version>
        </dependency>
        <dependency>
            <groupId>log4j</groupId>
            <artifactId>log4j</artifactId>
            <version>1.2.15</version>
            <exclusions>
                <exclusion>
                    <artifactId>jmxtools</artifactId>
                    <groupId>com.sun.jdmk</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>jmxri</artifactId>
                    <groupId>com.sun.jmx</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>jms</artifactId>
                    <groupId>javax.jms</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>mail</artifactId>
                    <groupId>javax.mail</groupId>
                </exclusion>
            </exclusions>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.11</version>
            <scope>test</scope>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>1.6</source>
                    <target>1.6</target>
                </configuration>
            </plugin>
        </plugins>
    </build>
</project>
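
The kafka_2.9.2 artifact pulls in the broker plus the old Scala producer API, which MyPartitioner above still needs. If only the new KafkaProducer API is used, the lighter client-only artifact should be enough; a sketch of that alternative dependency, assuming the same 0.8.2.1 version:

        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>0.8.2.1</version>
        </dependency>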