
[Kafka 6] Kafka Producer and Consumer with Multiple Brokers and Multiple Partitions

Posted on 2015-03-31 by bit1129 (repost)

0. Kafka Server Configuration

3 brokers

1 topic with 6 partitions and a replication factor of 2 (see the topic-creation sketch below)

2 consumers, each reading with 3 concurrent threads
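
For reference, a topic with this layout can be created with the kafka-topics.sh tool shipped with Kafka 0.8.1+. This command is a sketch, not from the original post; the ZooKeeper address matches the one used by the consumer below, and note that the topic name's p8 suffix hints it may originally have been created with 8 partitions rather than the 6 stated above.

bin/kafka-topics.sh --create --zookeeper 192.168.26.140:2181 \
    --topic learn.topic.p8.r2 --partitions 6 --replication-factor 2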

 

1. Producer

package kafka.examples.multibrokers.producers;

import java.util.Properties;
import java.util.Random;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

public class MultiBrokerProducer {
    private static Producer<String, String> producer;
    private static Properties props = new Properties();

    static {
        // List all three brokers; the producer fetches full cluster metadata from any of them.
        props.put("metadata.broker.list", "192.168.26.140:9092,192.168.26.140:9093,192.168.26.140:9094");
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        // Custom partitioner (section 2) decides which partition each keyed message goes to.
        props.put("partitioner.class", "kafka.examples.multibrokers.partitioner.TopicPartitioner");
        // Require acknowledgement from the partition leader only.
        props.put("request.required.acks", "1");
        ProducerConfig config = new ProducerConfig(props);
        producer = new Producer<String, String>(config);
    }

    public static void main(String[] args) {
        Random rnd = new Random();
        String topic = "learn.topic.p8.r2";
        for (long i = 0; i < 10000; i++) {
            // Random key in [0, 255); messages with the same key land on the same partition.
            String key = "" + rnd.nextInt(255);
            String msg = "The " + i + " message is for key - " + key;
            KeyedMessage<String, String> data = new KeyedMessage<String, String>(topic, key, msg);
            producer.send(data);
            System.out.println(i);
        }
        producer.close();
    }
}
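
When this producer runs, the 10,000 messages are spread across the topic's partitions according to each key's hash (see the partitioner in section 2), so partition leadership, and therefore write load, is shared across the three brokers. Note that request.required.acks=1 only waits for the partition leader's write: if the leader fails before a follower copies the message, that message is lost.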

 

 

2. Partitioner

package kafka.examples.multibrokers.partitioner;

import java.util.Random;

import kafka.producer.Partitioner;
import kafka.utils.VerifiableProperties;

public class TopicPartitioner implements Partitioner {

    // The old producer instantiates the partitioner reflectively via this constructor.
    public TopicPartitioner(VerifiableProperties props) {
    }

    @Override
    public int partition(Object key, int numPartitions) {
        if (numPartitions <= 0) {
            return 0;
        }
        int hashCode;
        if (key == null) {
            // No key: fall back to a random spread across partitions.
            hashCode = new Random().nextInt(255);
        } else {
            hashCode = key.hashCode();
        }
        // Mask off the sign bit: hashCode() can be negative, and a negative
        // result of the modulo would be an invalid partition id.
        return (hashCode & Integer.MAX_VALUE) % numPartitions;
    }
}
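
To check how keys map to partitions without starting a broker, the partitioner can be exercised directly. The demo class below is not part of the original post; it is a minimal sketch assuming the Kafka 0.8 jars are on the classpath.

package kafka.examples.multibrokers.partitioner;

import java.util.Properties;

import kafka.utils.VerifiableProperties;

// Hypothetical demo class: prints the partition each sample key would be
// routed to for the 6-partition topic described in section 0.
public class TopicPartitionerDemo {
    public static void main(String[] args) {
        TopicPartitioner partitioner = new TopicPartitioner(new VerifiableProperties(new Properties()));
        int numPartitions = 6; // matches the topic layout above
        for (String key : new String[] { "0", "42", "100", "254" }) {
            System.out.println("key " + key + " -> partition " + partitioner.partition(key, numPartitions));
        }
    }
}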

 

3. Consumer

package kafka.examples.multibrokers.consumers;


import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

public class MultiThreadHLConsumer {
    private ExecutorService executor;
    private final ConsumerConnector consumer;
    private final String topic;

    public MultiThreadHLConsumer(String zookeeper, String groupId, String topic) {
        Properties props = new Properties();
        props.put("zookeeper.connect", zookeeper);
        props.put("group.id", groupId);
        // Note: 500 ms is a very aggressive session timeout; the default is 6000 ms.
        props.put("zookeeper.session.timeout.ms", "500");
        props.put("zookeeper.sync.time.ms", "250");
        props.put("auto.commit.interval.ms", "1000");
        consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
        this.topic = topic;
    }

    public void doConsume(int threadCount) {
        // Request threadCount streams for this topic; multiple topics could be added to the map.
        Map<String, Integer> topicCount = new HashMap<String, Integer>();
        topicCount.put(topic, threadCount);
        Map<String, List<KafkaStream<byte[], byte[]>>> consumerStreams = consumer.createMessageStreams(topicCount);
        List<KafkaStream<byte[], byte[]>> streams = consumerStreams.get(topic);
        System.out.println("streams length: " + streams.size());
        // One thread per stream.
        executor = Executors.newFixedThreadPool(threadCount);
        // Size the latch to the thread count so the main thread waits for every stream.
        final CountDownLatch latch = new CountDownLatch(threadCount);
        for (final KafkaStream<byte[], byte[]> stream : streams) {
            executor.submit(new Runnable() {
                @Override
                public void run() {
                    ConsumerIterator<byte[], byte[]> consumerIte = stream.iterator();
                    // hasNext() blocks until a message arrives or the connector shuts down.
                    while (consumerIte.hasNext()) {
                        System.out.println("Message from thread :: " + Thread.currentThread().getName()
                                + " -- " + new String(consumerIte.next().message()));
                    }
                    latch.countDown();
                }
            });
        }

        try {
            latch.await();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }

        consumer.shutdown();
        executor.shutdown();
    }

    public static void main(String[] args) {
        String topic = "learn.topic.p8.r2";
        int threadCount = 3;
        MultiThreadHLConsumer simpleHLConsumer = new MultiThreadHLConsumer("192.168.26.140:2181",
                "learn.topic.p8.r2.consumers.group", topic);
        simpleHLConsumer.doConsume(threadCount);
    }
}
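
To reproduce the setup in section 0, start two instances of this program (the same group.id is hard-coded in main). The high-level consumer rebalances the topic's 6 partitions across the 2 × 3 = 6 threads in the group, so each thread ends up owning exactly one partition; killing one instance triggers a rebalance, after which the surviving instance's 3 threads take over 2 partitions each.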

 

 

4. Notes

A few caveats visible in the code above: a consumer group gains nothing from having more threads than partitions, since surplus threads are never assigned a partition; the partitioner must normalize key.hashCode(), which can be negative, before taking the modulo; and the 500 ms zookeeper.session.timeout.ms used here is far below the 6000 ms default, making the group prone to spurious rebalances under any pause.
