Background:
Kafka version: kafka_2.10-0.8.2.1
Server IP: 10.243.3.17
1: Kafka server.properties file configuration
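A minimal sketch of the server.properties settings this walkthrough relies on. The broker port and host come from the appendix examples below; broker.id and log.dirs are assumptions (the troubleshooting section suggests the data directory is kafka-logs-1) and should be adapted to your environment:

broker.id=0
port=8457
host.name=10.243.3.17
advertised.host.name=10.243.3.17
log.dirs=/usr/local/kafka_2.10-0.8.2.1/kafka-logs-1
zookeeper.connect=localhost:2181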
2: zookeeper.properties file configuration
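A minimal sketch of zookeeper.properties, assuming dataDir points at the zookeeper-logs directory mentioned in the troubleshooting section:

dataDir=/usr/local/kafka_2.10-0.8.2.1/zookeeper-logs
clientPort=2181
maxClientCnxns=0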
3: Starting ZooKeeper and Kafka
../bin/zookeeper-server-start.sh -daemon /usr/local/kafka_2.10-0.8.2.1/config/zookeeper.properties
../bin/kafka-server-start.sh -daemon /usr/local/kafka_2.10-0.8.2.1/config/server.properties
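To confirm that both processes came up, jps should show a QuorumPeerMain and a Kafka process, and the broker should answer topic queries through ZooKeeper:

jps
../bin/kafka-topics.sh --list --zookeeper localhost:2181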
4: Creating the topic
../bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test
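A quick end-to-end smoke test uses the console producer and consumer that ship with the distribution; the broker address here assumes the 10.243.3.17:8457 listener used in the appendix examples:

../bin/kafka-console-producer.sh --broker-list 10.243.3.17:8457 --topic test
../bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic test --from-beginning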
5: log4j.xml file configuration
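A minimal sketch of a log4j.xml that routes log events to Kafka through the kafka.producer.KafkaLog4jAppender bundled with 0.8.2.1. The brokerList and topic values reuse the ones from this article; verify the exact parameter names against your Kafka version:

<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">
    <!-- Forward every log event to the Kafka topic "test" -->
    <appender name="KAFKA" class="kafka.producer.KafkaLog4jAppender">
        <param name="brokerList" value="10.243.3.17:8457"/>
        <param name="topic" value="test"/>
        <layout class="org.apache.log4j.PatternLayout">
            <param name="ConversionPattern" value="%d [%t] %-5p %c - %m%n"/>
        </layout>
    </appender>
    <root>
        <priority value="info"/>
        <appender-ref ref="KAFKA"/>
    </root>
</log4j:configuration>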
6: Common problems
If you run into problems, first check that the parameters are configured correctly, especially host, port, and advertised.host.name; then delete the kafka-logs-1 and zookeeper-logs directories, restart ZooKeeper and Kafka, and recreate the topic, as sketched below.
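Concretely, assuming the data directories from the configuration sketches above (adjust the paths to your own log.dirs and dataDir), the reset looks like this:

../bin/kafka-server-stop.sh
../bin/zookeeper-server-stop.sh
rm -rf /usr/local/kafka_2.10-0.8.2.1/kafka-logs-1
rm -rf /usr/local/kafka_2.10-0.8.2.1/zookeeper-logs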
7: Appendix: reference code for connecting to Kafka from Java without log4j
import org.apache.log4j.Logger;

/**
 * Demo application: emits one log4j event per second so the events can be
 * picked up by the Kafka appender configured in log4j.xml.
 *
 * @author gengchong
 * @date 2016-01-05 09:21:16
 */
public class KafkaApp {

    private static final Logger LOGGER = Logger.getLogger(KafkaApp.class);

    public static void main(String[] args) throws InterruptedException {
        for (int i = 0; i < 20; i++) {
            LOGGER.info("Info [" + i + "]");
            Thread.sleep(1000);
        }
    }
}
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

/**
 * Plain producer using the old Scala client that ships with 0.8.2.1.
 *
 * @author gengchong
 * @date 2016-01-05 13:55:56
 */
public class KafkaProducer {

    private static final String TOPIC = "test";
    private static final String CONTENT = "This is a single message";
    private static final String BROKER_LIST = "10.243.3.17:8457";
    private static final String SERIALIZER_CLASS = "kafka.serializer.StringEncoder";

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("serializer.class", SERIALIZER_CLASS);
        props.put("metadata.broker.list", BROKER_LIST);

        ProducerConfig config = new ProducerConfig(props);
        Producer<String, String> producer = new Producer<String, String>(config);

        // Send one message.
        KeyedMessage<String, String> message =
                new KeyedMessage<String, String>(TOPIC, CONTENT);
        producer.send(message);

        // Send multiple messages.
        List<KeyedMessage<String, String>> messages =
                new ArrayList<KeyedMessage<String, String>>();
        for (int i = 0; i < 5; i++) {
            messages.add(new KeyedMessage<String, String>(
                    TOPIC, "============== send Message. " + i));
        }
        producer.send(messages);

        producer.close();
    }
}
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import com.google.common.collect.ImmutableMap;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

/**
 * High-level consumer using the old Scala client that ships with 0.8.2.1.
 *
 * @author gengchong
 * @date 2016-01-05 09:22:04
 */
public class KafkaConsumer {

    private static final String ZOOKEEPER = "10.243.3.17:2181";
    // The group name can be anything: every group receives and processes
    // each message in the topic once, independently of the other groups.
    private static final String GROUP_NAME = "test_group";
    private static final String TOPIC_NAME = "test";
    private static final int CONSUMER_NUM = 4;
    private static final int PARTITION_NUM = 4;

    public static void main(String[] args) {
        // Specify some consumer properties.
        Properties props = new Properties();
        props.put("zookeeper.connect", ZOOKEEPER);
        props.put("zookeeper.connectiontimeout.ms", "1000000");
        props.put("group.id", GROUP_NAME);

        // Create the connection to the cluster.
        ConsumerConfig consumerConfig = new ConsumerConfig(props);
        ConsumerConnector consumerConnector =
                Consumer.createJavaConsumerConnector(consumerConfig);

        // Create 4 streams for topic "test", to allow 4 threads to consume.
        Map<String, List<KafkaStream<byte[], byte[]>>> topicMessageStreams =
                consumerConnector.createMessageStreams(
                        ImmutableMap.of(TOPIC_NAME, PARTITION_NUM));
        List<KafkaStream<byte[], byte[]>> streams = topicMessageStreams.get(TOPIC_NAME);

        // Create a pool of 4 threads to consume from the streams.
        ExecutorService executor = Executors.newFixedThreadPool(CONSUMER_NUM);

        // Consume the messages in the threads; runs until the process is killed.
        for (final KafkaStream<byte[], byte[]> stream : streams) {
            executor.submit(new Runnable() {
                public void run() {
                    for (MessageAndMetadata<byte[], byte[]> msgAndMetadata : stream) {
                        // Process the message payload.
                        System.out.println(new String(msgAndMetadata.message()));
                    }
                }
            });
        }
    }
}
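For reference, the three classes above need the Kafka client, log4j, and Guava (for ImmutableMap) on the classpath. A sketch of the Maven dependencies follows; the Kafka coordinates match the version used in this article, while the log4j and Guava versions are assumptions:

<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.10</artifactId>
    <version>0.8.2.1</version>
</dependency>
<dependency>
    <groupId>log4j</groupId>
    <artifactId>log4j</artifactId>
    <version>1.2.17</version>
</dependency>
<dependency>
    <groupId>com.google.guava</groupId>
    <artifactId>guava</artifactId>
    <version>18.0</version>
</dependency>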