Table of Contents
- 1 Consumption Modes
- 1.1 Create a 3-partition, 1-replica topic my_topic1
- 1.2 Create the producer KafkaProducer1
- 1.3 Create the consumers
- 1.3.1 Create consumer KafkaConsumer1Group1 in group my_group1
- 1.3.2 Create consumer KafkaConsumer2Group1 in group my_group1
- 1.3.3 Create consumer KafkaConsumer3Group1 in group my_group1
- 1.3.4 Create consumer KafkaConsumer1Group2 in group my_group2
- 1.4 Eagle for Apache Kafka
- 1.4.1 View the data in partition 0
- 1.4.2 View the data in partition 1
- 1.4.3 View the data in partition 2
1 Consumption Modes
Kafka supports two consumption modes:
- Point-to-point: when a single group consumes a topic, each message is consumed exactly once, by one consumer within the group, which avoids duplicate consumption.
- Publish-subscribe: when multiple groups consume a topic, every group gets to consume each message once.
1.1 Create a 3-partition, 1-replica topic my_topic1
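The post does not show the creation step itself. The topic can be created with the kafka-topics.sh script that ships with Kafka (--create --topic my_topic1 --partitions 3 --replication-factor 1), or programmatically. Below is a minimal sketch using the Kafka AdminClient; the package and class name are mine, and the broker address is the one used throughout this post:

package com.atguigu.kafka.admin;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;

import java.util.Collections;
import java.util.Properties;

public class CreateMyTopic1 { // hypothetical class, not in the original post
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.74.148:9092");
        try (AdminClient admin = AdminClient.create(props)) {
            // 3 partitions, replication factor 1, matching the heading above
            NewTopic topic = new NewTopic("my_topic1", 3, (short) 1);
            admin.createTopics(Collections.singleton(topic)).all().get(); // block until created
        }
    }
}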
1.2 Create the producer KafkaProducer1
package com.atguigu.kafka.producer;

import org.apache.kafka.clients.producer.*;

import java.util.Properties;
import java.util.concurrent.ExecutionException;

public class KafkaProducer1 {
    /**
     * Entry point: demonstrates how to send messages to a specific Kafka topic.
     *
     * @param args command-line arguments (unused)
     */
    public static void main(String[] args) throws ExecutionException, InterruptedException {
        // Producer configuration
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.74.148:9092"); // Kafka broker address and port
        props.put("acks", "all");  // acknowledgment policy for writes
        props.put("retries", 0);   // number of retries when a send fails
        props.put("linger.ms", 1); // how long the send buffer waits before a batch is sent
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");   // key serializer
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer"); // value serializer

        // Create the producer instance
        Producer<String, String> producer = new KafkaProducer<>(props);

        // Fire-and-forget send (no broker response is consumed):
        //producer.send(new ProducerRecord<String, String>("my_topic3", "hello,1,2,3"));
        // Batch-send example:
        //for (int i = 0; i < 100; i++)
        //    producer.send(new ProducerRecord<String, String>("my-topic", Integer.toString(i), Integer.toString(i)));

        for (int i = 0; i < 20; i++) {
            // Route record i explicitly to partition i % 3 of my_topic1
            producer.send(new ProducerRecord<String, String>("my_topic1", i % 3, "null", "我是" + i),
                    new Callback() {
                        // Called back after the broker acks the message
                        @Override
                        public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                            // e != null means the message did not reach Kafka; e == null means the send
                            // succeeded, although the message is not necessarily persisted yet
                            // (that requires extra producer configuration).
                            // RecordMetadata holds the metadata of the successfully sent message.
                            System.out.println("partition = " + recordMetadata.partition());
                        }
                    });
        }

        // Close the producer
        producer.close();
    }
}
Console output (the callbacks arrive grouped by partition because the producer batches records per partition):
partition = 2
partition = 2
partition = 2
partition = 2
partition = 2
partition = 2
partition = 1
partition = 1
partition = 1
partition = 1
partition = 1
partition = 1
partition = 1
partition = 0
partition = 0
partition = 0
partition = 0
partition = 0
partition = 0
partition = 0
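The callback's comment notes that an acknowledgment alone does not guarantee the message was persisted unless the producer is configured for it. The following is a minimal sketch of a synchronous, fully acknowledged send; the class name and the idempotence setting are assumptions for illustration, not part of the original demo:

package com.atguigu.kafka.producer;

import org.apache.kafka.clients.producer.*;

import java.util.Properties;

public class KafkaSyncProducerSketch { // hypothetical class, not in the original post
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.74.148:9092");
        props.put(ProducerConfig.ACKS_CONFIG, "all");                // wait for the full in-sync replica set to ack
        props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true"); // de-duplicate broker-side on retry
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        try (Producer<String, String> producer = new KafkaProducer<>(props)) {
            // send().get() blocks until the broker acknowledges the write
            RecordMetadata md = producer.send(new ProducerRecord<>("my_topic1", "sync test")).get();
            System.out.println("acked: partition = " + md.partition() + ", offset = " + md.offset());
        }
    }
}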
1.3 Create the consumers
Three consumers join my_group1, so the group coordinator assigns each of them exactly one of the three partitions (point-to-point within a group). A fourth consumer in a separate group, my_group2, then re-reads every message (publish-subscribe across groups).
1.3.1 Create consumer KafkaConsumer1Group1 in group my_group1
package com.atguigu.kafka.consumer;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

public class KafkaConsumer1Group1 {
    /**
     * Entry point: creates and runs a Kafka consumer that consumes messages from topic my_topic1.
     *
     * @param args command-line arguments (unused)
     */
    public static void main(String[] args) {
        // Consumer configuration
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "192.168.74.148:9092"); // Kafka broker address and port
        props.setProperty("group.id", "my_group1");                    // consumer group id
        props.setProperty("enable.auto.commit", "true");               // commit offsets automatically
        props.setProperty("auto.commit.interval.ms", "1000");          // auto-commit interval
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");   // key deserializer
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); // value deserializer
        props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); // start from the beginning when no committed offset exists

        // Create the consumer instance
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        // Subscribe to the topic to consume
        consumer.subscribe(Arrays.asList("my_topic1"));

        // Consume continuously
        while (true) {
            // Pull a batch of messages from the broker
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            // Process each received record
            for (ConsumerRecord<String, String> record : records)
                System.out.printf("offset = %d,partition: %d,value = %s%n",
                        record.offset(), record.partition(), record.value());
        }
    }
}
Console output, showing that this consumer was assigned partition 2:
offset = 0,partition: 2,value = 我是2
offset = 1,partition: 2,value = 我是5
offset = 2,partition: 2,value = 我是8
offset = 3,partition: 2,value = 我是11
offset = 4,partition: 2,value = 我是14
offset = 5,partition: 2,value = 我是17
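The group coordinator happened to assign partition 2 to this consumer; with subscribe() the assignment is automatic and can change on rebalance. A consumer that must read one specific partition can bypass group management with assign(). A hypothetical sketch, not used in this demo:

package com.atguigu.kafka.consumer;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

public class KafkaConsumerPartition2 { // hypothetical class, not in the original post
    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "192.168.74.148:9092");
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            TopicPartition p2 = new TopicPartition("my_topic1", 2);
            consumer.assign(Collections.singletonList(p2));          // no group management, no rebalance
            consumer.seekToBeginning(Collections.singletonList(p2)); // re-read from offset 0
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> record : records)
                System.out.printf("offset = %d,partition: %d,value = %s%n",
                        record.offset(), record.partition(), record.value());
        }
    }
}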
1.3.2 Create consumer KafkaConsumer2Group1 in group my_group1
package com.atguigu.kafka.consumer;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

public class KafkaConsumer2Group1 {
    /**
     * Entry point: creates and runs a Kafka consumer that consumes messages from topic my_topic1.
     *
     * @param args command-line arguments (unused)
     */
    public static void main(String[] args) {
        // Consumer configuration (identical to KafkaConsumer1Group1)
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "192.168.74.148:9092"); // Kafka broker address and port
        props.setProperty("group.id", "my_group1");                    // consumer group id
        props.setProperty("enable.auto.commit", "true");               // commit offsets automatically
        props.setProperty("auto.commit.interval.ms", "1000");          // auto-commit interval
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");   // key deserializer
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); // value deserializer
        props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); // start from the beginning when no committed offset exists

        // Create the consumer instance
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        // Subscribe to the topic to consume
        consumer.subscribe(Arrays.asList("my_topic1"));

        // Consume continuously
        while (true) {
            // Pull a batch of messages from the broker
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            // Process each received record
            for (ConsumerRecord<String, String> record : records)
                System.out.printf("offset = %d,partition: %d,value = %s%n",
                        record.offset(), record.partition(), record.value());
        }
    }
}
Console output, showing that this consumer was assigned partition 1:
offset = 0,partition: 1,value = 我是1
offset = 1,partition: 1,value = 我是4
offset = 2,partition: 1,value = 我是7
offset = 3,partition: 1,value = 我是10
offset = 4,partition: 1,value = 我是13
offset = 5,partition: 1,value = 我是16
offset = 6,partition: 1,value = 我是19
1.3.3 Create consumer KafkaConsumer3Group1 in group my_group1
package com.atguigu.kafka.consumer;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

public class KafkaConsumer3Group1 {
    /**
     * Entry point: creates and runs a Kafka consumer that consumes messages from topic my_topic1.
     *
     * @param args command-line arguments (unused)
     */
    public static void main(String[] args) {
        // Consumer configuration (identical to KafkaConsumer1Group1)
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "192.168.74.148:9092"); // Kafka broker address and port
        props.setProperty("group.id", "my_group1");                    // consumer group id
        props.setProperty("enable.auto.commit", "true");               // commit offsets automatically
        props.setProperty("auto.commit.interval.ms", "1000");          // auto-commit interval
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");   // key deserializer
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); // value deserializer
        props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); // start from the beginning when no committed offset exists

        // Create the consumer instance
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        // Subscribe to the topic to consume
        consumer.subscribe(Arrays.asList("my_topic1"));

        // Consume continuously
        while (true) {
            // Pull a batch of messages from the broker
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            // Process each received record
            for (ConsumerRecord<String, String> record : records)
                System.out.printf("offset = %d,partition: %d,value = %s%n",
                        record.offset(), record.partition(), record.value());
        }
    }
}
Console output, showing that this consumer was assigned partition 0:
offset = 0,partition: 0,value = 我是0
offset = 1,partition: 0,value = 我是3
offset = 2,partition: 0,value = 我是6
offset = 3,partition: 0,value = 我是9
offset = 4,partition: 0,value = 我是12
offset = 5,partition: 0,value = 我是15
offset = 6,partition: 0,value = 我是18
1.3.4 Create consumer KafkaConsumer1Group2 in group my_group2
The only change from the group-1 consumers is group.id, now set to my_group2. Because it is the sole member of its group, this consumer is assigned all three partitions and consumes all 20 messages again.
package com.atguigu.kafka.consumer;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

public class KafkaConsumer1Group2 {
    /**
     * Entry point: creates and runs a Kafka consumer that consumes messages from topic my_topic1.
     *
     * @param args command-line arguments (unused)
     */
    public static void main(String[] args) {
        // Consumer configuration
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "192.168.74.148:9092"); // Kafka broker address and port
        props.setProperty("group.id", "my_group2");                    // consumer group id
        props.setProperty("enable.auto.commit", "true");               // commit offsets automatically
        props.setProperty("auto.commit.interval.ms", "1000");          // auto-commit interval
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");   // key deserializer
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); // value deserializer
        props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); // start from the beginning when no committed offset exists

        // Create the consumer instance
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        // Subscribe to the topic to consume
        consumer.subscribe(Arrays.asList("my_topic1"));

        // Consume continuously
        while (true) {
            // Pull a batch of messages from the broker
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            // Process each received record
            for (ConsumerRecord<String, String> record : records)
                System.out.printf("offset = %d,partition: %d,value = %s%n",
                        record.offset(), record.partition(), record.value());
        }
    }
}
Console output, showing that the sole consumer in my_group2 reads all three partitions:
offset = 0,partition: 2,value = 我是2
offset = 1,partition: 2,value = 我是5
offset = 2,partition: 2,value = 我是8
offset = 3,partition: 2,value = 我是11
offset = 4,partition: 2,value = 我是14
offset = 5,partition: 2,value = 我是17
offset = 0,partition: 1,value = 我是1
offset = 1,partition: 1,value = 我是4
offset = 2,partition: 1,value = 我是7
offset = 3,partition: 1,value = 我是10
offset = 4,partition: 1,value = 我是13
offset = 5,partition: 1,value = 我是16
offset = 6,partition: 1,value = 我是19
offset = 0,partition: 0,value = 我是0
offset = 1,partition: 0,value = 我是3
offset = 2,partition: 0,value = 我是6
offset = 3,partition: 0,value = 我是9
offset = 4,partition: 0,value = 我是12
offset = 5,partition: 0,value = 我是15
offset = 6,partition: 0,value = 我是18
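All four consumers rely on auto-commit, which can repeat or skip records if a consumer crashes between committing and processing. A common variant is committing offsets manually after the batch is processed. A minimal sketch against the same broker and topic; the class name and the group my_group3 are made up for illustration:

package com.atguigu.kafka.consumer;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

public class KafkaConsumerManualCommit { // hypothetical class, not in the original post
    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "192.168.74.148:9092");
        props.setProperty("group.id", "my_group3");       // made-up group for this sketch
        props.setProperty("enable.auto.commit", "false"); // take over offset management
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Arrays.asList("my_topic1"));
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                for (ConsumerRecord<String, String> record : records)
                    System.out.printf("offset = %d,partition: %d,value = %s%n",
                            record.offset(), record.partition(), record.value());
                if (!records.isEmpty())
                    consumer.commitSync(); // commit only after the batch has been processed
            }
        }
    }
}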
1.4 Eagle for Apache Kafka
Inspecting my_topic1 in the Eagle (EFAK) console confirms how the 20 messages were distributed across the three partitions.
1.4.1 View the data in partition 0
[[
  {"partition": 0, "offset": 0, "msg": "我是0",  "timespan": 1717226677707, "date": "2024-06-01 07:24:37"},
  {"partition": 0, "offset": 1, "msg": "我是3",  "timespan": 1717226677720, "date": "2024-06-01 07:24:37"},
  {"partition": 0, "offset": 2, "msg": "我是6",  "timespan": 1717226677721, "date": "2024-06-01 07:24:37"},
  {"partition": 0, "offset": 3, "msg": "我是9",  "timespan": 1717226677721, "date": "2024-06-01 07:24:37"},
  {"partition": 0, "offset": 4, "msg": "我是12", "timespan": 1717226677721, "date": "2024-06-01 07:24:37"},
  {"partition": 0, "offset": 5, "msg": "我是15", "timespan": 1717226677722, "date": "2024-06-01 07:24:37"},
  {"partition": 0, "offset": 6, "msg": "我是18", "timespan": 1717226677722, "date": "2024-06-01 07:24:37"}
]]
1.4.2 View the data in partition 1
[[
  {"partition": 1, "offset": 0, "msg": "我是1",  "timespan": 1717226677720, "date": "2024-06-01 07:24:37"},
  {"partition": 1, "offset": 1, "msg": "我是4",  "timespan": 1717226677721, "date": "2024-06-01 07:24:37"},
  {"partition": 1, "offset": 2, "msg": "我是7",  "timespan": 1717226677721, "date": "2024-06-01 07:24:37"},
  {"partition": 1, "offset": 3, "msg": "我是10", "timespan": 1717226677721, "date": "2024-06-01 07:24:37"},
  {"partition": 1, "offset": 4, "msg": "我是13", "timespan": 1717226677721, "date": "2024-06-01 07:24:37"},
  {"partition": 1, "offset": 5, "msg": "我是16", "timespan": 1717226677722, "date": "2024-06-01 07:24:37"},
  {"partition": 1, "offset": 6, "msg": "我是19", "timespan": 1717226677722, "date": "2024-06-01 07:24:37"}
]]
1.4.3 View the data in partition 2
[[
  {"partition": 2, "offset": 0, "msg": "我是2",  "timespan": 1717226677720, "date": "2024-06-01 07:24:37"},
  {"partition": 2, "offset": 1, "msg": "我是5",  "timespan": 1717226677721, "date": "2024-06-01 07:24:37"},
  {"partition": 2, "offset": 2, "msg": "我是8",  "timespan": 1717226677721, "date": "2024-06-01 07:24:37"},
  {"partition": 2, "offset": 3, "msg": "我是11", "timespan": 1717226677721, "date": "2024-06-01 07:24:37"},
  {"partition": 2, "offset": 4, "msg": "我是14", "timespan": 1717226677721, "date": "2024-06-01 07:24:37"},
  {"partition": 2, "offset": 5, "msg": "我是17", "timespan": 1717226677722, "date": "2024-06-01 07:24:37"}
]]