@@ -0,0 +1,88 @@
+package org.example;
+
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.springframework.kafka.annotation.KafkaListener;
+import org.springframework.kafka.annotation.PartitionOffset;
+import org.springframework.kafka.annotation.TopicPartition;
+import org.springframework.kafka.support.Acknowledgment;
+import org.springframework.stereotype.Component;
+import org.springframework.util.CollectionUtils;
+import org.springframework.kafka.support.KafkaHeaders;
+import org.springframework.messaging.handler.annotation.Header;
+import org.springframework.messaging.handler.annotation.Payload;
+
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.logging.Logger;
+
+
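+/**
+ * Sample Spring Kafka consumers. Earlier examples (simple consumption, consuming from a
+ * specific topic/partition/offset, and plain batch consumption) are kept commented out for
+ * reference; the active listener below consumes batches from topic5 along with their headers.
+ */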
+@Component
+public class KafkaConsumer {
+    // Simple consumption listener
+//    @KafkaListener(topics = {"topic1"})
+//    public void onMessage1(ConsumerRecord<?, ?> record){
+//        // Print which topic and partition the message came from, along with its content
+//        System.out.println("Simple consumption: " + record.topic() + "-" + record.partition() + "-" + record.value());
+//    }
+//
+//    /**
+//     * @Title Consume from a specified topic, partition and offset
+//     * @Description Listen to topic1 and topic2 at the same time: partition 0 of topic1, and partition 0 of topic2 with the initial offset set to 8
+//     * @Date 2020/3/22 13:38
+//     * @Param [record]
+//     * @return void
+//     **/
+//    @KafkaListener(id = "consumer1", groupId = "felix-group", topicPartitions = {
+//            @TopicPartition(topic = "topic1", partitions = { "0" }),
+//            @TopicPartition(topic = "topic2", partitionOffsets = @PartitionOffset(partition = "0", initialOffset = "8"))
+//    })
+//    public void onMessage2(ConsumerRecord<?, ?> record) {
+//        System.out.println("topic:" + record.topic() + "|partition:" + record.partition() + "|offset:" + record.offset() + "|value:" + record.value());
+//    }
+
+//    // Batch consumption; enable it in the configuration:
+//    // # enable batch consumption
+//    // spring.kafka.listener.type=batch
+//    // # maximum number of records fetched per batch poll
+//    // spring.kafka.consumer.max-poll-records=5
+//    // Note: after this change, listeners that are not batch listeners can no longer receive messages normally
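+//    // (Alternative, not shown here: keep the default factory non-batch and declare a
+//    //  separate batch-enabled ConcurrentKafkaListenerContainerFactory bean, selecting it
+//    //  per listener with the containerFactory attribute.)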
+//    @KafkaListener(topics = {"topic1"})
+//    public void listen1(List<String> data) {
+//        System.out.println("Received " + data.size() + " messages:");
+//        System.out.println(data);
+//    }
+//
+//    // Alternatively, receive the batch as ConsumerRecord objects
+//    @KafkaListener(topics = {"topic1"})
+//    public void listen2(List<ConsumerRecord<String, Object>> records) {
+//        System.out.println("Received " + records.size() + " messages:");
+//        System.out.println(records);
+//    }
+
+    // When using annotations to access the message headers and payload, List is likewise used to receive them
+
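+    // Assumes batch consumption is enabled (spring.kafka.listener.type=batch); each @Header
+    // parameter is then a List aligned index-by-index with the payload list, i.e. topics.get(i),
+    // partitions.get(i), keys.get(i) and tss.get(i) describe data.get(i).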
+    @KafkaListener(topics = {"topic5"})
+    public void listen2(@Payload List<String> data,
+                        @Header(KafkaHeaders.RECEIVED_TOPIC) List<String> topics,
+                        @Header(KafkaHeaders.RECEIVED_PARTITION_ID) List<Integer> partitions,
+                        @Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) List<String> keys,
+                        @Header(KafkaHeaders.RECEIVED_TIMESTAMP) List<Long> tss) {
+        System.out.println("Received " + data.size() + " messages:");
+        System.out.println("data: " + data);
+        System.out.println("topics: " + topics);
+        System.out.println("partitions: " + partitions);
+        System.out.println("keys: " + keys);
+        System.out.println("timestamps: " + tss);
+    }
+}