wulfwhuiwhsc 2020-05-08 09:47

KafkaConsumer does not consume records in batches

I'm using KafkaConsumer to consume records in batches, but I never get a batch: each poll returns only a single record. Judging from the log, the offset commits also look abnormal, following a regular pattern where only one commit in every three succeeds.
This has had me stuck for two days — baffling.

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import com.alibaba.fastjson.JSON;

public class KafkaManualConsumer
{

    public static void main(String[] args)
    {
        Properties properties = new Properties();
        System.setProperty("java.security.auth.login.config", "c:/kafka_client_jaas.conf"); // path to the JAAS config file
        properties.put("security.protocol", "SASL_PLAINTEXT");
        properties.put("sasl.mechanism", "PLAIN");
        properties.put("bootstrap.servers", "VM_0_16_centos:9092");    // kafka:9092
        properties.put("enable.auto.commit", "false");                 // offsets are committed manually below
        //properties.put("session.timeout.ms", 60000);
        properties.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 60000);
        properties.put("fetch.max.wait.ms", 5000);
        properties.put("max.poll.records", 5000);                      // upper bound per poll, not a guarantee
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("group.id", "yuu67u36");
//        properties.put("receive.buffer.bytes", 3276800);
//        properties.put("heartbeat.interval.ms", 59000);
//        properties.put("client.id", "t4t5t234f34f3f");
//        properties.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, 32*1024*1024);
//        properties.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 64*1024*1024);
//        properties.put(ConsumerConfig.RECEIVE_BUFFER_CONFIG, 128*1024*1024);
        //properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
//        properties.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 2000*1024);

        KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(properties);
        kafkaConsumer.subscribe(Arrays.asList("topic-video-dev-attendphotos"));

        //kafkaConsumer.subscribe(Arrays.asList("topic-video-dev-stat"));

        while (true)
        {
            // poll(long) is deprecated; the Duration overload behaves the same here
            ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(1000L));

            System.out.println("-----------------");
            System.out.println(records.count());
            for (ConsumerRecord<String, String> record : records)
            {
                System.out.println("offset = " + record.offset());
                VideoPhotoOuter dto = JSON.parseObject(record.value(), VideoPhotoOuter.class);
                System.out.println(dto.getPhotos().get(0).getPhotoFmt());
                //System.out.printf("offset = %d, value = %s", record.offset(), record.value());
            }

            try
            {
                kafkaConsumer.commitSync();
                Thread.sleep(1000L);   // sleep() is static; calling it via currentThread() is misleading
            }
            catch (Exception ex)
            {
                // an empty catch block hides commit failures; at least log them
                ex.printStackTrace();
            }
        }
        //kafkaConsumer.close();  // unreachable while the loop runs forever
    }
}
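A note before the log: max.poll.records is only an upper bound on what one poll() may return, not a batch-size target; how many records actually arrive is governed by the fetch byte limits. This topic carries photo payloads, so if a single record approaches max.partition.fetch.bytes (default 1048576 — exactly the recordsSizeInBytes reported in the log below), each fetch only has room for about one record. Here is a minimal sketch of the byte-side settings, reusing the same illustrative 32 MB / 64 MB values as the commented-out lines above; the class name is made up for this example:

import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;

// Illustrative helper only; the class name and values are hypothetical.
public class LargeMessageFetchConfig
{
    public static Properties largeMessageProps()
    {
        Properties p = new Properties();
        // Per-partition byte budget for a single fetch. When one record is
        // close to this size, every fetch carries roughly one record no
        // matter how large max.poll.records is.
        p.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, 32 * 1024 * 1024);
        // Overall cap across all partitions in one fetch response.
        p.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 64 * 1024 * 1024);
        // Hard cap on records returned by one poll(); not a minimum.
        p.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 5000);
        return p;
    }
}

With these limits raised, many megabyte-sized records could fit into one fetch response instead of one at a time, assuming the broker and topic message-size limits allow records that large.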

Below is the console log:

17:32:06.758 [main] INFO org.apache.kafka.clients.consumer.ConsumerConfig - ConsumerConfig values: 
    allow.auto.create.topics = true
    auto.commit.interval.ms = 5000
    auto.offset.reset = latest
    bootstrap.servers = [VM_0_16_centos:9092]
    check.crcs = true
    client.dns.lookup = default
    client.id = t4t5t234f34f3f
    client.rack = 
    connections.max.idle.ms = 540000
    default.api.timeout.ms = 60000
    enable.auto.commit = false
    exclude.internal.topics = true
    fetch.max.bytes = 52428800
    fetch.max.wait.ms = 5000
    fetch.min.bytes = 1
    group.id = yuu67u36
    group.instance.id = null
    heartbeat.interval.ms = 59000
    interceptor.classes = []
    internal.leave.group.on.close = true
    isolation.level = read_uncommitted
    key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
    max.partition.fetch.bytes = 1048576
    max.poll.interval.ms = 300000
    max.poll.records = 5000
    metadata.max.age.ms = 300000
    metric.reporters = []
    metrics.num.samples = 2
    metrics.recording.level = INFO
    metrics.sample.window.ms = 30000
    partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor]
    receive.buffer.bytes = 3276800
    reconnect.backoff.max.ms = 1000
    reconnect.backoff.ms = 50
    request.timeout.ms = 60000
    retry.backoff.ms = 100
    sasl.client.callback.handler.class = null
    sasl.jaas.config = null
    sasl.kerberos.kinit.cmd = /usr/bin/kinit
    sasl.kerberos.min.time.before.relogin = 60000
    sasl.kerberos.service.name = null
    sasl.kerberos.ticket.renew.jitter = 0.05
    sasl.kerberos.ticket.renew.window.factor = 0.8
    sasl.login.callback.handler.class = null
    sasl.login.class = null
    sasl.login.refresh.buffer.seconds = 300
    sasl.login.refresh.min.period.seconds = 60
    sasl.login.refresh.window.factor = 0.8
    sasl.login.refresh.window.jitter = 0.05
    sasl.mechanism = PLAIN
    security.protocol = SASL_PLAINTEXT
    send.buffer.bytes = 131072
    session.timeout.ms = 60000
    ssl.cipher.suites = null
    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
    ssl.endpoint.identification.algorithm = https
    ssl.key.password = null
    ssl.keymanager.algorithm = SunX509
    ssl.keystore.location = null
    ssl.keystore.password = null
    ssl.keystore.type = JKS
    ssl.protocol = TLS
    ssl.provider = null
    ssl.secure.random.implementation = null
    ssl.trustmanager.algorithm = PKIX
    ssl.truststore.location = null
    ssl.truststore.password = null
    ssl.truststore.type = JKS
    value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
.......................
........................
-----------------
0
17:32:08.068 [main] DEBUG org.apache.kafka.clients.consumer.internals.ConsumerCoordinator - [Consumer clientId=t4t5t234f34f3f, groupId=yuu67u36] Committed offset 90261 for partition topic-video-dev-attendphotos-0
-----------------
0
17:32:10.095 [main] DEBUG org.apache.kafka.clients.consumer.internals.ConsumerCoordinator - [Consumer clientId=t4t5t234f34f3f, groupId=yuu67u36] Committed offset 90261 for partition topic-video-dev-attendphotos-0
17:32:12.091 [main] DEBUG org.apache.kafka.clients.FetchSessionHandler - [Consumer clientId=t4t5t234f34f3f, groupId=yuu67u36] Node 90 sent a full fetch response that created a new incremental fetch session 725149318 with 1 response partition(s)
17:32:12.092 [main] DEBUG org.apache.kafka.clients.consumer.internals.Fetcher - [Consumer clientId=t4t5t234f34f3f, groupId=yuu67u36] Fetch READ_UNCOMMITTED at offset 90261 for partition topic-video-dev-attendphotos-0 returned fetch data (error=NONE, highWaterMark=93666, lastStableOffset = 93666, logStartOffset = 5372, preferredReadReplica = absent, abortedTransactions = null, recordsSizeInBytes=1048576)
17:32:12.120 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name topic.topic-video-dev-attendphotos.bytes-fetched
17:32:12.120 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name topic.topic-video-dev-attendphotos.records-fetched
17:32:12.121 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name topic-video-dev-attendphotos-0.records-lag
17:32:12.121 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name topic-video-dev-attendphotos-0.records-lead
17:32:12.122 [main] DEBUG org.apache.kafka.clients.consumer.internals.Fetcher - [Consumer clientId=t4t5t234f34f3f, groupId=yuu67u36] Added READ_UNCOMMITTED fetch request for partition topic-video-dev-attendphotos-0 at position FetchPosition{offset=90262, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=VM_0_16_centos:9092 (id: 90 rack: null), epoch=0}} to node VM_0_16_centos:9092 (id: 90 rack: null)
17:32:12.122 [main] DEBUG org.apache.kafka.clients.FetchSessionHandler - [Consumer clientId=t4t5t234f34f3f, groupId=yuu67u36] Built incremental fetch (sessionId=725149318, epoch=1) for node 90. Added 0 partition(s), altered 1 partition(s), removed 0 partition(s) out of 1 partition(s)
17:32:12.122 [main] DEBUG org.apache.kafka.clients.consumer.internals.Fetcher - [Consumer clientId=t4t5t234f34f3f, groupId=yuu67u36] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(topic-video-dev-attendphotos-0), toForget=(), implied=()) to broker VM_0_16_centos:9092 (id: 90 rack: null)
-----------------
1
offset = 90261
JPG
17:32:12.239 [main] DEBUG org.apache.kafka.clients.consumer.internals.ConsumerCoordinator - [Consumer clientId=t4t5t234f34f3f, groupId=yuu67u36] Committed offset 90262 for partition topic-video-dev-attendphotos-0
-----------------
0
17:32:14.256 [main] DEBUG org.apache.kafka.clients.consumer.internals.ConsumerCoordinator - [Consumer clientId=t4t5t234f34f3f, groupId=yuu67u36] Committed offset 90262 for partition topic-video-dev-attendphotos-0
-----------------
0
17:32:16.279 [main] DEBUG org.apache.kafka.clients.consumer.internals.ConsumerCoordinator - [Consumer clientId=t4t5t234f34f3f, groupId=yuu67u36] Committed offset 90262 for partition topic-video-dev-attendphotos-0
17:32:16.603 [kafka-coordinator-heartbeat-thread | yuu67u36] DEBUG org.apache.kafka.clients.FetchSessionHandler - [Consumer clientId=t4t5t234f34f3f, groupId=yuu67u36] Node 90 sent an incremental fetch response for session 725149318 with 1 response partition(s)
17:32:16.603 [kafka-coordinator-heartbeat-thread | yuu67u36] DEBUG org.apache.kafka.clients.consumer.internals.Fetcher - [Consumer clientId=t4t5t234f34f3f, groupId=yuu67u36] Fetch READ_UNCOMMITTED at offset 90262 for partition topic-video-dev-attendphotos-0 returned fetch data (error=NONE, highWaterMark=93668, lastStableOffset = 93668, logStartOffset = 5372, preferredReadReplica = absent, abortedTransactions = null, recordsSizeInBytes=1048576)
17:32:17.280 [main] DEBUG org.apache.kafka.clients.consumer.internals.Fetcher - [Consumer clientId=t4t5t234f34f3f, groupId=yuu67u36] Added READ_UNCOMMITTED fetch request for partition topic-video-dev-attendphotos-0 at position FetchPosition{offset=90263, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=VM_0_16_centos:9092 (id: 90 rack: null), epoch=0}} to node VM_0_16_centos:9092 (id: 90 rack: null)
17:32:17.281 [main] DEBUG org.apache.kafka.clients.FetchSessionHandler - [Consumer clientId=t4t5t234f34f3f, groupId=yuu67u36] Built incremental fetch (sessionId=725149318, epoch=2) for node 90. Added 0 partition(s), altered 1 partition(s), removed 0 partition(s) out of 1 partition(s)
17:32:17.281 [main] DEBUG org.apache.kafka.clients.consumer.internals.Fetcher - [Consumer clientId=t4t5t234f34f3f, groupId=yuu67u36] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(topic-video-dev-attendphotos-0), toForget=(), implied=()) to broker VM_0_16_centos:9092 (id: 90 rack: null)
-----------------
1
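Two details stand out in this log. First, the commits do not appear to be failing: a "Committed offset" line follows every poll, and the committed offset simply only advances after a poll that returned a record, which would produce the one-in-three pattern described above. Second, the fetch line reports recordsSizeInBytes=1048576 — exactly the 1 MB max.partition.fetch.bytes from the config dump — while highWaterMark=93666 against offset 90261 shows a backlog of several thousand records, so data availability is not the bottleneck; the per-partition byte budget likely is. Separately, committing after empty polls just adds noise. Here is a sketch of a tighter loop, where process() stands in for whatever handling the records need:

import java.time.Duration;

import org.apache.kafka.clients.consumer.CommitFailedException;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

// Sketch only: commit after non-empty polls, and surface commit failures
// instead of swallowing them in an empty catch block.
public class CommitOnDataLoop
{
    public static void consume(KafkaConsumer<String, String> kafkaConsumer)
    {
        while (true)
        {
            ConsumerRecords<String, String> records =
                    kafkaConsumer.poll(Duration.ofMillis(1000L));

            if (records.isEmpty())
            {
                continue; // nothing fetched, so there is nothing new to commit
            }

            for (ConsumerRecord<String, String> record : records)
            {
                process(record);
            }

            try
            {
                kafkaConsumer.commitSync();
            }
            catch (CommitFailedException ex)
            {
                // typically means the group rebalanced, e.g. because processing
                // exceeded max.poll.interval.ms; log it rather than hiding it
                ex.printStackTrace();
            }
        }
    }

    // Hypothetical per-record handler, placeholder body.
    private static void process(ConsumerRecord<String, String> record)
    {
        System.out.println("offset = " + record.offset());
    }
}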


