Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- org.apache.kafka.streams.errors.StreamsException: task [2_13] Abort sending since an error caught with a previous record (key 264344038933 value [B@359a4692 timestamp 1563412782970) to topic production-smoke-KTABLE-SUPPRESS-STATE-STORE-0000000021-changelog due to org.apache.kafka.common.errors.TimeoutException: Failed to update metadata after 60000 ms.
- You can increase the producer parameters `retries` and `retry.backoff.ms` to avoid this error.
- buffered.records.per.partition = 1000
- cache.max.bytes.buffering = 10485760
- commit.interval.ms = 30000
- connections.max.idle.ms = 540000
- max.task.idle.ms = 0
- metadata.max.age.ms = 300000
- metric.reporters = []
- metrics.num.samples = 2
- metrics.sample.window.ms = 30000
- num.standby.replicas = 0
- num.stream.threads = 1
- partition.grouper = class org.apache.kafka.streams.processor.DefaultPartitionGrouper
- poll.ms = 100
- processing.guarantee = at_least_once
- receive.buffer.bytes = 32768
- reconnect.backoff.max.ms = 1000
- reconnect.backoff.ms = 50
- replication.factor = 1
- request.timeout.ms = 60000
- retries = 20
- retry.backoff.ms = 60000
- rocksdb.config.setter = null
- send.buffer.bytes = 131072
- state.cleanup.delay.ms = 600000
- state.dir = /tmp/kafka-streams
- topology.optimization = none
- upgrade.from = null
- windowstore.changelog.additional.retention.ms = 86400000
- auto.commit.interval.ms = 5000
- auto.offset.reset = none
- check.crcs = true
- client.dns.lookup = default
- connections.max.idle.ms = 540000
- default.api.timeout.ms = 60000
- enable.auto.commit = false
- exclude.internal.topics = true
- fetch.max.bytes = 52428800
- fetch.max.wait.ms = 500
- fetch.min.bytes = 1
- group.id =
- heartbeat.interval.ms = 3000
- interceptor.classes = []
- internal.leave.group.on.close = false
- isolation.level = read_uncommitted
- max.partition.fetch.bytes = 1048576
- max.poll.interval.ms = 2147483647
- max.poll.records = 1000
- metadata.max.age.ms = 300000
- metric.reporters = []
- metrics.num.samples = 2
- metrics.sample.window.ms = 30000
- partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor]
- receive.buffer.bytes = 32768
- reconnect.backoff.max.ms = 1000
- reconnect.backoff.ms = 50
- request.timeout.ms = 60000
- retry.backoff.ms = 60000
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement