org.apache.kafka.streams.errors.StreamsException: task [2_13] Abort sending since an error caught with a previous record (key 264344038933 value [B@359a4692 timestamp 1563412782970) to topic production-smoke-KTABLE-SUPPRESS-STATE-STORE-0000000021-changelog due to org.apache.kafka.common.errors.TimeoutException: Failed to update metadata after 60000 ms.
You can increase producer parameter `retries` and `retry.backoff.ms` to avoid this error.
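The error's own advice points at the producer retry settings. A minimal sketch of how those producer-level parameters can be raised through the Kafka Streams configuration (the concrete values below are illustrative placeholders, not taken from the paste):

import java.util.Properties;

import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.streams.StreamsConfig;

public class ProducerRetryOverrides {

    // Adds producer-level overrides to an existing Kafka Streams Properties object.
    // The paste shows retries = 20 and retry.backoff.ms = 60000 already in effect;
    // the values below are placeholders, not tuned recommendations.
    static void addProducerRetryOverrides(Properties streamsProps) {
        streamsProps.put(StreamsConfig.producerPrefix(ProducerConfig.RETRIES_CONFIG), 50);
        streamsProps.put(StreamsConfig.producerPrefix(ProducerConfig.RETRY_BACKOFF_MS_CONFIG), 1000);

        // Assumption: the 60000 ms in the TimeoutException likely corresponds to the
        // producer's max.block.ms (default 60000 ms), which bounds how long a send()
        // waits for metadata; it can be raised through the same prefix if needed.
        streamsProps.put(StreamsConfig.producerPrefix(ProducerConfig.MAX_BLOCK_MS_CONFIG), 120000);
    }
}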
StreamsConfig values:
buffered.records.per.partition = 1000
cache.max.bytes.buffering = 10485760
commit.interval.ms = 30000
connections.max.idle.ms = 540000
max.task.idle.ms = 0
metadata.max.age.ms = 300000
metric.reporters = []
metrics.num.samples = 2
metrics.sample.window.ms = 30000
num.standby.replicas = 0
num.stream.threads = 1
partition.grouper = class org.apache.kafka.streams.processor.DefaultPartitionGrouper
poll.ms = 100
processing.guarantee = at_least_once
receive.buffer.bytes = 32768
reconnect.backoff.max.ms = 1000
reconnect.backoff.ms = 50
replication.factor = 1
request.timeout.ms = 60000
retries = 20
retry.backoff.ms = 60000
rocksdb.config.setter = null
send.buffer.bytes = 131072
state.cleanup.delay.ms = 600000
state.dir = /tmp/kafka-streams
topology.optimization = none
upgrade.from = null
windowstore.changelog.additional.retention.ms = 86400000
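The block above is the Kafka Streams layer of the configuration. For orientation, a minimal sketch of how a few of those values would be expressed in code; the application id (guessed from the changelog topic name) and the broker list are assumptions, and the topology itself is omitted:

import java.util.Properties;

import org.apache.kafka.streams.StreamsConfig;

public class StreamsPropsSketch {

    static Properties streamsProps() {
        Properties props = new Properties();
        // Assumptions: application id inferred from the "production-smoke-...-changelog"
        // topic name; the broker list is a placeholder.
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "production-smoke");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "broker-1:9092");
        // Mirrored from the StreamsConfig dump above.
        props.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 1);
        props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.AT_LEAST_ONCE);
        props.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 30000);
        props.put(StreamsConfig.STATE_DIR_CONFIG, "/tmp/kafka-streams");
        props.put(StreamsConfig.REPLICATION_FACTOR_CONFIG, 1);
        return props;
    }
}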
ConsumerConfig values:
auto.commit.interval.ms = 5000
auto.offset.reset = none
check.crcs = true
client.dns.lookup = default
connections.max.idle.ms = 540000
default.api.timeout.ms = 60000
enable.auto.commit = false
exclude.internal.topics = true
fetch.max.bytes = 52428800
fetch.max.wait.ms = 500
fetch.min.bytes = 1
group.id =
heartbeat.interval.ms = 3000
interceptor.classes = []
internal.leave.group.on.close = false
isolation.level = read_uncommitted
max.partition.fetch.bytes = 1048576
max.poll.interval.ms = 2147483647
max.poll.records = 1000
metadata.max.age.ms = 300000
metric.reporters = []
metrics.num.samples = 2
metrics.sample.window.ms = 30000
partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor]
receive.buffer.bytes = 32768
reconnect.backoff.max.ms = 1000
reconnect.backoff.ms = 50
request.timeout.ms = 60000
retry.backoff.ms = 60000
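The consumer-side values above come from the same Streams configuration. If any of them need changing, they can be forwarded with the consumer prefix, analogous to the producer overrides earlier; a minimal sketch with illustrative values:

import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.streams.StreamsConfig;

public class ConsumerOverridesSketch {

    // Consumer-level overrides are forwarded through the Streams config with the
    // "consumer." prefix; the values below are illustrative, not recommendations.
    static void addConsumerOverrides(Properties streamsProps) {
        streamsProps.put(StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_RECORDS_CONFIG), 500);
        streamsProps.put(StreamsConfig.consumerPrefix(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG), 1000);
    }
}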