ruby-kafka-temp-fork 0.0.1

Files changed (144)
  1. checksums.yaml +7 -0
  2. data/.circleci/config.yml +393 -0
  3. data/.github/workflows/stale.yml +19 -0
  4. data/.gitignore +13 -0
  5. data/.readygo +1 -0
  6. data/.rspec +3 -0
  7. data/.rubocop.yml +44 -0
  8. data/.ruby-version +1 -0
  9. data/.yardopts +3 -0
  10. data/CHANGELOG.md +310 -0
  11. data/Gemfile +5 -0
  12. data/ISSUE_TEMPLATE.md +23 -0
  13. data/LICENSE.txt +176 -0
  14. data/Procfile +2 -0
  15. data/README.md +1342 -0
  16. data/Rakefile +8 -0
  17. data/benchmarks/message_encoding.rb +23 -0
  18. data/bin/console +8 -0
  19. data/bin/setup +5 -0
  20. data/docker-compose.yml +39 -0
  21. data/examples/consumer-group.rb +35 -0
  22. data/examples/firehose-consumer.rb +64 -0
  23. data/examples/firehose-producer.rb +54 -0
  24. data/examples/simple-consumer.rb +34 -0
  25. data/examples/simple-producer.rb +42 -0
  26. data/examples/ssl-producer.rb +44 -0
  27. data/lib/kafka.rb +373 -0
  28. data/lib/kafka/async_producer.rb +291 -0
  29. data/lib/kafka/broker.rb +217 -0
  30. data/lib/kafka/broker_info.rb +16 -0
  31. data/lib/kafka/broker_pool.rb +41 -0
  32. data/lib/kafka/broker_uri.rb +43 -0
  33. data/lib/kafka/client.rb +833 -0
  34. data/lib/kafka/cluster.rb +513 -0
  35. data/lib/kafka/compression.rb +45 -0
  36. data/lib/kafka/compressor.rb +86 -0
  37. data/lib/kafka/connection.rb +223 -0
  38. data/lib/kafka/connection_builder.rb +33 -0
  39. data/lib/kafka/consumer.rb +642 -0
  40. data/lib/kafka/consumer_group.rb +231 -0
  41. data/lib/kafka/consumer_group/assignor.rb +63 -0
  42. data/lib/kafka/crc32_hash.rb +15 -0
  43. data/lib/kafka/datadog.rb +420 -0
  44. data/lib/kafka/digest.rb +22 -0
  45. data/lib/kafka/fetch_operation.rb +115 -0
  46. data/lib/kafka/fetched_batch.rb +58 -0
  47. data/lib/kafka/fetched_batch_generator.rb +120 -0
  48. data/lib/kafka/fetched_message.rb +48 -0
  49. data/lib/kafka/fetched_offset_resolver.rb +48 -0
  50. data/lib/kafka/fetcher.rb +224 -0
  51. data/lib/kafka/gzip_codec.rb +34 -0
  52. data/lib/kafka/heartbeat.rb +25 -0
  53. data/lib/kafka/instrumenter.rb +38 -0
  54. data/lib/kafka/interceptors.rb +33 -0
  55. data/lib/kafka/lz4_codec.rb +27 -0
  56. data/lib/kafka/message_buffer.rb +87 -0
  57. data/lib/kafka/murmur2_hash.rb +17 -0
  58. data/lib/kafka/offset_manager.rb +259 -0
  59. data/lib/kafka/partitioner.rb +40 -0
  60. data/lib/kafka/pause.rb +92 -0
  61. data/lib/kafka/pending_message.rb +29 -0
  62. data/lib/kafka/pending_message_queue.rb +41 -0
  63. data/lib/kafka/produce_operation.rb +205 -0
  64. data/lib/kafka/producer.rb +528 -0
  65. data/lib/kafka/prometheus.rb +316 -0
  66. data/lib/kafka/protocol.rb +225 -0
  67. data/lib/kafka/protocol/add_offsets_to_txn_request.rb +29 -0
  68. data/lib/kafka/protocol/add_offsets_to_txn_response.rb +21 -0
  69. data/lib/kafka/protocol/add_partitions_to_txn_request.rb +34 -0
  70. data/lib/kafka/protocol/add_partitions_to_txn_response.rb +47 -0
  71. data/lib/kafka/protocol/alter_configs_request.rb +44 -0
  72. data/lib/kafka/protocol/alter_configs_response.rb +49 -0
  73. data/lib/kafka/protocol/api_versions_request.rb +21 -0
  74. data/lib/kafka/protocol/api_versions_response.rb +53 -0
  75. data/lib/kafka/protocol/consumer_group_protocol.rb +19 -0
  76. data/lib/kafka/protocol/create_partitions_request.rb +42 -0
  77. data/lib/kafka/protocol/create_partitions_response.rb +28 -0
  78. data/lib/kafka/protocol/create_topics_request.rb +45 -0
  79. data/lib/kafka/protocol/create_topics_response.rb +26 -0
  80. data/lib/kafka/protocol/decoder.rb +175 -0
  81. data/lib/kafka/protocol/delete_topics_request.rb +33 -0
  82. data/lib/kafka/protocol/delete_topics_response.rb +26 -0
  83. data/lib/kafka/protocol/describe_configs_request.rb +35 -0
  84. data/lib/kafka/protocol/describe_configs_response.rb +73 -0
  85. data/lib/kafka/protocol/describe_groups_request.rb +27 -0
  86. data/lib/kafka/protocol/describe_groups_response.rb +73 -0
  87. data/lib/kafka/protocol/encoder.rb +184 -0
  88. data/lib/kafka/protocol/end_txn_request.rb +29 -0
  89. data/lib/kafka/protocol/end_txn_response.rb +19 -0
  90. data/lib/kafka/protocol/fetch_request.rb +70 -0
  91. data/lib/kafka/protocol/fetch_response.rb +136 -0
  92. data/lib/kafka/protocol/find_coordinator_request.rb +29 -0
  93. data/lib/kafka/protocol/find_coordinator_response.rb +29 -0
  94. data/lib/kafka/protocol/heartbeat_request.rb +27 -0
  95. data/lib/kafka/protocol/heartbeat_response.rb +17 -0
  96. data/lib/kafka/protocol/init_producer_id_request.rb +26 -0
  97. data/lib/kafka/protocol/init_producer_id_response.rb +27 -0
  98. data/lib/kafka/protocol/join_group_request.rb +47 -0
  99. data/lib/kafka/protocol/join_group_response.rb +41 -0
  100. data/lib/kafka/protocol/leave_group_request.rb +25 -0
  101. data/lib/kafka/protocol/leave_group_response.rb +17 -0
  102. data/lib/kafka/protocol/list_groups_request.rb +23 -0
  103. data/lib/kafka/protocol/list_groups_response.rb +35 -0
  104. data/lib/kafka/protocol/list_offset_request.rb +53 -0
  105. data/lib/kafka/protocol/list_offset_response.rb +89 -0
  106. data/lib/kafka/protocol/member_assignment.rb +42 -0
  107. data/lib/kafka/protocol/message.rb +172 -0
  108. data/lib/kafka/protocol/message_set.rb +55 -0
  109. data/lib/kafka/protocol/metadata_request.rb +31 -0
  110. data/lib/kafka/protocol/metadata_response.rb +185 -0
  111. data/lib/kafka/protocol/offset_commit_request.rb +47 -0
  112. data/lib/kafka/protocol/offset_commit_response.rb +29 -0
  113. data/lib/kafka/protocol/offset_fetch_request.rb +38 -0
  114. data/lib/kafka/protocol/offset_fetch_response.rb +56 -0
  115. data/lib/kafka/protocol/produce_request.rb +94 -0
  116. data/lib/kafka/protocol/produce_response.rb +63 -0
  117. data/lib/kafka/protocol/record.rb +88 -0
  118. data/lib/kafka/protocol/record_batch.rb +223 -0
  119. data/lib/kafka/protocol/request_message.rb +26 -0
  120. data/lib/kafka/protocol/sasl_handshake_request.rb +33 -0
  121. data/lib/kafka/protocol/sasl_handshake_response.rb +28 -0
  122. data/lib/kafka/protocol/sync_group_request.rb +33 -0
  123. data/lib/kafka/protocol/sync_group_response.rb +26 -0
  124. data/lib/kafka/protocol/txn_offset_commit_request.rb +46 -0
  125. data/lib/kafka/protocol/txn_offset_commit_response.rb +47 -0
  126. data/lib/kafka/round_robin_assignment_strategy.rb +52 -0
  127. data/lib/kafka/sasl/gssapi.rb +76 -0
  128. data/lib/kafka/sasl/oauth.rb +64 -0
  129. data/lib/kafka/sasl/plain.rb +39 -0
  130. data/lib/kafka/sasl/scram.rb +180 -0
  131. data/lib/kafka/sasl_authenticator.rb +61 -0
  132. data/lib/kafka/snappy_codec.rb +29 -0
  133. data/lib/kafka/socket_with_timeout.rb +96 -0
  134. data/lib/kafka/ssl_context.rb +66 -0
  135. data/lib/kafka/ssl_socket_with_timeout.rb +188 -0
  136. data/lib/kafka/statsd.rb +296 -0
  137. data/lib/kafka/tagged_logger.rb +77 -0
  138. data/lib/kafka/transaction_manager.rb +306 -0
  139. data/lib/kafka/transaction_state_machine.rb +72 -0
  140. data/lib/kafka/version.rb +5 -0
  141. data/lib/kafka/zstd_codec.rb +27 -0
  142. data/lib/ruby-kafka-temp-fork.rb +5 -0
  143. data/ruby-kafka-temp-fork.gemspec +54 -0
  144. metadata +520 -0
data/Rakefile ADDED
@@ -0,0 +1,8 @@
+ # frozen_string_literal: true
+
+ require "bundler/gem_tasks"
+ require "rspec/core/rake_task"
+
+ RSpec::Core::RakeTask.new(:spec)
+
+ task default: :spec
data/benchmarks/message_encoding.rb ADDED
@@ -0,0 +1,23 @@
+ # frozen_string_literal: true
+
+ require "kafka"
+
+ ready "message serialization" do
+   before do
+     message = Kafka::Protocol::Message.new(
+       value: "hello",
+       key: "world",
+     )
+
+     @io = StringIO.new
+     encoder = Kafka::Protocol::Encoder.new(@io)
+     message.encode(encoder)
+
+     @decoder = Kafka::Protocol::Decoder.new(@io)
+   end
+
+   go "decoding" do
+     @io.rewind
+     Kafka::Protocol::Message.decode(@decoder)
+   end
+ end
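The benchmark above uses the readygo `ready`/`go` DSL (see data/.readygo in the file list). The encode/decode round-trip it measures can also be reproduced standalone; a minimal sketch using the same protocol classes, assuming a checkout of this gem on the load path:

    require "kafka"
    require "stringio"

    # Encode a message into an in-memory buffer.
    message = Kafka::Protocol::Message.new(value: "hello", key: "world")
    io = StringIO.new
    message.encode(Kafka::Protocol::Encoder.new(io))

    # Rewind and decode it back out.
    io.rewind
    decoded = Kafka::Protocol::Message.decode(Kafka::Protocol::Decoder.new(io))
    puts decoded.value  # => "hello"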
data/bin/console ADDED
@@ -0,0 +1,8 @@
+ #!/usr/bin/env ruby
+ # frozen_string_literal: true
+
+ require "bundler/setup"
+ require "kafka"
+
+ require "pry"
+ Pry.start
data/bin/setup ADDED
@@ -0,0 +1,5 @@
+ #!/bin/bash
+ set -euo pipefail
+ IFS=$'\n\t'
+
+ bundle install
data/docker-compose.yml ADDED
@@ -0,0 +1,39 @@
+ version: '2'
+ services:
+   zookeeper:
+     image: wurstmeister/zookeeper
+     ports:
+       - "2181:2181"
+   kafka1:
+     image: wurstmeister/kafka:0.11.0.1
+     ports:
+       - "9092:9092"
+     environment:
+       KAFKA_BROKER_ID: 1
+       KAFKA_ADVERTISED_HOST_NAME: 192.168.99.100
+       KAFKA_ADVERTISED_PORT: 9092
+       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+     volumes:
+       - /var/run/docker.sock:/var/run/docker.sock
+   kafka2:
+     image: wurstmeister/kafka:0.11.0.1
+     ports:
+       - "9093:9092"
+     environment:
+       KAFKA_BROKER_ID: 2
+       KAFKA_ADVERTISED_HOST_NAME: 192.168.99.100
+       KAFKA_ADVERTISED_PORT: 9093
+       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+     volumes:
+       - /var/run/docker.sock:/var/run/docker.sock
+   kafka3:
+     image: wurstmeister/kafka:0.11.0.1
+     ports:
+       - "9094:9092"
+     environment:
+       KAFKA_BROKER_ID: 3
+       KAFKA_ADVERTISED_HOST_NAME: 192.168.99.100
+       KAFKA_ADVERTISED_PORT: 9094
+       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+     volumes:
+       - /var/run/docker.sock:/var/run/docker.sock
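The compose file advertises each broker on the Docker Machine IP 192.168.99.100 (ports 9092-9094), so a client on the host can reach the whole three-broker cluster. A minimal connection sketch (the IP is whatever your Docker setup advertises, not a fixed value):

    require "kafka"

    # Matches KAFKA_ADVERTISED_HOST_NAME/KAFKA_ADVERTISED_PORT above.
    kafka = Kafka.new(
      seed_brokers: [
        "192.168.99.100:9092",
        "192.168.99.100:9093",
        "192.168.99.100:9094",
      ],
      client_id: "local-test",
    )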
data/examples/consumer-group.rb ADDED
@@ -0,0 +1,35 @@
+ # frozen_string_literal: true
+
+ $LOAD_PATH.unshift(File.expand_path("../../lib", __FILE__))
+
+ require "kafka"
+
+ logger = Logger.new(STDOUT)
+ brokers = ENV.fetch("KAFKA_BROKERS", "localhost:9092").split(",")
+
+ # Make sure to create this topic in your Kafka cluster or configure the
+ # cluster to auto-create topics.
+ topic = "text"
+
+ kafka = Kafka.new(
+   seed_brokers: brokers,
+   client_id: "test",
+   socket_timeout: 20,
+   logger: logger,
+ )
+
+ consumer = kafka.consumer(group_id: "test")
+ consumer.subscribe(topic)
+
+ trap("TERM") { consumer.stop }
+ trap("INT") { consumer.stop }
+
+ begin
+   consumer.each_message do |message|
+   end
+ rescue Kafka::ProcessingError => e
+   warn "Got #{e.cause}"
+   consumer.pause(e.topic, e.partition, timeout: 20)
+
+   retry
+ end
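The `each_message` block above is intentionally left empty; per-message work goes in the block body, and the `pause`/`retry` combination temporarily skips a partition whose messages keep failing. The consumer (data/lib/kafka/consumer.rb) also exposes a batch-oriented loop; a hedged sketch of the equivalent batch variant:

    # each_batch yields one fetched batch per partition at a time
    # (see data/lib/kafka/fetched_batch.rb).
    consumer.each_batch do |batch|
      batch.messages.each do |message|
        puts "#{batch.topic}/#{batch.partition}@#{message.offset}"
      end
    end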
data/examples/firehose-consumer.rb ADDED
@@ -0,0 +1,64 @@
+ # frozen_string_literal: true
+
+ $LOAD_PATH.unshift(File.expand_path("../../lib", __FILE__))
+
+ require "kafka"
+ require "dotenv"
+
+ Dotenv.load
+
+ KAFKA_CLIENT_CERT = ENV.fetch("KAFKA_CLIENT_CERT")
+ KAFKA_CLIENT_CERT_KEY = ENV.fetch("KAFKA_CLIENT_CERT_KEY")
+ KAFKA_SERVER_CERT = ENV.fetch("KAFKA_SERVER_CERT")
+ KAFKA_URL = ENV.fetch("KAFKA_URL")
+ KAFKA_BROKERS = KAFKA_URL
+ KAFKA_TOPIC = "test-messages"
+
+ NUM_THREADS = 4
+
+ queue = Queue.new
+
+ threads = NUM_THREADS.times.map do |worker_id|
+   Thread.new do
+     logger = Logger.new($stderr)
+     logger.level = Logger::INFO
+
+     logger.formatter = proc {|severity, datetime, progname, msg|
+       "[#{worker_id}] #{severity.ljust(5)} -- #{msg}\n"
+     }
+
+     kafka = Kafka.new(
+       seed_brokers: KAFKA_BROKERS,
+       logger: logger,
+       connect_timeout: 30,
+       socket_timeout: 30,
+       ssl_client_cert: KAFKA_CLIENT_CERT,
+       ssl_client_cert_key: KAFKA_CLIENT_CERT_KEY,
+       ssl_ca_cert: KAFKA_SERVER_CERT,
+     )
+
+     consumer = kafka.consumer(group_id: "firehose")
+     consumer.subscribe(KAFKA_TOPIC)
+
+     i = 0
+     consumer.each_message do |message|
+       i += 1
+
+       if i % 1000 == 0
+         queue << i
+         i = 0
+       end
+
+       sleep 0.01
+     end
+   end
+ end
+
+ threads.each {|t| t.abort_on_exception = true }
+
+ received_messages = 0
+
+ loop do
+   received_messages += queue.pop
+   puts "===> Received #{received_messages} messages"
+ end
data/examples/firehose-producer.rb ADDED
@@ -0,0 +1,54 @@
+ # frozen_string_literal: true
+
+ $LOAD_PATH.unshift(File.expand_path("../../lib", __FILE__))
+
+ require "kafka"
+ require "dotenv"
+
+ Dotenv.load
+
+ KAFKA_CLIENT_CERT = ENV.fetch("KAFKA_CLIENT_CERT")
+ KAFKA_CLIENT_CERT_KEY = ENV.fetch("KAFKA_CLIENT_CERT_KEY")
+ KAFKA_SERVER_CERT = ENV.fetch("KAFKA_SERVER_CERT")
+ KAFKA_URL = ENV.fetch("KAFKA_URL")
+ KAFKA_BROKERS = KAFKA_URL
+ KAFKA_TOPIC = "test-messages"
+
+ NUM_THREADS = 20
+
+ threads = NUM_THREADS.times.map do
+   Thread.new do
+     logger = Logger.new($stderr)
+     logger.level = Logger::INFO
+
+     kafka = Kafka.new(
+       seed_brokers: KAFKA_BROKERS,
+       logger: logger,
+       ssl_client_cert: KAFKA_CLIENT_CERT,
+       ssl_client_cert_key: KAFKA_CLIENT_CERT_KEY,
+       ssl_ca_cert: KAFKA_SERVER_CERT,
+     )
+
+     producer = kafka.async_producer(
+       delivery_interval: 1,
+       max_queue_size: 5_000,
+       max_buffer_size: 10_000,
+     )
+
+     begin
+       loop do
+         producer.produce(rand.to_s, key: rand.to_s, topic: KAFKA_TOPIC)
+       end
+     rescue Kafka::BufferOverflow
+       logger.error "Buffer overflow, backing off for 1s"
+       sleep 1
+       retry
+     ensure
+       producer.shutdown
+     end
+   end
+ end
+
+ threads.each {|t| t.abort_on_exception = true }
+
+ threads.map(&:join)
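The async producer above flushes its buffer on a timer (`delivery_interval: 1`). ruby-kafka's async producer can also flush by message count; a sketch assuming the standard `delivery_threshold` option:

    # Deliver whenever 100 messages have accumulated, instead of every second.
    producer = kafka.async_producer(
      delivery_threshold: 100,
      max_queue_size: 5_000,
      max_buffer_size: 10_000,
    )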
data/examples/simple-consumer.rb ADDED
@@ -0,0 +1,34 @@
+ # frozen_string_literal: true
+
+ # Consumes lines from a Kafka partition and writes them to STDOUT.
+ #
+ # You need to define the environment variable KAFKA_BROKERS for this
+ # to work, e.g.
+ #
+ #   export KAFKA_BROKERS=localhost:9092
+ #
+
+ $LOAD_PATH.unshift(File.expand_path("../../lib", __FILE__))
+
+ require "kafka"
+
+ # We don't want log output to clutter the console. Replace `StringIO.new`
+ # with e.g. `$stderr` if you want to see what's happening under the hood.
+ logger = Logger.new(StringIO.new)
+
+ brokers = ENV.fetch("KAFKA_BROKERS").split(",")
+
+ # Make sure to create this topic in your Kafka cluster or configure the
+ # cluster to auto-create topics.
+ topic = "text"
+
+ kafka = Kafka.new(
+   seed_brokers: brokers,
+   client_id: "simple-consumer",
+   socket_timeout: 20,
+   logger: logger,
+ )
+
+ kafka.each_message(topic: topic) do |message|
+   puts message.value
+ end
data/examples/simple-producer.rb ADDED
@@ -0,0 +1,42 @@
+ # frozen_string_literal: true
+
+ # Reads lines from STDIN, writing them to Kafka.
+ #
+ # You need to define the environment variable KAFKA_BROKERS for this
+ # to work, e.g.
+ #
+ #   export KAFKA_BROKERS=localhost:9092
+ #
+
+ $LOAD_PATH.unshift(File.expand_path("../../lib", __FILE__))
+
+ require "kafka"
+
+ logger = Logger.new($stderr)
+ brokers = ENV.fetch("KAFKA_BROKERS")
+
+ # Make sure to create this topic in your Kafka cluster or configure the
+ # cluster to auto-create topics.
+ topic = "text"
+
+ kafka = Kafka.new(
+   seed_brokers: brokers,
+   client_id: "simple-producer",
+   logger: logger,
+ )
+
+ producer = kafka.producer
+
+ begin
+   $stdin.each_with_index do |line, index|
+     producer.produce(line, topic: topic)
+
+     # Deliver buffered messages for every 10 lines read.
+     producer.deliver_messages if index % 10 == 0
+   end
+ ensure
+   # Make sure to send any remaining messages.
+   producer.deliver_messages
+
+   producer.shutdown
+ end
data/examples/ssl-producer.rb ADDED
@@ -0,0 +1,44 @@
+ # frozen_string_literal: true
+
+ # Reads lines from STDIN, writing them to Kafka.
+
+ $LOAD_PATH.unshift(File.expand_path("../../lib", __FILE__))
+
+ require "kafka"
+
+ logger = Logger.new($stderr)
+ brokers = ENV.fetch("KAFKA_BROKERS")
+
+ # Make sure to create this topic in your Kafka cluster or configure the
+ # cluster to auto-create topics.
+ topic = "page-visits"
+
+ ssl_context = OpenSSL::SSL::SSLContext.new
+ ssl_context.set_params(
+   cert: OpenSSL::X509::Certificate.new(ENV.fetch("KAFKA_CLIENT_CERT")),
+   key: OpenSSL::PKey::RSA.new(ENV.fetch("KAFKA_CLIENT_CERT_KEY")),
+ )
+
+ kafka = Kafka.new(
+   seed_brokers: brokers,
+   client_id: "ssl-producer",
+   logger: logger,
+   ssl: true,
+   ssl_context: ssl_context,
+ )
+
+ producer = kafka.producer
+
+ begin
+   $stdin.each_with_index do |line, index|
+     producer.produce(line, topic: topic)
+
+     # Deliver buffered messages for every 10 lines read.
+     producer.deliver_messages if index % 10 == 0
+   end
+ ensure
+   # Make sure to send any remaining messages.
+   producer.deliver_messages
+
+   producer.shutdown
+ end
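Building an `OpenSSL::SSL::SSLContext` by hand, as above, gives full control over the TLS parameters. Alternatively the client accepts PEM strings directly and builds the context itself (see data/lib/kafka/ssl_context.rb), as the firehose examples earlier in this changeset do:

    kafka = Kafka.new(
      seed_brokers: brokers,
      client_id: "ssl-producer",
      logger: logger,
      ssl_client_cert: ENV.fetch("KAFKA_CLIENT_CERT"),
      ssl_client_cert_key: ENV.fetch("KAFKA_CLIENT_CERT_KEY"),
      ssl_ca_cert: ENV.fetch("KAFKA_SERVER_CERT"),
    )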
data/lib/kafka.rb ADDED
@@ -0,0 +1,373 @@
+ # frozen_string_literal: true
+
+ require "kafka/version"
+
+ module Kafka
+   class Error < StandardError
+   end
+
+   # There was an error processing a message.
+   class ProcessingError < Error
+     attr_reader :topic, :partition, :offset
+
+     def initialize(topic, partition, offset)
+       @topic = topic
+       @partition = partition
+       @offset = offset
+
+       super()
+     end
+   end
+
+   # Subclasses of this exception class map to an error code described in the
+   # Kafka protocol specification.
+   # https://kafka.apache.org/protocol#protocol_error_codes
+   class ProtocolError < Error
+   end
+
+   # -1
+   # The server experienced an unexpected error when processing the request.
+   class UnknownError < ProtocolError
+   end
+
+   # 1
+   # The requested offset is not within the range of offsets maintained by the server.
+   class OffsetOutOfRange < ProtocolError
+     attr_accessor :topic, :partition, :offset
+   end
+
+   # 2
+   # This indicates that a message's contents do not match its CRC.
+   class CorruptMessage < ProtocolError
+   end
+
+   # 3
+   # The request is for a topic or partition that does not exist on the broker.
+   class UnknownTopicOrPartition < ProtocolError
+   end
+
+   # 4
+   # The message has a negative size.
+   class InvalidMessageSize < ProtocolError
+   end
+
+   # 5
+   # This error is thrown if we are in the middle of a leadership election and
+   # there is currently no leader for this partition and hence it is unavailable
+   # for writes.
+   class LeaderNotAvailable < ProtocolError
+   end
+
+   # 6
+   # This error is thrown if the client attempts to send messages to a replica
+   # that is not the leader for some partition. It indicates that the client's
+   # metadata is out of date.
+   class NotLeaderForPartition < ProtocolError
+   end
+
+   # 7
+   # This error is thrown if the request exceeds the user-specified time limit
+   # in the request.
+   class RequestTimedOut < ProtocolError
+   end
+
+   # 8
+   # The broker is not available.
+   class BrokerNotAvailable < ProtocolError
+   end
+
+   # 9
+   # Raised if a replica is expected on a broker, but is not. Can be safely ignored.
+   class ReplicaNotAvailable < ProtocolError
+   end
+
+   # 10
+   # The server has a configurable maximum message size to avoid unbounded memory
+   # allocation. This error is thrown if the client attempts to produce a message
+   # larger than this maximum.
+   class MessageSizeTooLarge < ProtocolError
+   end
+
+   # 11
+   # The controller moved to another broker.
+   class StaleControllerEpoch < ProtocolError
+   end
+
+   # 12
+   # If you specify a string larger than the configured maximum for offset metadata.
+   class OffsetMetadataTooLarge < ProtocolError
+   end
+
+   # 13
+   # The server disconnected before a response was received.
+   class NetworkException < ProtocolError
+   end
+
+   # 14
+   # The coordinator is loading and hence can't process requests.
+   class CoordinatorLoadInProgress < ProtocolError
+   end
+
+   # 15
+   # The coordinator is not available.
+   class CoordinatorNotAvailable < ProtocolError
+   end
+
+   # 16
+   # This is not the correct coordinator.
+   class NotCoordinatorForGroup < ProtocolError
+   end
+
+   # 17
+   # For a request which attempts to access an invalid topic (e.g. one which has
+   # an illegal name), or if an attempt is made to write to an internal topic
+   # (such as the consumer offsets topic).
+   class InvalidTopic < ProtocolError
+   end
+
+   # 18
+   # If a message batch in a produce request exceeds the maximum configured
+   # segment size.
+   class RecordListTooLarge < ProtocolError
+   end
+
+   # 19
+   # Returned from a produce request when the number of in-sync replicas is
+   # lower than the configured minimum and requiredAcks is -1.
+   class NotEnoughReplicas < ProtocolError
+   end
+
+   # 20
+   # Returned from a produce request when the message was written to the log,
+   # but with fewer in-sync replicas than required.
+   class NotEnoughReplicasAfterAppend < ProtocolError
+   end
+
+   # 21
+   # Returned from a produce request if the requested requiredAcks is invalid
+   # (anything other than -1, 1, or 0).
+   class InvalidRequiredAcks < ProtocolError
+   end
+
+   # 22
+   # Specified group generation id is not valid.
+   class IllegalGeneration < ProtocolError
+   end
+
+   # 23
+   # The group member's supported protocols are incompatible with those of existing
+   # members, or the first group member tried to join with an empty protocol type
+   # or an empty protocol list.
+   class InconsistentGroupProtocol < ProtocolError
+   end
+
+   # 24
+   # The configured groupId is invalid.
+   class InvalidGroupId < ProtocolError
+   end
+
+   # 25
+   # The coordinator is not aware of this member.
+   class UnknownMemberId < ProtocolError
+   end
+
+   # 26
+   # The session timeout is not within the range allowed by the broker.
+   class InvalidSessionTimeout < ProtocolError
+   end
+
+   # 27
+   # The group is rebalancing, so a rejoin is needed.
+   class RebalanceInProgress < ProtocolError
+   end
+
+   # 28
+   # The committing offset data size is not valid.
+   class InvalidCommitOffsetSize < ProtocolError
+   end
+
+   # 29
+   class TopicAuthorizationFailed < ProtocolError
+   end
+
+   # 30
+   class GroupAuthorizationFailed < ProtocolError
+   end
+
+   # 31
+   class ClusterAuthorizationFailed < ProtocolError
+   end
+
+   # 32
+   # The timestamp of the message is out of acceptable range.
+   class InvalidTimestamp < ProtocolError
+   end
+
+   # 33
+   # The broker does not support the requested SASL mechanism.
+   class UnsupportedSaslMechanism < ProtocolError
+   end
+
+   # 34
+   class InvalidSaslState < ProtocolError
+   end
+
+   # 35
+   class UnsupportedVersion < ProtocolError
+   end
+
+   # 36
+   class TopicAlreadyExists < ProtocolError
+   end
+
+   # 37
+   # Number of partitions is below 1.
+   class InvalidPartitions < ProtocolError
+   end
+
+   # 38
+   # Replication factor is below 1 or larger than the number of available brokers.
+   class InvalidReplicationFactor < ProtocolError
+   end
+
+   # 39
+   class InvalidReplicaAssignment < ProtocolError
+   end
+
+   # 40
+   class InvalidConfig < ProtocolError
+   end
+
+   # 41
+   # This is not the correct controller for this cluster.
+   class NotController < ProtocolError
+   end
+
+   # 42
+   class InvalidRequest < ProtocolError
+   end
+
+   # 43
+   # The message format version on the broker does not support the request.
+   class UnsupportedForMessageFormat < ProtocolError
+   end
+
+   # 44
+   # Request parameters do not satisfy the configured policy.
+   class PolicyViolation < ProtocolError
+   end
+
+   # 45
+   # The broker received an out of order sequence number.
+   class OutOfOrderSequenceNumberError < Error
+   end
+
+   # 46
+   # The broker received a duplicate sequence number.
+   class DuplicateSequenceNumberError < Error
+   end
+
+   # 47
+   # Producer attempted an operation with an old epoch. Either there is a newer
+   # producer with the same transactionalId, or the producer's transaction has
+   # been expired by the broker.
+   class InvalidProducerEpochError < Error
+   end
+
+   # 48
+   # The producer attempted a transactional operation in an invalid state.
+   class InvalidTxnStateError < Error
+   end
+
+   # 49
+   # The producer attempted to use a producer id which is not currently assigned
+   # to its transactional id.
+   class InvalidProducerIDMappingError < Error
+   end
+
+   # 50
+   # The transaction timeout is larger than the maximum value allowed by the
+   # broker (as configured by transaction.max.timeout.ms).
+   class InvalidTransactionTimeoutError < Error
+   end
+
+   # 51
+   # The producer attempted to update a transaction while another concurrent
+   # operation on the same transaction was ongoing.
+   class ConcurrentTransactionError < Error
+   end
+
+   # 52
+   # Indicates that the transaction coordinator sending a WriteTxnMarker is no
+   # longer the current coordinator for a given producer.
+   class TransactionCoordinatorFencedError < Error
+   end
+
+   ###
+   # ruby-kafka errors
+   ###
+
+   # A fetch operation was executed with no partitions specified.
+   class NoPartitionsToFetchFrom < Error
+   end
+
+   # A message in a partition is larger than the maximum we've asked for.
+   class MessageTooLargeToRead < Error
+   end
+
+   # A connection has been unused for too long, we assume the server has killed it.
+   class IdleConnection < Error
+   end
+
+   # When the record array length doesn't match the real number of received records.
+   class InsufficientDataMessage < Error
+   end
+
+   # Raised when there's a network connection error.
+   class ConnectionError < Error
+   end
+
+   class NoSuchBroker < Error
+   end
+
+   # Raised when a producer buffer has reached its maximum size.
+   class BufferOverflow < Error
+   end
+
+   # Raised if not all messages could be sent by a producer.
+   class DeliveryFailed < Error
+     attr_reader :failed_messages
+
+     def initialize(message, failed_messages)
+       @failed_messages = failed_messages
+
+       super(message)
+     end
+   end
+
+   class HeartbeatError < Error
+   end
+
+   class OffsetCommitError < Error
+   end
+
+   class FetchError < Error
+   end
+
+   class SaslScramError < Error
+   end
+
+   class FailedScramAuthentication < SaslScramError
+   end
+
+   # The Token Provider object used for SASL OAuthBearer does not implement the method `token`.
+   class TokenMethodNotImplementedError < Error
+   end
+
+   # Initializes a new Kafka client.
+   #
+   # @see Client#initialize
+   # @return [Client]
+   def self.new(seed_brokers = nil, **options)
+     # We allow `seed_brokers` to be passed in either as a positional _or_ as a
+     # keyword argument.
+     if seed_brokers.nil?
+       Client.new(**options)
+     else
+       Client.new(seed_brokers: seed_brokers, **options)
+     end
+   end
+ end
+
+ require "kafka/client"