ruby-kafka-aws-iam 1.4.1

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
Files changed (145)
  1. checksums.yaml +7 -0
  2. data/.circleci/config.yml +393 -0
  3. data/.github/workflows/stale.yml +19 -0
  4. data/.gitignore +13 -0
  5. data/.readygo +1 -0
  6. data/.rspec +3 -0
  7. data/.rubocop.yml +44 -0
  8. data/.ruby-version +1 -0
  9. data/.yardopts +3 -0
  10. data/CHANGELOG.md +314 -0
  11. data/Gemfile +5 -0
  12. data/ISSUE_TEMPLATE.md +23 -0
  13. data/LICENSE.txt +176 -0
  14. data/Procfile +2 -0
  15. data/README.md +1356 -0
  16. data/Rakefile +8 -0
  17. data/benchmarks/message_encoding.rb +23 -0
  18. data/bin/console +8 -0
  19. data/bin/setup +5 -0
  20. data/docker-compose.yml +39 -0
  21. data/examples/consumer-group.rb +35 -0
  22. data/examples/firehose-consumer.rb +64 -0
  23. data/examples/firehose-producer.rb +54 -0
  24. data/examples/simple-consumer.rb +34 -0
  25. data/examples/simple-producer.rb +42 -0
  26. data/examples/ssl-producer.rb +44 -0
  27. data/lib/kafka/async_producer.rb +297 -0
  28. data/lib/kafka/broker.rb +217 -0
  29. data/lib/kafka/broker_info.rb +16 -0
  30. data/lib/kafka/broker_pool.rb +41 -0
  31. data/lib/kafka/broker_uri.rb +43 -0
  32. data/lib/kafka/client.rb +838 -0
  33. data/lib/kafka/cluster.rb +513 -0
  34. data/lib/kafka/compression.rb +45 -0
  35. data/lib/kafka/compressor.rb +86 -0
  36. data/lib/kafka/connection.rb +228 -0
  37. data/lib/kafka/connection_builder.rb +33 -0
  38. data/lib/kafka/consumer.rb +642 -0
  39. data/lib/kafka/consumer_group/assignor.rb +63 -0
  40. data/lib/kafka/consumer_group.rb +231 -0
  41. data/lib/kafka/crc32_hash.rb +15 -0
  42. data/lib/kafka/datadog.rb +420 -0
  43. data/lib/kafka/digest.rb +22 -0
  44. data/lib/kafka/fetch_operation.rb +115 -0
  45. data/lib/kafka/fetched_batch.rb +58 -0
  46. data/lib/kafka/fetched_batch_generator.rb +120 -0
  47. data/lib/kafka/fetched_message.rb +48 -0
  48. data/lib/kafka/fetched_offset_resolver.rb +48 -0
  49. data/lib/kafka/fetcher.rb +224 -0
  50. data/lib/kafka/gzip_codec.rb +34 -0
  51. data/lib/kafka/heartbeat.rb +25 -0
  52. data/lib/kafka/instrumenter.rb +38 -0
  53. data/lib/kafka/interceptors.rb +33 -0
  54. data/lib/kafka/lz4_codec.rb +27 -0
  55. data/lib/kafka/message_buffer.rb +87 -0
  56. data/lib/kafka/murmur2_hash.rb +17 -0
  57. data/lib/kafka/offset_manager.rb +259 -0
  58. data/lib/kafka/partitioner.rb +40 -0
  59. data/lib/kafka/pause.rb +92 -0
  60. data/lib/kafka/pending_message.rb +29 -0
  61. data/lib/kafka/pending_message_queue.rb +41 -0
  62. data/lib/kafka/produce_operation.rb +205 -0
  63. data/lib/kafka/producer.rb +528 -0
  64. data/lib/kafka/prometheus.rb +316 -0
  65. data/lib/kafka/protocol/add_offsets_to_txn_request.rb +29 -0
  66. data/lib/kafka/protocol/add_offsets_to_txn_response.rb +21 -0
  67. data/lib/kafka/protocol/add_partitions_to_txn_request.rb +34 -0
  68. data/lib/kafka/protocol/add_partitions_to_txn_response.rb +47 -0
  69. data/lib/kafka/protocol/alter_configs_request.rb +44 -0
  70. data/lib/kafka/protocol/alter_configs_response.rb +49 -0
  71. data/lib/kafka/protocol/api_versions_request.rb +21 -0
  72. data/lib/kafka/protocol/api_versions_response.rb +53 -0
  73. data/lib/kafka/protocol/consumer_group_protocol.rb +19 -0
  74. data/lib/kafka/protocol/create_partitions_request.rb +42 -0
  75. data/lib/kafka/protocol/create_partitions_response.rb +28 -0
  76. data/lib/kafka/protocol/create_topics_request.rb +45 -0
  77. data/lib/kafka/protocol/create_topics_response.rb +26 -0
  78. data/lib/kafka/protocol/decoder.rb +175 -0
  79. data/lib/kafka/protocol/delete_topics_request.rb +33 -0
  80. data/lib/kafka/protocol/delete_topics_response.rb +26 -0
  81. data/lib/kafka/protocol/describe_configs_request.rb +35 -0
  82. data/lib/kafka/protocol/describe_configs_response.rb +73 -0
  83. data/lib/kafka/protocol/describe_groups_request.rb +27 -0
  84. data/lib/kafka/protocol/describe_groups_response.rb +73 -0
  85. data/lib/kafka/protocol/encoder.rb +184 -0
  86. data/lib/kafka/protocol/end_txn_request.rb +29 -0
  87. data/lib/kafka/protocol/end_txn_response.rb +19 -0
  88. data/lib/kafka/protocol/fetch_request.rb +70 -0
  89. data/lib/kafka/protocol/fetch_response.rb +136 -0
  90. data/lib/kafka/protocol/find_coordinator_request.rb +29 -0
  91. data/lib/kafka/protocol/find_coordinator_response.rb +29 -0
  92. data/lib/kafka/protocol/heartbeat_request.rb +27 -0
  93. data/lib/kafka/protocol/heartbeat_response.rb +17 -0
  94. data/lib/kafka/protocol/init_producer_id_request.rb +26 -0
  95. data/lib/kafka/protocol/init_producer_id_response.rb +27 -0
  96. data/lib/kafka/protocol/join_group_request.rb +47 -0
  97. data/lib/kafka/protocol/join_group_response.rb +41 -0
  98. data/lib/kafka/protocol/leave_group_request.rb +25 -0
  99. data/lib/kafka/protocol/leave_group_response.rb +17 -0
  100. data/lib/kafka/protocol/list_groups_request.rb +23 -0
  101. data/lib/kafka/protocol/list_groups_response.rb +35 -0
  102. data/lib/kafka/protocol/list_offset_request.rb +53 -0
  103. data/lib/kafka/protocol/list_offset_response.rb +89 -0
  104. data/lib/kafka/protocol/member_assignment.rb +42 -0
  105. data/lib/kafka/protocol/message.rb +172 -0
  106. data/lib/kafka/protocol/message_set.rb +55 -0
  107. data/lib/kafka/protocol/metadata_request.rb +31 -0
  108. data/lib/kafka/protocol/metadata_response.rb +185 -0
  109. data/lib/kafka/protocol/offset_commit_request.rb +47 -0
  110. data/lib/kafka/protocol/offset_commit_response.rb +29 -0
  111. data/lib/kafka/protocol/offset_fetch_request.rb +38 -0
  112. data/lib/kafka/protocol/offset_fetch_response.rb +56 -0
  113. data/lib/kafka/protocol/produce_request.rb +94 -0
  114. data/lib/kafka/protocol/produce_response.rb +63 -0
  115. data/lib/kafka/protocol/record.rb +88 -0
  116. data/lib/kafka/protocol/record_batch.rb +223 -0
  117. data/lib/kafka/protocol/request_message.rb +26 -0
  118. data/lib/kafka/protocol/sasl_handshake_request.rb +33 -0
  119. data/lib/kafka/protocol/sasl_handshake_response.rb +28 -0
  120. data/lib/kafka/protocol/sync_group_request.rb +33 -0
  121. data/lib/kafka/protocol/sync_group_response.rb +26 -0
  122. data/lib/kafka/protocol/txn_offset_commit_request.rb +46 -0
  123. data/lib/kafka/protocol/txn_offset_commit_response.rb +47 -0
  124. data/lib/kafka/protocol.rb +225 -0
  125. data/lib/kafka/round_robin_assignment_strategy.rb +52 -0
  126. data/lib/kafka/sasl/awsmskiam.rb +128 -0
  127. data/lib/kafka/sasl/gssapi.rb +76 -0
  128. data/lib/kafka/sasl/oauth.rb +64 -0
  129. data/lib/kafka/sasl/plain.rb +39 -0
  130. data/lib/kafka/sasl/scram.rb +180 -0
  131. data/lib/kafka/sasl_authenticator.rb +73 -0
  132. data/lib/kafka/snappy_codec.rb +29 -0
  133. data/lib/kafka/socket_with_timeout.rb +96 -0
  134. data/lib/kafka/ssl_context.rb +66 -0
  135. data/lib/kafka/ssl_socket_with_timeout.rb +192 -0
  136. data/lib/kafka/statsd.rb +296 -0
  137. data/lib/kafka/tagged_logger.rb +77 -0
  138. data/lib/kafka/transaction_manager.rb +306 -0
  139. data/lib/kafka/transaction_state_machine.rb +72 -0
  140. data/lib/kafka/version.rb +5 -0
  141. data/lib/kafka/zstd_codec.rb +27 -0
  142. data/lib/kafka.rb +373 -0
  143. data/lib/ruby-kafka.rb +5 -0
  144. data/ruby-kafka.gemspec +54 -0
  145. metadata +520 -0
data/lib/kafka.rb ADDED
@@ -0,0 +1,373 @@
+ # frozen_string_literal: true
+
+ require "kafka/version"
+
+ module Kafka
+   class Error < StandardError
+   end
+
+   # There was an error processing a message.
+   class ProcessingError < Error
+     attr_reader :topic, :partition, :offset
+
+     def initialize(topic, partition, offset)
+       @topic = topic
+       @partition = partition
+       @offset = offset
+
+       super()
+     end
+   end
+
+   # Subclasses of this exception class map to an error code described in the
+   # Kafka protocol specification.
+   # https://kafka.apache.org/protocol#protocol_error_codes
+   class ProtocolError < Error
+   end
+
+   # -1
+   # The server experienced an unexpected error when processing the request
+   class UnknownError < ProtocolError
+   end
+
+   # 1
+   # The requested offset is not within the range of offsets maintained by the server.
+   class OffsetOutOfRange < ProtocolError
+     attr_accessor :topic, :partition, :offset
+   end
+
+   # 2
+   # This indicates that a message contents does not match its CRC.
+   class CorruptMessage < ProtocolError
+   end
+
+   # 3
+   # The request is for a topic or partition that does not exist on the broker.
+   class UnknownTopicOrPartition < ProtocolError
+   end
+
+   # 4
+   # The message has a negative size.
+   class InvalidMessageSize < ProtocolError
+   end
+
+   # 5
+   # This error is thrown if we are in the middle of a leadership election and
+   # there is currently no leader for this partition and hence it is unavailable
+   # for writes.
+   class LeaderNotAvailable < ProtocolError
+   end
+
+   # 6
+   # This error is thrown if the client attempts to send messages to a replica
+   # that is not the leader for some partition. It indicates that the client's
+   # metadata is out of date.
+   class NotLeaderForPartition < ProtocolError
+   end
+
+   # 7
+   # This error is thrown if the request exceeds the user-specified time limit
+   # in the request.
+   class RequestTimedOut < ProtocolError
+   end
+
+   # 8
+   # The broker is not available.
+   class BrokerNotAvailable < ProtocolError
+   end
+
+   # 9
+   # Raised if a replica is expected on a broker, but is not. Can be safely ignored.
+   class ReplicaNotAvailable < ProtocolError
+   end
+
+   # 10
+   # The server has a configurable maximum message size to avoid unbounded memory
+   # allocation. This error is thrown if the client attempt to produce a message
+   # larger than this maximum.
+   class MessageSizeTooLarge < ProtocolError
+   end
+
+   # 11
+   # The controller moved to another broker.
+   class StaleControllerEpoch < ProtocolError
+   end
+
+   # 12
+   # If you specify a string larger than configured maximum for offset metadata.
+   class OffsetMetadataTooLarge < ProtocolError
+   end
+
+   # 13
+   # The server disconnected before a response was received.
+   class NetworkException < ProtocolError
+   end
+
+   # 14
+   # The coordinator is loading and hence can't process requests.
+   class CoordinatorLoadInProgress < ProtocolError
+   end
+
+   # 15
+   # The coordinator is not available.
+   class CoordinatorNotAvailable < ProtocolError
+   end
+
+   # 16
+   # This is not the correct coordinator.
+   class NotCoordinatorForGroup < ProtocolError
+   end
+
+   # 17
+   # For a request which attempts to access an invalid topic (e.g. one which has
+   # an illegal name), or if an attempt is made to write to an internal topic
+   # (such as the consumer offsets topic).
+   class InvalidTopic < ProtocolError
+   end
+
+   # 18
+   # If a message batch in a produce request exceeds the maximum configured
+   # segment size.
+   class RecordListTooLarge < ProtocolError
+   end
+
+   # 19
+   # Returned from a produce request when the number of in-sync replicas is
+   # lower than the configured minimum and requiredAcks is -1.
+   class NotEnoughReplicas < ProtocolError
+   end
+
+   # 20
+   # Returned from a produce request when the message was written to the log,
+   # but with fewer in-sync replicas than required.
+   class NotEnoughReplicasAfterAppend < ProtocolError
+   end
+
+   # 21
+   # Returned from a produce request if the requested requiredAcks is invalid
+   # (anything other than -1, 1, or 0).
+   class InvalidRequiredAcks < ProtocolError
+   end
+
+   # 22
+   # Specified group generation id is not valid.
+   class IllegalGeneration < ProtocolError
+   end
+
+   # 23
+   # The group member's supported protocols are incompatible with those of existing members or first group member tried to join with empty protocol type or empty protocol list.
+   class InconsistentGroupProtocol < ProtocolError
+   end
+
+   # 24
+   # The configured groupId is invalid
+   class InvalidGroupId < ProtocolError
+   end
+
+   # 25
+   # The coordinator is not aware of this member.
+   class UnknownMemberId < ProtocolError
+   end
+
+   # 26
+   # The session timeout is not within the range allowed by the broker
+   class InvalidSessionTimeout < ProtocolError
+   end
+
+   # 27
+   # The group is rebalancing, so a rejoin is needed.
+   class RebalanceInProgress < ProtocolError
+   end
+
+   # 28
+   # The committing offset data size is not valid
+   class InvalidCommitOffsetSize < ProtocolError
+   end
+
+   # 29
+   class TopicAuthorizationFailed < ProtocolError
+   end
+
+   # 30
+   class GroupAuthorizationFailed < ProtocolError
+   end
+
+   # 31
+   class ClusterAuthorizationFailed < ProtocolError
+   end
+
+   # 32
+   # The timestamp of the message is out of acceptable range.
+   class InvalidTimestamp < ProtocolError
+   end
+
+   # 33
+   # The broker does not support the requested SASL mechanism.
+   class UnsupportedSaslMechanism < ProtocolError
+   end
+
+   # 34
+   class InvalidSaslState < ProtocolError
+   end
+
+   # 35
+   class UnsupportedVersion < ProtocolError
+   end
+
+   # 36
+   class TopicAlreadyExists < ProtocolError
+   end
+
+   # 37
+   # Number of partitions is below 1.
+   class InvalidPartitions < ProtocolError
+   end
+
+   # 38
+   # Replication factor is below 1 or larger than the number of available brokers.
+   class InvalidReplicationFactor < ProtocolError
+   end
+
+   # 39
+   class InvalidReplicaAssignment < ProtocolError
+   end
+
+   # 40
+   class InvalidConfig < ProtocolError
+   end
+
+   # 41
+   # This is not the correct controller for this cluster.
+   class NotController < ProtocolError
+   end
+
+   # 42
+   class InvalidRequest < ProtocolError
+   end
+
+   # 43
+   # The message format version on the broker does not support the request.
+   class UnsupportedForMessageFormat < ProtocolError
+   end
+
+   # 44
+   # Request parameters do not satisfy the configured policy.
+   class PolicyViolation < ProtocolError
+   end
+
+   # 45
+   # The broker received an out of order sequence number
+   class OutOfOrderSequenceNumberError < Error
+   end
+
+   # 46
+   # The broker received a duplicate sequence number
+   class DuplicateSequenceNumberError < Error
+   end
+
+   # 47
+   # Producer attempted an operation with an old epoch. Either there is a newer producer with the same transactionalId, or the producer's transaction has been expired by the broker.
+   class InvalidProducerEpochError < Error
+   end
+
+   # 48
+   # The producer attempted a transactional operation in an invalid state
+   class InvalidTxnStateError < Error
+   end
+
+   # 49
+   # The producer attempted to use a producer id which is not currently assigned to its transactional id
+   class InvalidProducerIDMappingError < Error
+   end
+
+   # 50
+   # The transaction timeout is larger than the maximum value allowed by the broker (as configured by transaction.max.timeout.ms).
+   class InvalidTransactionTimeoutError < Error
+   end
+
+   # 51
+   # The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing
+   class ConcurrentTransactionError < Error
+   end
+
+   # 52
+   # Indicates that the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer
+   class TransactionCoordinatorFencedError < Error
+   end
+
+   ###
+   # ruby-kafka errors
+   ###
+
+   # A fetch operation was executed with no partitions specified.
+   class NoPartitionsToFetchFrom < Error
+   end
+
+   # A message in a partition is larger than the maximum we've asked for.
+   class MessageTooLargeToRead < Error
+   end
+
+   # A connection has been unused for too long, we assume the server has killed it.
+   class IdleConnection < Error
+   end
+
+   # When the record array length doesn't match real number of received records
+   class InsufficientDataMessage < Error
+   end
+   # Raised when there's a network connection error.
+   class ConnectionError < Error
+   end
+
+   class NoSuchBroker < Error
+   end
+
+   # Raised when a producer buffer has reached its maximum size.
+   class BufferOverflow < Error
+   end
+
+   # Raised if not all messages could be sent by a producer.
+   class DeliveryFailed < Error
+     attr_reader :failed_messages
+
+     def initialize(message, failed_messages)
+       @failed_messages = failed_messages
+
+       super(message)
+     end
+   end
+
+   class HeartbeatError < Error
+   end
+
+   class OffsetCommitError < Error
+   end
+
+   class FetchError < Error
+   end
+
+   class SaslScramError < Error
+   end
+
+   class FailedScramAuthentication < SaslScramError
+   end
+
+   # The Token Provider object used for SASL OAuthBearer does not implement the method `token`
+   class TokenMethodNotImplementedError < Error
+   end
+
+   # Initializes a new Kafka client.
+   #
+   # @see Client#initialize
+   # @return [Client]
+   def self.new(seed_brokers = nil, **options)
+     # We allow `seed_brokers` to be passed in either as a positional _or_ as a
+     # keyword argument.
+     if seed_brokers.nil?
+       Client.new(**options)
+     else
+       Client.new(seed_brokers: seed_brokers, **options)
+     end
+   end
+ end
+
+ require "kafka/client"
data/lib/ruby-kafka.rb ADDED
@@ -0,0 +1,5 @@
+ # frozen_string_literal: true
+
+ # Needed because the gem is registered as `ruby-kafka`.
+
+ require "kafka"
data/ruby-kafka.gemspec ADDED
@@ -0,0 +1,54 @@
+ # coding: utf-8
+ # frozen_string_literal: true
+
+ lib = File.expand_path('../lib', __FILE__)
+ $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
+ require 'kafka/version'
+
+ Gem::Specification.new do |spec|
+   spec.name = "ruby-kafka-aws-iam"
+   spec.version = Kafka::VERSION
+   spec.authors = ["Daniel Schierbeck"]
+   spec.email = ["daniel.schierbeck@gmail.com"]
+
+   spec.summary = "A client library for the Kafka distributed commit log."
+
+   spec.description = <<-DESC.gsub(/^ /, "").strip
+     A client library for the Kafka distributed commit log.
+   DESC
+
+   spec.homepage = "https://github.com/zendesk/ruby-kafka"
+   spec.license = "Apache-2.0"
+
+   spec.required_ruby_version = '>= 2.1.0'
+
+   spec.files = `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(test|spec|features)/}) }
+   spec.bindir = "exe"
+   spec.executables = spec.files.grep(%r{^exe/}) { |f| File.basename(f) }
+   spec.require_paths = ["lib"]
+
+   spec.add_dependency 'digest-crc'
+
+   spec.add_development_dependency "bundler", ">= 1.9.5"
+   spec.add_development_dependency "rake", "~> 10.0"
+   spec.add_development_dependency "rspec"
+   spec.add_development_dependency "pry"
+   spec.add_development_dependency "digest-murmurhash"
+   spec.add_development_dependency "dotenv"
+   spec.add_development_dependency "docker-api"
+   spec.add_development_dependency "rspec-benchmark"
+   spec.add_development_dependency "activesupport", ">= 4.0", "< 6.1"
+   spec.add_development_dependency "snappy"
+   spec.add_development_dependency "extlz4"
+   spec.add_development_dependency "zstd-ruby"
+   spec.add_development_dependency "colored"
+   spec.add_development_dependency "rspec_junit_formatter", "0.2.2"
+   spec.add_development_dependency "dogstatsd-ruby", ">= 4.0.0", "< 5.0.0"
+   spec.add_development_dependency "statsd-ruby"
+   spec.add_development_dependency "prometheus-client", "~> 0.10.0"
+   spec.add_development_dependency "ruby-prof"
+   spec.add_development_dependency "timecop"
+   spec.add_development_dependency "rubocop", "~> 0.49.1"
+   spec.add_development_dependency "gssapi", ">= 1.2.0"
+   spec.add_development_dependency "stackprof"
+ end
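
The feature that distinguishes this fork from upstream ruby-kafka is the AWS MSK IAM SASL mechanism added in data/lib/kafka/sasl/awsmskiam.rb. A rough configuration sketch follows; the exact client keyword names (`sasl_aws_msk_iam_access_key_id`, `sasl_aws_msk_iam_secret_key_id`, `sasl_aws_msk_iam_aws_region`) are assumptions inferred from the mechanism name, not confirmed by this diff listing, and should be checked against data/lib/kafka/client.rb and data/lib/kafka/sasl/awsmskiam.rb.

    require "kafka"

    # Sketch only: the sasl_aws_msk_iam_* option names below are assumed, not
    # taken from this diff. Broker address and region are placeholders.
    kafka = Kafka.new(
      ["b-1.example.amazonaws.com:9098"],
      client_id: "my-app",
      ssl_ca_certs_from_system: true,  # IAM auth on MSK requires a TLS listener
      sasl_aws_msk_iam_access_key_id: ENV.fetch("AWS_ACCESS_KEY_ID"),
      sasl_aws_msk_iam_secret_key_id: ENV.fetch("AWS_SECRET_ACCESS_KEY"),
      sasl_aws_msk_iam_aws_region: "us-east-1"
    )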