ruby-kafka 0.5.1 → 0.5.2.beta1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: d49d8b32a6bc80b5043338fe73a5bdb06e2d0718
- data.tar.gz: 0d9aea1954190682562c2f4e2baa2dde3ee562cd
+ metadata.gz: 9d3ed93d12793328cee689f4b0ace7da8e8ba951
+ data.tar.gz: cdf38141d80608ad6c16fa5866be66204b599c7a
  SHA512:
- metadata.gz: d031ca62f99d9034f1a3671b4710138a76b57ab347adab47062275da7cc8eb303b0b8db34cc502a70384c4dcaa423a968b693d139750b9104da36e30712c3abf
- data.tar.gz: 15d5145f8fbf7def123156d05976f3f8666089421b59a3f9bff5e82ea2e5a4c629a885b58f4fc59de31c2cb3dd1fa70e3c5cefbdae709a782427692237703508
+ metadata.gz: 23e15780a1c7c9089cc27bac07a077d026ce0e0d4f3132061ecd6da91d42ebea75eb8d8a379c896d2637900c3d355cbedbc306945be08cf9d01a3500289e8722
+ data.tar.gz: 3f336771af6ce3c84175717668e7cd2a7e7405521be2a618e57124592999d153d5fdbb75750081b7013fbb5eacd054ab954ae8255dccc49989aef851a5cebdd8
data/CHANGELOG.md CHANGED
@@ -4,6 +4,13 @@ Changes and additions to the library will be listed here.
 
  ## Unreleased
 
+ - Instrument the start of message/batch processing (#496).
+ - Mark `Client#fetch_messages` as stable.
+ - Fix the list topics API (#508).
+ - Add support for LZ4 compression (#499).
+ - Refactor compression codec lookup (#509).
+ - Fix compressed message set offset bug (#506).
+
  ## v0.5.1
 
  Requires Kafka 0.10.1+ due to usage of a few new APIs.
data/README.md CHANGED
@@ -95,7 +95,7 @@ This library is targeting Kafka 0.9 with the v0.4.x series and Kafka 0.10 with t
 
  - **Kafka 0.8:** Full support for the Producer API in ruby-kafka v0.4.x, but no support for consumer groups. Simple message fetching works.
  - **Kafka 0.9:** Full support for the Producer and Consumer API in ruby-kafka v0.4.x.
- - **Kafka 0.10:** Full support for the Producer and Consumer API in ruby-kafka v0.5.x.
+ - **Kafka 0.10:** Full support for the Producer and Consumer API in ruby-kafka v0.5.x. Note that you _must_ run version 0.10.1 or higher of Kafka due to limitations in 0.10.0.
  - **Kafka 0.11:** Everything that works with Kafka 0.10 should still work, but so far no features specific to Kafka 0.11 have been added.
 
  This library requires Ruby 2.1 or higher.
@@ -719,6 +719,8 @@ ActiveSupport::Notifications.subscribe(/.*\.kafka$/) do |*args|
  end
  ```
 
+ All notification events have the `client_id` key in the payload, referring to the Kafka client id.
+
  #### Producer Notifications
 
  * `produce_message.producer.kafka` is sent whenever a message is produced to a buffer. It includes the following payload:
@@ -735,6 +737,8 @@ end
 
  #### Consumer Notifications
 
+ All notifications have `group_id` in the payload, referring to the Kafka consumer group id.
+
  * `process_message.consumer.kafka` is sent whenever a message is processed by a consumer. It includes the following payload:
    * `value` is the message value.
    * `key` is the message key.
@@ -743,12 +747,16 @@ end
    * `offset` is the message's offset within the topic partition.
    * `offset_lag` is the number of messages within the topic partition that have not yet been consumed.
 
+ * `start_process_message.consumer.kafka` is sent before `process_message.consumer.kafka`, and contains the same payload. It is delivered _before_ the message is processed, rather than _after_.
+
  * `process_batch.consumer.kafka` is sent whenever a message batch is processed by a consumer. It includes the following payload:
    * `message_count` is the number of messages in the batch.
    * `topic` is the topic that the message batch was consumed from.
    * `partition` is the topic partition that the message batch was consumed from.
    * `highwater_mark_offset` is the message batch's highest offset within the topic partition.
-   * `offset_lag` is the number of messages within the topic partition that have not yet been consumed.
+   * `offset_lag` is the number of messages within the topic partition that have not yet been consumed.
+
+ * `start_process_batch.consumer.kafka` is sent before `process_batch.consumer.kafka`, and contains the same payload. It is delivered _before_ the batch is processed, rather than _after_.
 
  * `join_group.consumer.kafka` is sent whenever a consumer joins a consumer group. It includes the following payload:
    * `group_id` is the consumer group id.
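
The new start-of-processing events use the same payload and the same ActiveSupport::Notifications mechanism shown above. A minimal sketch of subscribing to one of them (the handler body and output format are illustrative):

```ruby
require "active_support/notifications"

# Fires before each message is handed to your processing block, so the event
# is visible even if the handler is slow or raises.
ActiveSupport::Notifications.subscribe("start_process_message.consumer.kafka") do |*args|
  event = ActiveSupport::Notifications::Event.new(*args)
  payload = event.payload

  # `client_id` and `group_id` are present on all consumer notifications.
  puts "[#{payload[:group_id]}] starting #{payload[:topic]}/#{payload[:partition]} @ #{payload[:offset]}"
end
```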
data/lib/kafka.rb CHANGED
@@ -147,15 +147,15 @@ module Kafka
  end
 
  # 29
- class TopicAuthorizationCode < ProtocolError
+ class TopicAuthorizationFailed < ProtocolError
  end
 
  # 30
- class GroupAuthorizationCode < ProtocolError
+ class GroupAuthorizationFailed < ProtocolError
  end
 
  # 31
- class ClusterAuthorizationCode < ProtocolError
+ class ClusterAuthorizationFailed < ProtocolError
  end
 
  # 32
data/lib/kafka/async_producer.rb CHANGED
@@ -100,7 +100,10 @@ module Kafka
  def produce(value, topic:, **options)
  ensure_threads_running!
 
- buffer_overflow(topic) if @queue.size >= @max_queue_size
+ if @queue.size >= @max_queue_size
+ buffer_overflow topic,
+ "Cannot produce to #{topic}, max queue size (#{@max_queue_size} messages) reached"
+ end
 
  args = [value, **options.merge(topic: topic)]
  @queue << [:produce, args]
@@ -148,14 +151,12 @@ module Kafka
  @timer_thread ||= Thread.new { @timer.run }
  end
 
- def buffer_overflow(topic)
+ def buffer_overflow(topic, message)
  @instrumenter.instrument("buffer_overflow.async_producer", {
  topic: topic,
  })
 
- @logger.error "Cannot produce message to #{topic}, max queue size (#{@max_queue_size}) reached"
-
- raise BufferOverflow
+ raise BufferOverflow, message
  end
 
  class Timer
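
For callers, the effect of the change above is that `Kafka::BufferOverflow` now carries a descriptive message rather than the detail only being logged. A small sketch (broker address, topic, and queue size are illustrative):

```ruby
require "kafka"

kafka = Kafka.new(seed_brokers: ["localhost:9092"])
producer = kafka.async_producer(max_queue_size: 1_000)

begin
  producer.produce("hello", topic: "greetings")
rescue Kafka::BufferOverflow => e
  # e.g. "Cannot produce to greetings, max queue size (1000 messages) reached"
  warn e.message
end
```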
data/lib/kafka/broker.rb CHANGED
@@ -29,10 +29,10 @@ module Kafka
 
  # Fetches cluster metadata from the broker.
  #
- # @param (see Kafka::Protocol::TopicMetadataRequest#initialize)
+ # @param (see Kafka::Protocol::MetadataRequest#initialize)
  # @return [Kafka::Protocol::MetadataResponse]
  def fetch_metadata(**options)
- request = Protocol::TopicMetadataRequest.new(**options)
+ request = Protocol::MetadataRequest.new(**options)
 
  send_request(request)
  end
data/lib/kafka/client.rb CHANGED
@@ -352,8 +352,6 @@ module Kafka
  #
  # See a working example in `examples/simple-consumer.rb`.
  #
- # @note This API is still alpha level. Don't try to use it in production.
-
  # @param topic [String] the topic that messages should be fetched from.
  #
  # @param partition [Integer] the partition that messages should be fetched from.
@@ -451,16 +449,25 @@ module Kafka
  end
  end
 
- def create_topic(name, **options)
- @cluster.create_topic(name, **options)
+ # Creates a topic in the cluster.
+ #
+ # @param name [String] the name of the topic.
+ # @param num_partitions [Integer] the number of partitions that should be created
+ # in the topic.
+ # @param replication_factor [Integer] the replication factor of the topic.
+ # @param timeout [Integer] a duration of time to wait for the topic to be
+ # completely created.
+ # @raise [Kafka::TopicAlreadyExists] if the topic already exists.
+ # @return [nil]
+ def create_topic(name, num_partitions: 1, replication_factor: 1, timeout: 30)
+ @cluster.create_topic(name, num_partitions: num_partitions, replication_factor: replication_factor, timeout: timeout)
  end
 
  # Lists all topics in the cluster.
  #
  # @return [Array<String>] the list of topic names.
  def topics
- @cluster.clear_target_topics
- @cluster.topics
+ @cluster.list_topics
  end
 
  def has_topic?(topic)
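
Taken together, the documented client API after this change looks roughly like the following sketch (broker address and topic name are placeholders):

```ruby
require "kafka"

kafka = Kafka.new(seed_brokers: ["localhost:9092"])

# Matches the documented signature above; raises Kafka::TopicAlreadyExists
# if the topic is already present in the cluster.
kafka.create_topic("greetings", num_partitions: 3, replication_factor: 1, timeout: 30)

# Now backed by Cluster#list_topics, which fetches metadata for all topics
# instead of only the client's target topics.
kafka.topics # => ["greetings", ...]
```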
data/lib/kafka/cluster.rb CHANGED
@@ -145,7 +145,7 @@ module Kafka
  raise
  end
 
- def create_topic(name, num_partitions: 1, replication_factor: 1, timeout: 30)
+ def create_topic(name, num_partitions:, replication_factor:, timeout:)
  options = {
  topics: {
  name => {
@@ -232,6 +232,12 @@ module Kafka
  cluster_info.topics.map(&:topic_name)
  end
 
+ # Lists all topics in the cluster.
+ def list_topics
+ response = random_broker.fetch_metadata(topics: nil)
+ response.topics.map(&:topic_name)
+ end
+
  def disconnect
  @broker_pool.close
  end
data/lib/kafka/compression.rb CHANGED
@@ -1,23 +1,37 @@
  require "kafka/snappy_codec"
  require "kafka/gzip_codec"
+ require "kafka/lz4_codec"
 
  module Kafka
  module Compression
+ CODEC_NAMES = {
+ 1 => :gzip,
+ 2 => :snappy,
+ 3 => :lz4,
+ }.freeze
+
+ CODECS = {
+ :gzip => GzipCodec.new,
+ :snappy => SnappyCodec.new,
+ :lz4 => LZ4Codec.new,
+ }.freeze
+
+ def self.codecs
+ CODECS.keys
+ end
+
  def self.find_codec(name)
- case name
- when nil then nil
- when :snappy then SnappyCodec.new
- when :gzip then GzipCodec.new
- else raise "Unknown compression codec #{name}"
+ CODECS.fetch(name) do
+ raise "Unknown compression codec #{name}"
  end
  end
 
  def self.find_codec_by_id(codec_id)
- case codec_id
- when 1 then GzipCodec.new
- when 2 then SnappyCodec.new
- else raise "Unknown codec id #{codec_id}"
+ codec_name = CODEC_NAMES.fetch(codec_id) do
+ raise "Unknown codec id #{codec_id}"
  end
+
+ find_codec(codec_name)
  end
  end
  end
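
A quick sketch of how the table-driven lookup behaves (this assumes the `snappy` and `extlz4` gems are available, since the codec objects in `CODECS` load their backends when instantiated):

```ruby
require "kafka"

Kafka::Compression.codecs                        # => [:gzip, :snappy, :lz4]
Kafka::Compression.find_codec(:lz4).codec_id     # => 3
Kafka::Compression.find_codec_by_id(1).codec_id  # => 1 (gzip)
Kafka::Compression.find_codec(:zstd)             # raises "Unknown compression codec zstd"
```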
data/lib/kafka/compressor.rb CHANGED
@@ -21,14 +21,17 @@ module Kafka
  # @param threshold [Integer] the minimum number of messages in a message set
  # that will trigger compression.
  def initialize(codec_name: nil, threshold: 1, instrumenter:)
- @codec = Compression.find_codec(codec_name)
+ # Codec may be nil, in which case we won't compress.
+ @codec = codec_name && Compression.find_codec(codec_name)
+
  @threshold = threshold
  @instrumenter = instrumenter
  end
 
  # @param message_set [Protocol::MessageSet]
+ # @param offset [Integer] used to simulate broker behaviour in tests
  # @return [Protocol::MessageSet]
- def compress(message_set)
+ def compress(message_set, offset: -1)
  return message_set if @codec.nil? || message_set.size < @threshold
 
  compressed_data = compress_data(message_set)
@@ -36,6 +39,7 @@ module Kafka
  wrapper_message = Protocol::Message.new(
  value: compressed_data,
  codec_id: @codec.codec_id,
+ offset: offset
  )
 
  Protocol::MessageSet.new(messages: [wrapper_message])
data/lib/kafka/connection.rb CHANGED
@@ -107,7 +107,7 @@ module Kafka
 
  response
  end
- rescue SystemCallError, EOFError => e
+ rescue SystemCallError, EOFError, IOError => e
  close
 
  raise ConnectionError, "Connection error #{e.class}: #{e}"
data/lib/kafka/consumer.rb CHANGED
@@ -202,18 +202,31 @@ module Kafka
  )
 
  batches.each do |batch|
+ unless batch.empty?
+ @instrumenter.instrument("fetch_batch.consumer", {
+ topic: batch.topic,
+ partition: batch.partition,
+ offset_lag: batch.offset_lag,
+ highwater_mark_offset: batch.highwater_mark_offset,
+ message_count: batch.messages.count,
+ })
+ end
  batch.messages.each do |message|
- @instrumenter.instrument("process_message.consumer") do |notification|
- notification.update(
- topic: message.topic,
- partition: message.partition,
- offset: message.offset,
- offset_lag: batch.highwater_mark_offset - message.offset - 1,
- create_time: message.create_time,
- key: message.key,
- value: message.value,
- )
-
+ notification = {
+ topic: message.topic,
+ partition: message.partition,
+ offset: message.offset,
+ offset_lag: batch.highwater_mark_offset - message.offset - 1,
+ create_time: message.create_time,
+ key: message.key,
+ value: message.value,
+ }
+
+ # Instrument an event immediately so that subscribers don't have to wait until
+ # the block is completed.
+ @instrumenter.instrument("start_process_message.consumer", notification)
+
+ @instrumenter.instrument("process_message.consumer", notification) do
  begin
  yield message
  @current_offsets[message.topic][message.partition] = message.offset
@@ -278,15 +291,19 @@ module Kafka
 
  batches.each do |batch|
  unless batch.empty?
- @instrumenter.instrument("process_batch.consumer") do |notification|
- notification.update(
- topic: batch.topic,
- partition: batch.partition,
- offset_lag: batch.offset_lag,
- highwater_mark_offset: batch.highwater_mark_offset,
- message_count: batch.messages.count,
- )
-
+ notification = {
+ topic: batch.topic,
+ partition: batch.partition,
+ offset_lag: batch.offset_lag,
+ highwater_mark_offset: batch.highwater_mark_offset,
+ message_count: batch.messages.count,
+ }
+
+ # Instrument an event immediately so that subscribers don't have to wait until
+ # the block is completed.
+ @instrumenter.instrument("start_process_batch.consumer", notification)
+
+ @instrumenter.instrument("process_batch.consumer", notification) do
  begin
  yield batch
  @current_offsets[batch.topic][batch.partition] = batch.last_offset
@@ -365,6 +382,9 @@ module Kafka
  @logger.error "Leader not available; waiting 1s before retrying"
  @cluster.mark_as_stale!
  sleep 1
+ rescue ConnectionError => e
+ @logger.error "Connection error #{e.class}: #{e.message}"
+ @cluster.mark_as_stale!
  rescue SignalException => e
  @logger.warn "Received signal #{e.message}, shutting down"
  @running = false
data/lib/kafka/datadog.rb CHANGED
@@ -229,8 +229,12 @@ module Kafka
  # This gets us the write rate.
  increment("producer.produce.messages", tags: tags.merge(topic: topic))
 
+ # Information about typical/average/95p message size.
  histogram("producer.produce.message_size", message_size, tags: tags.merge(topic: topic))
 
+ # Aggregate message size.
+ count("producer.produce.message_size.sum", message_size, tags: tags.merge(topic: topic))
+
  # This gets us the avg/max buffer size per producer.
  histogram("producer.buffer.size", buffer_size, tags: tags)
 
data/lib/kafka/lz4_codec.rb ADDED
@@ -0,0 +1,21 @@
+ module Kafka
+ class LZ4Codec
+ def initialize
+ require "extlz4"
+ rescue LoadError
+ raise LoadError, "using lz4 compression requires adding a dependency on the `extlz4` gem to your Gemfile."
+ end
+
+ def codec_id
+ 3
+ end
+
+ def compress(data)
+ LZ4.encode(data)
+ end
+
+ def decompress(data)
+ LZ4.decode(data)
+ end
+ end
+ end
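
A minimal sketch of enabling the new codec on a producer, assuming `extlz4` has been added to the Gemfile as the error message above instructs (broker address and topic are placeholders):

```ruby
require "kafka"

kafka = Kafka.new(seed_brokers: ["localhost:9092"])

# `compression_codec` selects the codec by name; :lz4 maps to codec id 3.
# `compression_threshold` is the minimum number of buffered messages before
# a message set is compressed.
producer = kafka.producer(compression_codec: :lz4, compression_threshold: 10)

producer.produce("hello", topic: "greetings")
producer.deliver_messages
```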
data/lib/kafka/protocol.rb CHANGED
@@ -71,9 +71,9 @@ module Kafka
  26 => InvalidSessionTimeout,
  27 => RebalanceInProgress,
  28 => InvalidCommitOffsetSize,
- 29 => TopicAuthorizationCode,
- 30 => GroupAuthorizationCode,
- 31 => ClusterAuthorizationCode,
+ 29 => TopicAuthorizationFailed,
+ 30 => GroupAuthorizationFailed,
+ 31 => ClusterAuthorizationFailed,
  32 => InvalidTimestamp,
  33 => UnsupportedSaslMechanism,
  34 => InvalidSaslState,
@@ -113,7 +113,7 @@ module Kafka
  end
  end
 
- require "kafka/protocol/topic_metadata_request"
+ require "kafka/protocol/metadata_request"
  require "kafka/protocol/metadata_response"
  require "kafka/protocol/produce_request"
  require "kafka/protocol/produce_response"
data/lib/kafka/protocol/encoder.rb CHANGED
@@ -74,8 +74,13 @@ module Kafka
  # @param array [Array]
  # @return [nil]
  def write_array(array, &block)
- write_int32(array.size)
- array.each(&block)
+ if array.nil?
+ # An array can be null, which is different from it being empty.
+ write_int32(-1)
+ else
+ write_int32(array.size)
+ array.each(&block)
+ end
  end
 
  # Writes a string to the IO object.
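
The distinction matters on the wire: a null array is encoded as a length of -1, which a Kafka metadata request interprets as "all topics", whereas an empty array means "no topics". A small sketch, requiring only the encoder so the example stays self-contained:

```ruby
require "stringio"
require "kafka/protocol/encoder"

buffer = StringIO.new
encoder = Kafka::Protocol::Encoder.new(buffer)

# nil is written as a length of -1 and the block is never called.
encoder.write_array(nil) { |item| encoder.write_string(item) }

buffer.string.unpack("l>") # => [-1]
```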
data/lib/kafka/protocol/message.rb CHANGED
@@ -58,18 +58,7 @@ module Kafka
  message_set_decoder = Decoder.from_string(data)
  message_set = MessageSet.decode(message_set_decoder)
 
- # The contained messages need to have their offset corrected.
- messages = message_set.messages.each_with_index.map do |message, i|
- Message.new(
- offset: offset + i,
- value: message.value,
- key: message.key,
- create_time: message.create_time,
- codec_id: message.codec_id
- )
- end
-
- MessageSet.new(messages: messages)
+ correct_offsets(message_set)
  end
 
  def self.decode(decoder)
@@ -113,6 +102,36 @@ module Kafka
 
  private
 
+ # Offsets may be relative to the wrapper message's offset, but there are special cases.
+ #
+ # Cases where the client receives already corrected offsets:
+ # - When the fetch request is version 0, Kafka corrects the relative offsets on the broker side before replying to the fetch response.
+ # - When the messages are stored on disk in the 0.9 format (the broker is configured to do so).
+ #
+ # In all other cases the compressed inner messages carry relative offsets, with the following properties:
+ # - The wrapper (container) message has the 'real' offset.
+ # - The wrapper message's offset is the 'real' offset of the last message in the compressed batch.
+ def correct_offsets(message_set)
+ max_relative_offset = message_set.messages.last.offset
+
+ # The offsets are already correct, do nothing.
+ return message_set if max_relative_offset == offset
+
+ # The contained messages have relative offsets, and need to be corrected.
+ base_offset = offset - max_relative_offset
+ messages = message_set.messages.map do |message|
+ Message.new(
+ offset: message.offset + base_offset,
+ value: message.value,
+ key: message.key,
+ create_time: message.create_time,
+ codec_id: message.codec_id
+ )
+ end
+
+ MessageSet.new(messages: messages)
+ end
+
  def encode_with_crc
  buffer = StringIO.new
  encoder = Encoder.new(buffer)
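
A worked example of the arithmetic above, using made-up numbers rather than real `Message` objects: if the wrapper message has offset 105 and the inner messages carry relative offsets 0 through 5, the set is rebased to offsets 100 through 105; if the last inner offset already equals the wrapper offset, the set is returned unchanged.

```ruby
wrapper_offset   = 105          # 'real' offset of the compressed wrapper message
relative_offsets = (0..5).to_a  # offsets carried by the inner messages

max_relative_offset = relative_offsets.last        # => 5
base_offset = wrapper_offset - max_relative_offset # => 100

relative_offsets.map { |o| o + base_offset }       # => [100, 101, 102, 103, 104, 105]
```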
data/lib/kafka/protocol/metadata_request.rb RENAMED from data/lib/kafka/protocol/topic_metadata_request.rb
@@ -1,6 +1,6 @@
  module Kafka
  module Protocol
- class TopicMetadataRequest
+ class MetadataRequest
 
  # A request for cluster metadata.
  #
data/lib/kafka/protocol/metadata_response.rb CHANGED
@@ -1,7 +1,7 @@
  module Kafka
  module Protocol
 
- # A response to a {TopicMetadataRequest}.
+ # A response to a {MetadataRequest}.
  #
  # The response contains information on the brokers, topics, and partitions in
  # the cluster.
data/lib/kafka/version.rb CHANGED
@@ -1,3 +1,3 @@
  module Kafka
- VERSION = "0.5.1"
+ VERSION = "0.5.2.beta1"
  end
data/ruby-kafka.gemspec CHANGED
@@ -35,6 +35,7 @@ Gem::Specification.new do |spec|
  spec.add_development_dependency "rspec-benchmark"
  spec.add_development_dependency "activesupport"
  spec.add_development_dependency "snappy"
+ spec.add_development_dependency "extlz4"
  spec.add_development_dependency "colored"
  spec.add_development_dependency "rspec_junit_formatter", "0.2.2"
  spec.add_development_dependency "dogstatsd-ruby", ">= 3.0.0"
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: ruby-kafka
  version: !ruby/object:Gem::Version
- version: 0.5.1
+ version: 0.5.2.beta1
  platform: ruby
  authors:
  - Daniel Schierbeck
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2017-11-14 00:00:00.000000000 Z
+ date: 2017-12-20 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: bundler
@@ -136,6 +136,20 @@ dependencies:
  - - ">="
  - !ruby/object:Gem::Version
  version: '0'
+ - !ruby/object:Gem::Dependency
+ name: extlz4
+ requirement: !ruby/object:Gem::Requirement
+ requirements:
+ - - ">="
+ - !ruby/object:Gem::Version
+ version: '0'
+ type: :development
+ prerelease: false
+ version_requirements: !ruby/object:Gem::Requirement
+ requirements:
+ - - ">="
+ - !ruby/object:Gem::Version
+ version: '0'
  - !ruby/object:Gem::Dependency
  name: colored
  requirement: !ruby/object:Gem::Requirement
@@ -298,6 +312,7 @@ files:
  - lib/kafka/gzip_codec.rb
  - lib/kafka/heartbeat.rb
  - lib/kafka/instrumenter.rb
+ - lib/kafka/lz4_codec.rb
  - lib/kafka/message_buffer.rb
  - lib/kafka/offset_manager.rb
  - lib/kafka/partitioner.rb
@@ -328,6 +343,7 @@ files:
  - lib/kafka/protocol/member_assignment.rb
  - lib/kafka/protocol/message.rb
  - lib/kafka/protocol/message_set.rb
+ - lib/kafka/protocol/metadata_request.rb
  - lib/kafka/protocol/metadata_response.rb
  - lib/kafka/protocol/offset_commit_request.rb
  - lib/kafka/protocol/offset_commit_response.rb
@@ -340,7 +356,6 @@ files:
  - lib/kafka/protocol/sasl_handshake_response.rb
  - lib/kafka/protocol/sync_group_request.rb
  - lib/kafka/protocol/sync_group_response.rb
- - lib/kafka/protocol/topic_metadata_request.rb
  - lib/kafka/round_robin_assignment_strategy.rb
  - lib/kafka/sasl/gssapi.rb
  - lib/kafka/sasl/plain.rb
@@ -382,9 +397,9 @@ required_ruby_version: !ruby/object:Gem::Requirement
  version: 2.1.0
  required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
- - - ">="
+ - - ">"
  - !ruby/object:Gem::Version
- version: '0'
+ version: 1.3.1
  requirements: []
  rubyforge_project:
  rubygems_version: 2.6.13