ruby-kafka 0.7.0.beta1 → 0.7.0.beta2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 1662d1f7534da36a4a6f5ade9e194fd5142034ba4b53d1b6291fcf233e7eb684
- data.tar.gz: 36b9ecb999f55cfd797c23da795c214c7b9712b42a88e6343024e0e17a123eac
+ metadata.gz: e26d3e4028b3b35f16ab12973d386651580c76258e572621e2a84a337fee31f8
+ data.tar.gz: 27f0a98a1cd057d81443c4b43a71a60a697f5c3add8adf30ca882161da82a242
  SHA512:
- metadata.gz: 8b22b391b89199f8001401069f5040bac248866f82b8bd1f69ecf3d67d947ef7f54f6d933a68a9114c2a28f54097be4cedb3c74743bae1b231f313a4427ad257
- data.tar.gz: '0318b5a5cd085ea2c96e7e1bf3c91d914c3898d555b6bdf5681851907e0dec4e2f3ab39c95bf8c289558c41d0c901b243ea7e240e2271235769a2378069e1bbc'
+ metadata.gz: 84690e7b9de72785edb5cb4e8d32e9127b4ea2a8b03fe9d8232455d518818813a270534f3c1b1cd033005adc26a614c0f63ef2190a29da7693e3ef729fb83c02
+ data.tar.gz: 4dc2def15cd9072e201b7a1f012a0cd593a0be76a859b1abacc608580b5257f265e013c6104aee4decf3237daf6522037cb510d834d138e83c44edc15122301c
data/CHANGELOG.md CHANGED
@@ -7,6 +7,11 @@ Changes and additions to the library will be listed here.
  - Drop support for Kafka 0.10 in favor of native support for Kafka 0.11.
  - Support record headers (#604).
  - Add instrumenter and logger when async message delivery fails (#603).
+ - Upgrade and rename GroupCoordinator API to FindCoordinator API (#606).
+ - Refresh cluster metadata after topic re-assignment (#609).
+ - Disable SASL over SSL with a new config (#613).
+ - Allow listing brokers in a cluster (#626).
+ - Fix Fetcher's message skipping (#625).
 
  ## 0.6.7
 
data/README.md CHANGED
@@ -771,7 +771,16 @@ All notifications have `group_id` in the payload, referring to the Kafka consume
 
  * `leave_group.consumer.kafka` is sent whenever a consumer leaves a consumer group. It includes the following payload:
  * `group_id` is the consumer group id.
-
+
+ * `seek.consumer.kafka` is sent when a consumer first seeks to an offset. It includes the following payload:
+ * `group_id` is the consumer group id.
+ * `topic` is the topic we are seeking in.
+ * `partition` is the partition we are seeking in.
+ * `offset` is the offset we have seeked to.
+
+ * `heartbeat.consumer.kafka` is sent when a consumer group completes a heartbeat. It includes the following payload:
+ * `group_id` is the consumer group id.
+ * `topic_partitions` is a hash of { topic_name => array of assigned partition IDs }
 
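The two notifications added above are emitted through the same ActiveSupport::Notifications-based instrumentation ruby-kafka uses for the other `*.consumer.kafka` events. A minimal illustrative subscriber (not part of the package; the log format is made up) could look like this:

```ruby
require "active_support/notifications"

# Log every seek performed by a consumer; payload keys as documented above.
ActiveSupport::Notifications.subscribe("seek.consumer.kafka") do |_name, _start, _finish, _id, payload|
  puts "group=#{payload[:group_id]} seek #{payload[:topic]}/#{payload[:partition]} to offset #{payload[:offset]}"
end

# Track heartbeats and the partitions assigned at that point in time.
ActiveSupport::Notifications.subscribe("heartbeat.consumer.kafka") do |_name, _start, _finish, _id, payload|
  payload[:topic_partitions].each do |topic, partitions|
    puts "group=#{payload[:group_id]} heartbeat: #{topic} => #{partitions.inspect}"
  end
end
```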
  #### Connection Notifications
 
@@ -926,7 +935,7 @@ Typically, Kafka certificates come in the JKS format, which isn't supported by r
 
  Kafka has support for using SASL to authenticate clients. Currently GSSAPI, SCRAM and PLAIN mechanisms are supported by ruby-kafka.
 
- **NOTE:** In order to use SASL for authentication, you need to configure SSL encryption by passing `ssl_ca_cert` or enabling `ssl_ca_certs_from_system`.
+ **NOTE:** When using SASL for authentication, it is highly recommended to also use SSL encryption. By default, ruby-kafka enforces this: you must configure SSL encryption by passing `ssl_ca_cert` or enabling `ssl_ca_certs_from_system`. This strict check can be disabled by setting `sasl_over_ssl` to `false` when initializing the client.
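As a sketch of the new escape hatch (broker address and credentials are placeholders, not taken from the README):

```ruby
require "kafka"

# Illustrative only: SASL/PLAIN without SSL, using the new `sasl_over_ssl` flag.
kafka = Kafka.new(
  ["kafka1:9092"],
  sasl_plain_username: 'username',
  sasl_plain_password: 'password',
  sasl_over_ssl: false # opts out of the SSL requirement that is enforced by default
)
```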
 
  ##### GSSAPI
  In order to authenticate using GSSAPI, set your principal and optionally your keytab when initializing the Kafka client:
@@ -946,15 +955,13 @@ In order to authenticate using PLAIN, you must set your username and password wh
  ```ruby
  kafka = Kafka.new(
  ["kafka1:9092"],
- ssl_ca_cert: File.read('/etc/openssl/cert.pem'), # Optional but highly recommended
+ ssl_ca_cert: File.read('/etc/openssl/cert.pem'),
  sasl_plain_username: 'username',
  sasl_plain_password: 'password'
  # ...
  )
  ```
 
- **NOTE**: It is __highly__ recommended that you use SSL for encryption when using SASL_PLAIN
-
  ##### SCRAM
  Since 0.11 kafka supports [SCRAM](https://kafka.apache.org/documentation.html#security_sasl_scram).
 
data/lib/kafka.rb CHANGED
@@ -91,7 +91,7 @@ module Kafka
  class OffsetMetadataTooLarge < ProtocolError
  end
 
- class GroupCoordinatorNotAvailable < ProtocolError
+ class CoordinatorNotAvailable < ProtocolError
  end
 
  class NotCoordinatorForGroup < ProtocolError
data/lib/kafka/broker.rb CHANGED
@@ -104,8 +104,8 @@ module Kafka
  send_request(request)
  end
 
- def find_group_coordinator(**options)
- request = Protocol::GroupCoordinatorRequest.new(**options)
+ def find_coordinator(**options)
+ request = Protocol::FindCoordinatorRequest.new(**options)
 
  send_request(request)
  end
data/lib/kafka/broker_info.rb ADDED
@@ -0,0 +1,16 @@
+ # Represents a broker in a Kafka cluster.
+ module Kafka
+ class BrokerInfo
+ attr_reader :node_id, :host, :port
+
+ def initialize(node_id:, host:, port:)
+ @node_id = node_id
+ @host = host
+ @port = port
+ end
+
+ def to_s
+ "#{host}:#{port} (node_id=#{node_id})"
+ end
+ end
+ end
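This class previously lived nested inside `Protocol::MetadataResponse` (its removal appears further down in this diff); it is now a plain value object. A tiny illustration of its behavior, with made-up broker details:

```ruby
require "kafka/broker_info"

# Illustrative only: the value object exposes its attributes and a readable string form.
info = Kafka::BrokerInfo.new(node_id: 1, host: "kafka1", port: 9092)
info.node_id # => 1
info.to_s    # => "kafka1:9092 (node_id=1)"
```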
data/lib/kafka/client.rb CHANGED
@@ -2,6 +2,7 @@
 
  require "kafka/ssl_context"
  require "kafka/cluster"
+ require "kafka/broker_info"
  require "kafka/producer"
  require "kafka/consumer"
  require "kafka/heartbeat"
@@ -54,12 +55,15 @@ module Kafka
  #
  # @param sasl_scram_mechanism [String, nil] Scram mechanism, either "sha256" or "sha512"
  #
+ # @param sasl_over_ssl [Boolean] whether to enforce SSL with SASL
+ #
  # @return [Client]
  def initialize(seed_brokers:, client_id: "ruby-kafka", logger: nil, connect_timeout: nil, socket_timeout: nil,
  ssl_ca_cert_file_path: nil, ssl_ca_cert: nil, ssl_client_cert: nil, ssl_client_cert_key: nil,
  sasl_gssapi_principal: nil, sasl_gssapi_keytab: nil,
  sasl_plain_authzid: '', sasl_plain_username: nil, sasl_plain_password: nil,
- sasl_scram_username: nil, sasl_scram_password: nil, sasl_scram_mechanism: nil, ssl_ca_certs_from_system: false)
+ sasl_scram_username: nil, sasl_scram_password: nil, sasl_scram_mechanism: nil,
+ sasl_over_ssl: true, ssl_ca_certs_from_system: false)
  @logger = logger || Logger.new(nil)
  @instrumenter = Instrumenter.new(client_id: client_id)
  @seed_brokers = normalize_seed_brokers(seed_brokers)
@@ -84,7 +88,7 @@ module Kafka
  logger: @logger
  )
 
- if sasl_authenticator.enabled? && ssl_context.nil?
+ if sasl_authenticator.enabled? && sasl_over_ssl && ssl_context.nil?
  raise ArgumentError, "SASL authentication requires that SSL is configured"
  end
 
@@ -313,6 +317,7 @@ module Kafka
 
  fetcher = Fetcher.new(
  cluster: initialize_cluster,
+ group: group,
  logger: @logger,
  instrumenter: instrumenter,
  max_queue_size: fetcher_max_queue_size
@@ -473,7 +478,7 @@ module Kafka
 
  batches.each do |batch|
  batch.messages.each(&block)
- offsets[batch.partition] = batch.last_offset + 1
+ offsets[batch.partition] = batch.last_offset + 1 unless batch.empty?
  end
  end
  end
@@ -648,6 +653,20 @@ module Kafka
  @cluster.apis
  end
 
+ # List all brokers in the cluster.
+ #
+ # @return [Array<Kafka::BrokerInfo>] the list of brokers.
+ def brokers
+ @cluster.cluster_info.brokers
+ end
+
+ # The current controller broker in the cluster.
+ #
+ # @return [Kafka::BrokerInfo] information on the controller broker.
+ def controller_broker
+ brokers.find {|broker| broker.node_id == @cluster.cluster_info.controller_id }
+ end
+
  # Closes all connections to the Kafka brokers and frees up used resources.
  #
  # @return [nil]
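The new `Kafka::Client#brokers` and `#controller_broker` readers expose cluster topology directly on the client. An illustrative sketch of using them (broker addresses are placeholders):

```ruby
require "kafka"

kafka = Kafka.new(["kafka1:9092", "kafka2:9092"])

# Each element is a Kafka::BrokerInfo (see the new file earlier in this diff).
kafka.brokers.each do |broker|
  puts broker.to_s # e.g. "kafka1:9092 (node_id=1)"
end

# The broker currently acting as the cluster controller.
puts kafka.controller_broker.to_s
```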
data/lib/kafka/cluster.rb CHANGED
@@ -40,14 +40,17 @@ module Kafka
  # @param topics [Array<String>]
  # @return [nil]
  def add_target_topics(topics)
- new_topics = Set.new(topics) - @target_topics
+ topics = Set.new(topics)
+ unless topics.subset?(@target_topics)
+ new_topics = topics - @target_topics
 
- unless new_topics.empty?
- @logger.info "New topics added to target list: #{new_topics.to_a.join(', ')}"
+ unless new_topics.empty?
+ @logger.info "New topics added to target list: #{new_topics.to_a.join(', ')}"
 
- @target_topics.merge(new_topics)
+ @target_topics.merge(new_topics)
 
- refresh_metadata!
+ refresh_metadata!
+ end
  end
  end
 
@@ -116,9 +119,12 @@ module Kafka
  cluster_info.brokers.each do |broker_info|
  begin
  broker = connect_to_broker(broker_info.node_id)
- response = broker.find_group_coordinator(group_id: group_id)
+ response = broker.find_coordinator(
+ coordinator_type: Kafka::Protocol::COORDINATOR_TYPE_GROUP,
+ coordinator_key: group_id
+ )
 
- Protocol.handle_error(response.error_code)
+ Protocol.handle_error(response.error_code, response.error_message)
 
  coordinator_id = response.coordinator_id
 
@@ -137,7 +143,7 @@ module Kafka
  @logger.debug "Connected to coordinator: #{coordinator} for group `#{group_id}`"
 
  return coordinator
- rescue GroupCoordinatorNotAvailable
+ rescue CoordinatorNotAvailable
  @logger.debug "Coordinator not available; retrying in 1s"
  sleep 1
  retry
@@ -357,16 +363,16 @@ module Kafka
  @broker_pool.close
  end
 
+ def cluster_info
+ @cluster_info ||= fetch_cluster_info
+ end
+
  private
 
  def get_leader_id(topic, partition)
  cluster_info.find_leader_id(topic, partition)
  end
 
- def cluster_info
- @cluster_info ||= fetch_cluster_info
- end
-
  # Fetches the cluster metadata.
  #
  # This is used to update the partition leadership information, among other things.
data/lib/kafka/consumer.rb CHANGED
@@ -444,6 +444,7 @@
  if old_generation_id && @group.generation_id != old_generation_id + 1
  # We've been out of the group for at least an entire generation, no
  # sense in trying to hold on to offset data
+ clear_current_offsets
  @offset_manager.clear_offsets
  else
  # After rejoining the group we may have been assigned a new set of
@@ -451,6 +452,7 @@ module Kafka
  # having the consumer go back and reprocess messages if it's assigned
  # a partition it used to be assigned to way back. For that reason, we
  # only keep commits for the partitions that we're still assigned.
+ clear_current_offsets(excluding: @group.assigned_partitions)
  @offset_manager.clear_offsets_excluding(@group.assigned_partitions)
  end
 
@@ -537,5 +539,13 @@ module Kafka
  def pause_for(topic, partition)
  @pauses[topic][partition]
  end
+
+ def clear_current_offsets(excluding: {})
+ @current_offsets.each do |topic, partitions|
+ partitions.keep_if do |partition, _|
+ excluding.fetch(topic, []).include?(partition)
+ end
+ end
+ end
  end
  end
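In `clear_current_offsets(excluding:)`, the hash maps topic names to the partitions the consumer is still assigned; offsets for anything not listed are dropped from memory. A standalone sketch of the same `keep_if` filtering, with made-up data:

```ruby
# Illustrative data only; mirrors the filtering done by Consumer#clear_current_offsets above.
current_offsets = {
  "orders"   => { 0 => 42, 1 => 17, 2 => 99 },
  "payments" => { 0 => 5 },
}
excluding = { "orders" => [0, 2] } # partitions we are still assigned, so their offsets are kept

current_offsets.each do |topic, partitions|
  partitions.keep_if { |partition, _| excluding.fetch(topic, []).include?(partition) }
end

current_offsets # => {"orders"=>{0=>42, 2=>99}, "payments"=>{}}
```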
data/lib/kafka/consumer_group.rb CHANGED
@@ -5,7 +5,7 @@ require "kafka/round_robin_assignment_strategy"
 
  module Kafka
  class ConsumerGroup
- attr_reader :assigned_partitions, :generation_id
+ attr_reader :assigned_partitions, :generation_id, :group_id
 
  def initialize(cluster:, logger:, group_id:, session_timeout:, retention_time:, instrumenter:)
  @cluster = cluster
@@ -100,13 +100,18 @@ module Kafka
  def heartbeat
  @logger.debug "Sending heartbeat..."
 
- response = coordinator.heartbeat(
- group_id: @group_id,
- generation_id: @generation_id,
- member_id: @member_id,
- )
+ @instrumenter.instrument('heartbeat.consumer',
+ group_id: @group_id,
+ topic_partitions: @assigned_partitions) do
 
- Protocol.handle_error(response.error_code)
+ response = coordinator.heartbeat(
+ group_id: @group_id,
+ generation_id: @generation_id,
+ member_id: @member_id,
+ )
+
+ Protocol.handle_error(response.error_code)
+ end
  rescue ConnectionError, UnknownMemberId, RebalanceInProgress, IllegalGeneration => e
  @logger.error "Error sending heartbeat: #{e}"
  raise HeartbeatError, e
@@ -183,7 +188,7 @@ module Kafka
 
  def coordinator
  @coordinator ||= @cluster.get_group_coordinator(group_id: @group_id)
- rescue GroupCoordinatorNotAvailable
+ rescue CoordinatorNotAvailable
  @logger.error "Group coordinator not available for group `#{@group_id}`"
 
  sleep 1
data/lib/kafka/fetcher.rb CHANGED
@@ -6,11 +6,12 @@ module Kafka
  class Fetcher
  attr_reader :queue
 
- def initialize(cluster:, logger:, instrumenter:, max_queue_size:)
+ def initialize(cluster:, logger:, instrumenter:, max_queue_size:, group:)
  @cluster = cluster
  @logger = logger
  @instrumenter = instrumenter
  @max_queue_size = max_queue_size
+ @group = group
 
  @queue = Queue.new
  @commands = Queue.new
@@ -122,6 +123,11 @@ module Kafka
  end
 
  def handle_seek(topic, partition, offset)
+ @instrumenter.instrument('seek.consumer',
+ group_id: @group.group_id,
+ topic: topic,
+ partition: partition,
+ offset: offset)
  @logger.info "Seeking #{topic}/#{partition} to offset #{offset}"
  @next_offsets[topic][partition] = offset
  end
@@ -138,9 +144,9 @@ module Kafka
  highwater_mark_offset: batch.highwater_mark_offset,
  message_count: batch.messages.count,
  })
- end
 
- @next_offsets[batch.topic][batch.partition] = batch.last_offset + 1
+ @next_offsets[batch.topic][batch.partition] = batch.last_offset + 1
+ end
  end
 
  @queue << [:batches, batches]
data/lib/kafka/protocol.rb CHANGED
@@ -20,7 +20,7 @@
  TOPIC_METADATA_API = 3
  OFFSET_COMMIT_API = 8
  OFFSET_FETCH_API = 9
- GROUP_COORDINATOR_API = 10
+ FIND_COORDINATOR_API = 10
  JOIN_GROUP_API = 11
  HEARTBEAT_API = 12
  LEAVE_GROUP_API = 13
@@ -43,7 +43,7 @@ module Kafka
  TOPIC_METADATA_API => :topic_metadata,
  OFFSET_COMMIT_API => :offset_commit,
  OFFSET_FETCH_API => :offset_fetch,
- GROUP_COORDINATOR_API => :group_coordinator,
+ FIND_COORDINATOR_API => :find_coordinator,
  JOIN_GROUP_API => :join_group,
  HEARTBEAT_API => :heartbeat,
  LEAVE_GROUP_API => :leave_group,
@@ -70,7 +70,7 @@ module Kafka
  9 => ReplicaNotAvailable,
  10 => MessageSizeTooLarge,
  12 => OffsetMetadataTooLarge,
- 15 => GroupCoordinatorNotAvailable,
+ 15 => CoordinatorNotAvailable,
  16 => NotCoordinatorForGroup,
  17 => InvalidTopic,
  18 => RecordListTooLarge,
@@ -117,6 +117,11 @@ module Kafka
  RESOURCE_TYPE_DELEGATION_TOKEN => :delegation_token,
  }
 
+ # Coordinator types. Since Kafka 0.11.0, there are two types of coordinators:
+ # Group and Transaction
+ COORDINATOR_TYPE_GROUP = 0
+ COORDINATOR_TYPE_TRANSACTION = 1
+
  # Handles an error code by either doing nothing (if there was no error) or
  # by raising an appropriate exception.
  #
@@ -151,8 +156,8 @@ require "kafka/protocol/fetch_request"
  require "kafka/protocol/fetch_response"
  require "kafka/protocol/list_offset_request"
  require "kafka/protocol/list_offset_response"
- require "kafka/protocol/group_coordinator_request"
- require "kafka/protocol/group_coordinator_response"
+ require "kafka/protocol/find_coordinator_request"
+ require "kafka/protocol/find_coordinator_response"
  require "kafka/protocol/join_group_request"
  require "kafka/protocol/join_group_response"
  require "kafka/protocol/sync_group_request"
data/lib/kafka/protocol/find_coordinator_request.rb ADDED
@@ -0,0 +1,29 @@
+ # frozen_string_literal: true
+
+ module Kafka
+ module Protocol
+ class FindCoordinatorRequest
+ def initialize(coordinator_key:, coordinator_type:)
+ @coordinator_key = coordinator_key
+ @coordinator_type = coordinator_type
+ end
+
+ def api_key
+ FIND_COORDINATOR_API
+ end
+
+ def api_version
+ 1
+ end
+
+ def encode(encoder)
+ encoder.write_string(@coordinator_key)
+ encoder.write_int8(@coordinator_type)
+ end
+
+ def response_class
+ FindCoordinatorResponse
+ end
+ end
+ end
+ end
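Tying the rename together: callers now pass a coordinator type and key instead of a bare group id. A sketch of the flow, based on `Broker#find_coordinator` and the constants added to `protocol.rb` earlier in this diff; the group name is a placeholder:

```ruby
require "kafka"

# Building the request directly (normally done for you by Broker#find_coordinator).
request = Kafka::Protocol::FindCoordinatorRequest.new(
  coordinator_key: "some-consumer-group",
  coordinator_type: Kafka::Protocol::COORDINATOR_TYPE_GROUP # 0 = group, 1 = transaction
)
request.api_key # => 10 (FIND_COORDINATOR_API)

# How the cluster layer drives it (see cluster.rb above); `broker` and `group_id`
# stand in for a connected Kafka::Broker and a real group id.
# response = broker.find_coordinator(
#   coordinator_type: Kafka::Protocol::COORDINATOR_TYPE_GROUP,
#   coordinator_key: group_id
# )
# Kafka::Protocol.handle_error(response.error_code, response.error_message)
```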
data/lib/kafka/protocol/find_coordinator_response.rb CHANGED
@@ -2,12 +2,12 @@
 
  module Kafka
  module Protocol
- class GroupCoordinatorResponse
- attr_reader :error_code
+ class FindCoordinatorResponse
+ attr_reader :error_code, :error_message
 
  attr_reader :coordinator_id, :coordinator_host, :coordinator_port
 
- def initialize(error_code:, coordinator_id:, coordinator_host:, coordinator_port:)
+ def initialize(error_code:, error_message:, coordinator_id:, coordinator_host:, coordinator_port:)
  @error_code = error_code
  @coordinator_id = coordinator_id
  @coordinator_host = coordinator_host
@@ -15,8 +15,10 @@ module Kafka
  end
 
  def self.decode(decoder)
+ _throttle_time_ms = decoder.int32
  new(
  error_code: decoder.int16,
+ error_message: decoder.string,
  coordinator_id: decoder.int32,
  coordinator_host: decoder.string,
  coordinator_port: decoder.int32,
data/lib/kafka/protocol/metadata_response.rb CHANGED
@@ -33,20 +33,6 @@
  # Isr => [int32]
  #
  class MetadataResponse
- class BrokerInfo
- attr_reader :node_id, :host, :port
-
- def initialize(node_id:, host:, port:)
- @node_id = node_id
- @host = host
- @port = port
- end
-
- def to_s
- "#{host}:#{port} (node_id=#{node_id})"
- end
- end
-
  class PartitionMetadata
  attr_reader :partition_id, :leader
 
@@ -77,7 +63,7 @@ module Kafka
  end
  end
 
- # @return [Array<BrokerInfo>] the list of brokers in the cluster.
+ # @return [Array<Kafka::BrokerInfo>] the list of brokers in the cluster.
  attr_reader :brokers
 
  # @return [Array<TopicMetadata>] the list of topics in the cluster.
@@ -125,7 +111,7 @@ module Kafka
  # Finds the broker info for the given node id.
  #
  # @param node_id [Integer] the node id of the broker.
- # @return [BrokerInfo] information about the broker.
+ # @return [Kafka::BrokerInfo] information about the broker.
  def find_broker(node_id)
  broker = @brokers.find {|broker| broker.node_id == node_id }
 
@@ -134,6 +120,10 @@ module Kafka
  broker
  end
 
+ def controller_broker
+ find_broker(controller_id)
+ end
+
  def partitions_for(topic_name)
  topic = @topics.find {|t| t.topic_name == topic_name }
 
data/lib/kafka/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true
 
  module Kafka
- VERSION = "0.7.0.beta1"
+ VERSION = "0.7.0.beta2"
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: ruby-kafka
  version: !ruby/object:Gem::Version
- version: 0.7.0.beta1
+ version: 0.7.0.beta2
  platform: ruby
  authors:
  - Daniel Schierbeck
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2018-06-21 00:00:00.000000000 Z
+ date: 2018-08-08 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: digest-crc
@@ -323,6 +323,7 @@ files:
  - lib/kafka.rb
  - lib/kafka/async_producer.rb
  - lib/kafka/broker.rb
+ - lib/kafka/broker_info.rb
  - lib/kafka/broker_pool.rb
  - lib/kafka/broker_uri.rb
  - lib/kafka/client.rb
@@ -370,8 +371,8 @@ files:
  - lib/kafka/protocol/encoder.rb
  - lib/kafka/protocol/fetch_request.rb
  - lib/kafka/protocol/fetch_response.rb
- - lib/kafka/protocol/group_coordinator_request.rb
- - lib/kafka/protocol/group_coordinator_response.rb
+ - lib/kafka/protocol/find_coordinator_request.rb
+ - lib/kafka/protocol/find_coordinator_response.rb
  - lib/kafka/protocol/heartbeat_request.rb
  - lib/kafka/protocol/heartbeat_response.rb
  - lib/kafka/protocol/join_group_request.rb
data/lib/kafka/protocol/group_coordinator_request.rb DELETED
@@ -1,23 +0,0 @@
- # frozen_string_literal: true
-
- module Kafka
- module Protocol
- class GroupCoordinatorRequest
- def initialize(group_id:)
- @group_id = group_id
- end
-
- def api_key
- GROUP_COORDINATOR_API
- end
-
- def encode(encoder)
- encoder.write_string(@group_id)
- end
-
- def response_class
- GroupCoordinatorResponse
- end
- end
- end
- end