ruby-kafka 0.7.3 → 0.7.4

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: d8ea1d759bb30b26f2130fd560d2f09bd46b0960add9a1e65460e9643cb50142
-  data.tar.gz: 9816bf4c2c73567aa0a12fbc0cfe79af84861517a1845eaba5881ac05bd99f77
+  metadata.gz: 5f4441fd6947e65aaf63f57a7248000af3b1140eca6b33ed128cc51f24fa415b
+  data.tar.gz: ea4de0c4d19991767378cc4672bd7da47a3847b2931edd900d3d2f8e463bca0a
 SHA512:
-  metadata.gz: c53dbab79d1157151328c2d718d1d2d2ba9600208419f5788c231df3cc63542f05bfe514cea199d72dde781eab252cf89fd9889f0e22c148afa210d1f499e28a
-  data.tar.gz: 8a8bc9e3dc7d7edb465ebe990c4343330b03db21d595f93cb82698e7abe68166f132975e3e1ff95805b663c2d8227b87e74342b4efab412fda3c4d43be190a97
+  metadata.gz: ffd480a08e25377139e65aab9aeb0d8d85056f033142104ae6593b7b39bafbd9db0de3b4487224a608550b567b072f32b30e6d61c2f2bbcfdc47423d3aece3f8
+  data.tar.gz: b0acbf1f1024d50d1169a68316a67041a837965a76d79c0143228b5e142cdd412b4b9813257e6a40a400bf5a89a1fb70f246932ffb9f280e243848a73a66653c
data/CHANGELOG.md CHANGED
@@ -4,6 +4,14 @@ Changes and additions to the library will be listed here.
 
 ## Unreleased
 
+## 0.7.4
+- Fix wrong encoding calculation that leads to message corruption (#682, #680).
+- Change the log level of the 'Committing offsets' message to debug (#640).
+- Avoid Ruby warnings about unused vars (#679).
+- Synchronously commit offsets after HeartbeatError (#676).
+- Discard messages that were fetched under a previous consumer group generation (#665).
+- Support specifying an ssl client certificates key passphrase (#667).
+
 ## 0.7.3
 
 - Synchronize access to @worker_thread and @timer_thread in AsyncProducer to prevent creating multiple threads (#661).
data/README.md CHANGED
@@ -921,6 +921,7 @@ kafka = Kafka.new(
   ssl_ca_cert: File.read('my_ca_cert.pem'),
   ssl_client_cert: File.read('my_client_cert.pem'),
   ssl_client_cert_key: File.read('my_client_cert_key.pem'),
+  ssl_client_cert_key_password: 'my_client_cert_key_password',
   # ...
 )
 ```
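The new `ssl_client_cert_key_password` option rounds out the SSL settings above. A minimal end-to-end sketch of using it (the broker address, file names, and topic are hypothetical placeholders):

```ruby
require "kafka"

kafka = Kafka.new(
  seed_brokers: ["kafka1.example.com:9093"], # hypothetical broker
  ssl_ca_cert: File.read("my_ca_cert.pem"),
  ssl_client_cert: File.read("my_client_cert.pem"),
  ssl_client_cert_key: File.read("my_client_cert_key.pem"),
  ssl_client_cert_key_password: "my_client_cert_key_password",
)

kafka.deliver_message("hello", topic: "greetings")
```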
data/lib/kafka.rb CHANGED
@@ -19,81 +19,102 @@ module Kafka
   end
   end
 
-  # A fetch operation was executed with no partitions specified.
-  class NoPartitionsToFetchFrom < Error
-  end
-
-  # A message in a partition is larger than the maximum we've asked for.
-  class MessageTooLargeToRead < Error
-  end
-
-  # A connection has been unused for too long, we assume the server has killed it.
-  class IdleConnection < Error
-  end
-
   # Subclasses of this exception class map to an error code described in the
   # Kafka protocol specification.
-  #
-  # See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
+  # https://kafka.apache.org/protocol#protocol_error_codes
   class ProtocolError < Error
   end
 
-  # This indicates that a message contents does not match its CRC.
-  class CorruptMessage < ProtocolError
-  end
-
-  # When the record array length doesn't match real number of received records
-  class InsufficientDataMessage < Error
-  end
-
+  # -1
+  # The server experienced an unexpected error when processing the request
   class UnknownError < ProtocolError
   end
 
+  # 1
+  # The requested offset is not within the range of offsets maintained by the server.
   class OffsetOutOfRange < ProtocolError
     attr_accessor :topic, :partition, :offset
   end
 
+  # 2
+  # This indicates that a message contents does not match its CRC.
+  class CorruptMessage < ProtocolError
+  end
+
+  # 3
   # The request is for a topic or partition that does not exist on the broker.
   class UnknownTopicOrPartition < ProtocolError
   end
 
+  # 4
   # The message has a negative size.
   class InvalidMessageSize < ProtocolError
   end
 
+  # 5
   # This error is thrown if we are in the middle of a leadership election and
   # there is currently no leader for this partition and hence it is unavailable
   # for writes.
   class LeaderNotAvailable < ProtocolError
   end
 
+  # 6
   # This error is thrown if the client attempts to send messages to a replica
   # that is not the leader for some partition. It indicates that the client's
   # metadata is out of date.
   class NotLeaderForPartition < ProtocolError
   end
 
+  # 7
   # This error is thrown if the request exceeds the user-specified time limit
   # in the request.
   class RequestTimedOut < ProtocolError
   end
 
+  # 8
+  # The broker is not available.
   class BrokerNotAvailable < ProtocolError
   end
 
+  # 9
+  # Raised if a replica is expected on a broker, but is not. Can be safely ignored.
+  class ReplicaNotAvailable < ProtocolError
+  end
+
+  # 10
   # The server has a configurable maximum message size to avoid unbounded memory
   # allocation. This error is thrown if the client attempt to produce a message
   # larger than this maximum.
   class MessageSizeTooLarge < ProtocolError
   end
 
+  # 11
+  # The controller moved to another broker.
+  class StaleControllerEpoch < ProtocolError
+  end
+
+  # 12
   # If you specify a string larger than configured maximum for offset metadata.
   class OffsetMetadataTooLarge < ProtocolError
   end
 
+  # 13
+  # The server disconnected before a response was received.
+  class NetworkException < ProtocolError
+  end
+
+  # 14
+  # The coordinator is loading and hence can't process requests.
+  class CoordinatorLoadInProgress < ProtocolError
+  end
+
+  # 15
+  # The coordinator is not available.
   class CoordinatorNotAvailable < ProtocolError
   end
 
+  # 16
+  # This is not the correct coordinator.
   class NotCoordinatorForGroup < ProtocolError
   end
 
@@ -104,6 +125,7 @@ module Kafka
   class InvalidTopic < ProtocolError
   end
 
+  # 18
   # If a message batch in a produce request exceeds the maximum configured
   # segment size.
   class RecordListTooLarge < ProtocolError
@@ -127,28 +149,38 @@ module Kafka
   class InvalidRequiredAcks < ProtocolError
   end
 
-  # 9
-  # Raised if a replica is expected on a broker, but is not. Can be safely ignored.
-  class ReplicaNotAvailable < ProtocolError
+  # 22
+  # Specified group generation id is not valid.
+  class IllegalGeneration < ProtocolError
   end
 
-  # 25
-  class UnknownMemberId < ProtocolError
+  # 23
+  # The group member's supported protocols are incompatible with those of existing members or first group member tried to join with empty protocol type or empty protocol list.
+  class InconsistentGroupProtocol < ProtocolError
   end
 
-  # 27
-  class RebalanceInProgress < ProtocolError
+  # 24
+  # The configured groupId is invalid
+  class InvalidGroupId < ProtocolError
   end
 
-  # 22
-  class IllegalGeneration < ProtocolError
+  # 25
+  # The coordinator is not aware of this member.
+  class UnknownMemberId < ProtocolError
   end
 
   # 26
+  # The session timeout is not within the range allowed by the broker
   class InvalidSessionTimeout < ProtocolError
   end
 
+  # 27
+  # The group is rebalancing, so a rejoin is needed.
+  class RebalanceInProgress < ProtocolError
+  end
+
   # 28
+  # The committing offset data size is not valid
   class InvalidCommitOffsetSize < ProtocolError
   end
 
@@ -165,10 +197,12 @@ module Kafka
   end
 
   # 32
+  # The timestamp of the message is out of acceptable range.
   class InvalidTimestamp < ProtocolError
   end
 
   # 33
+  # The broker does not support the requested SASL mechanism.
   class UnsupportedSaslMechanism < ProtocolError
   end
 
@@ -185,10 +219,12 @@ module Kafka
   end
 
   # 37
+  # Number of partitions is below 1.
   class InvalidPartitions < ProtocolError
   end
 
   # 38
+  # Replication factor is below 1 or larger than the number of available brokers.
   class InvalidReplicationFactor < ProtocolError
   end
 
@@ -201,6 +237,7 @@ module Kafka
   end
 
   # 41
+  # This is not the correct controller for this cluster.
   class NotController < ProtocolError
   end
 
@@ -208,6 +245,75 @@ module Kafka
   class InvalidRequest < ProtocolError
   end
 
+  # 43
+  # The message format version on the broker does not support the request.
+  class UnsupportedForMessageFormat < ProtocolError
+  end
+
+  # 44
+  # Request parameters do not satisfy the configured policy.
+  class PolicyViolation < ProtocolError
+  end
+
+  # 45
+  # The broker received an out of order sequence number
+  class OutOfOrderSequenceNumberError < Error
+  end
+
+  # 46
+  # The broker received a duplicate sequence number
+  class DuplicateSequenceNumberError < Error
+  end
+
+  # 47
+  # Producer attempted an operation with an old epoch. Either there is a newer producer with the same transactionalId, or the producer's transaction has been expired by the broker.
+  class InvalidProducerEpochError < Error
+  end
+
+  # 48
+  # The producer attempted a transactional operation in an invalid state
+  class InvalidTxnStateError < Error
+  end
+
+  # 49
+  # The producer attempted to use a producer id which is not currently assigned to its transactional id
+  class InvalidProducerIDMappingError < Error
+  end
+
+  # 50
+  # The transaction timeout is larger than the maximum value allowed by the broker (as configured by transaction.max.timeout.ms).
+  class InvalidTransactionTimeoutError < Error
+  end
+
+  # 51
+  # The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing
+  class ConcurrentTransactionError < Error
+  end
+
+  # 52
+  # Indicates that the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer
+  class TransactionCoordinatorFencedError < Error
+  end
+
+  ###
+  # ruby-kafka errors
+  ###
+
+  # A fetch operation was executed with no partitions specified.
+  class NoPartitionsToFetchFrom < Error
+  end
+
+  # A message in a partition is larger than the maximum we've asked for.
+  class MessageTooLargeToRead < Error
+  end
+
+  # A connection has been unused for too long, we assume the server has killed it.
+  class IdleConnection < Error
+  end
+
+  # When the record array length doesn't match real number of received records
+  class InsufficientDataMessage < Error
+  end
   # Raised when there's a network connection error.
   class ConnectionError < Error
   end
@@ -245,38 +351,6 @@ module Kafka
   class FailedScramAuthentication < SaslScramError
   end
 
-  # The broker received an out of order sequence number
-  class OutOfOrderSequenceNumberError < Error
-  end
-
-  # The broker received a duplicate sequence number
-  class DuplicateSequenceNumberError < Error
-  end
-
-  # Producer attempted an operation with an old epoch. Either there is a newer producer with the same transactionalId, or the producer's transaction has been expired by the broker.
-  class InvalidProducerEpochError < Error
-  end
-
-  # The producer attempted a transactional operation in an invalid state
-  class InvalidTxnStateError < Error
-  end
-
-  # The producer attempted to use a producer id which is not currently assigned to its transactional id
-  class InvalidProducerIDMappingError < Error
-  end
-
-  # The transaction timeout is larger than the maximum value allowed by the broker (as configured by transaction.max.timeout.ms).
-  class InvalidTransactionTimeoutError < Error
-  end
-
-  # The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing
-  class ConcurrentTransactionError < Error
-  end
-
-  # Indicates that the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer
-  class TransactionCoordinatorFencedError < Error
-  end
-
   # Initializes a new Kafka client.
   #
   # @see Client#initialize
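The reorganization above groups protocol errors by their wire error code and moves ruby-kafka's own errors into a separate section; application code is unaffected, since broker-side failures still share the `ProtocolError` base class. A sketch of rescuing them (the `kafka` client and topic name are placeholders):

```ruby
require "kafka"

begin
  kafka.deliver_message("payload", topic: "events")
rescue Kafka::MessageSizeTooLarge
  # error code 10: the broker's max.message.bytes is smaller than this message
  raise
rescue Kafka::ProtocolError => e
  # any other broker-side error code maps onto some ProtocolError subclass
  puts "Kafka protocol error: #{e.class}"
end
```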
data/lib/kafka/client.rb CHANGED
@@ -46,6 +46,9 @@ module Kafka
   # @param ssl_client_cert_key [String, nil] a PEM encoded client cert key to use with an
   #   SSL connection. Must be used in combination with ssl_client_cert.
   #
+  # @param ssl_client_cert_key_password [String, nil] the password required to read the
+  #   ssl_client_cert_key. Must be used in combination with ssl_client_cert_key.
+  #
   # @param sasl_gssapi_principal [String, nil] a KRB5 principal
   #
   # @param sasl_gssapi_keytab [String, nil] a KRB5 keytab filepath
@@ -61,8 +64,8 @@ module Kafka
   # @return [Client]
   def initialize(seed_brokers:, client_id: "ruby-kafka", logger: nil, connect_timeout: nil, socket_timeout: nil,
                  ssl_ca_cert_file_path: nil, ssl_ca_cert: nil, ssl_client_cert: nil, ssl_client_cert_key: nil,
-                 ssl_client_cert_chain: nil, sasl_gssapi_principal: nil, sasl_gssapi_keytab: nil,
-                 sasl_plain_authzid: '', sasl_plain_username: nil, sasl_plain_password: nil,
+                 ssl_client_cert_key_password: nil, ssl_client_cert_chain: nil, sasl_gssapi_principal: nil,
+                 sasl_gssapi_keytab: nil, sasl_plain_authzid: '', sasl_plain_username: nil, sasl_plain_password: nil,
                  sasl_scram_username: nil, sasl_scram_password: nil, sasl_scram_mechanism: nil,
                  sasl_over_ssl: true, ssl_ca_certs_from_system: false)
     @logger = logger || Logger.new(nil)
@@ -74,6 +77,7 @@ module Kafka
       ca_cert: ssl_ca_cert,
       client_cert: ssl_client_cert,
       client_cert_key: ssl_client_cert_key,
+      client_cert_key_password: ssl_client_cert_key_password,
       client_cert_chain: ssl_client_cert_chain,
       ca_certs_from_system: ssl_ca_certs_from_system,
     )
data/lib/kafka/consumer.rb CHANGED
@@ -394,7 +394,10 @@ module Kafka
       @instrumenter.instrument("loop.consumer") do
         yield
       end
-    rescue HeartbeatError, OffsetCommitError
+    rescue HeartbeatError
+      make_final_offsets_commit!
+      join_group
+    rescue OffsetCommitError
       join_group
     rescue RebalanceInProgress
       @logger.warn "Group rebalance in progress, re-joining..."
data/lib/kafka/fetcher.rb CHANGED
@@ -28,6 +28,10 @@ module Kafka
 
     # The maximum number of bytes to fetch per partition, by topic.
     @max_bytes_per_partition = {}
+
+    # An incrementing counter used to synchronize resets between the
+    # foreground and background thread.
+    @current_reset_counter = 0
   end
 
   def subscribe(topic, max_bytes_per_partition:)
@@ -62,7 +66,8 @@ module Kafka
   end
 
   def reset
-    @commands << [:reset, []]
+    @current_reset_counter = current_reset_counter + 1
+    @commands << [:reset]
   end
 
   def data?
@@ -70,11 +75,23 @@ module Kafka
   end
 
   def poll
-    @queue.deq
+    tag, message, reset_counter = @queue.deq
+
+    # Batches are tagged with the current reset counter value. If the batch
+    # has a reset_counter < current_reset_counter, we know it was fetched
+    # prior to the most recent reset and should be discarded.
+    if tag == :batches && message.any? && current_reset_counter > reset_counter
+      @logger.warn "Skipping stale messages buffered prior to reset"
+      return tag, []
+    end
+
+    return [tag, message]
   end
 
   private
 
+  attr_reader :current_reset_counter
+
   def loop
     @instrumenter.instrument("loop.fetcher", {
       queue_size: @queue.size,
@@ -149,7 +166,7 @@ module Kafka
       @next_offsets[batch.topic][batch.partition] = batch.last_offset + 1 unless batch.unknown_last_offset?
     end
 
-    @queue << [:batches, batches]
+    @queue << [:batches, batches, current_reset_counter]
   rescue Kafka::NoPartitionsToFetchFrom
     @logger.warn "No partitions to fetch from, sleeping for 1s"
     sleep 1
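The counter acts as a generation stamp shared between the two threads: the background loop tags every `[:batches, ...]` tuple with the counter value it saw at fetch time, and `poll` drops any tuple whose stamp predates the latest `reset`. A standalone illustration of the rule (names invented for clarity):

```ruby
queue = Thread::Queue.new
current_reset_counter = 0

# Background thread: stamp each batch with the counter at fetch time.
queue << [:batches, ["msg-1", "msg-2"], current_reset_counter]

# Foreground thread: a reset (e.g. after a seek) advances the counter,
# invalidating everything fetched under the old generation.
current_reset_counter += 1

tag, batch, stamp = queue.deq
batch = [] if tag == :batches && stamp < current_reset_counter
batch # => [] -- the stale messages were discarded
```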
data/lib/kafka/offset_manager.rb CHANGED
@@ -120,7 +120,7 @@ module Kafka
     def commit_offsets(recommit = false)
       offsets = offsets_to_commit(recommit)
       unless offsets.empty?
-        @logger.info "Committing offsets#{recommit ? ' with recommit' : ''}: #{prettify_offsets(offsets)}"
+        @logger.debug "Committing offsets#{recommit ? ' with recommit' : ''}: #{prettify_offsets(offsets)}"
 
         @group.commit_offsets(offsets)
 
data/lib/kafka/protocol.rb CHANGED
@@ -75,7 +75,10 @@ module Kafka
     8 => BrokerNotAvailable,
     9 => ReplicaNotAvailable,
     10 => MessageSizeTooLarge,
+    11 => StaleControllerEpoch,
     12 => OffsetMetadataTooLarge,
+    13 => NetworkException,
+    14 => CoordinatorLoadInProgress,
     15 => CoordinatorNotAvailable,
     16 => NotCoordinatorForGroup,
     17 => InvalidTopic,
@@ -84,6 +87,8 @@ module Kafka
     20 => NotEnoughReplicasAfterAppend,
     21 => InvalidRequiredAcks,
     22 => IllegalGeneration,
+    23 => InconsistentGroupProtocol,
+    24 => InvalidGroupId,
     25 => UnknownMemberId,
     26 => InvalidSessionTimeout,
     27 => RebalanceInProgress,
@@ -102,6 +107,8 @@ module Kafka
     40 => InvalidConfig,
     41 => NotController,
     42 => InvalidRequest,
+    43 => UnsupportedForMessageFormat,
+    44 => PolicyViolation,
     45 => OutOfOrderSequenceNumberError,
     46 => DuplicateSequenceNumberError,
     47 => InvalidProducerEpochError,
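With these entries in place, the corresponding broker error codes raise their specific exceptions instead of falling through to `Kafka::UnknownError`. A quick sketch, assuming the gem's `Protocol.handle_error` helper, which looks a code up in this table and raises the mapped class:

```ruby
require "kafka"

begin
  Kafka::Protocol.handle_error(44)
rescue Kafka::PolicyViolation => e
  puts e.class # => Kafka::PolicyViolation (previously a generic UnknownError)
end
```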
data/lib/kafka/protocol/describe_groups_response.rb CHANGED
@@ -45,7 +45,7 @@ module Kafka
           member_id = decoder.string
           client_id = decoder.string
           client_host = decoder.string
-          metadata = decoder.bytes
+          _metadata = decoder.bytes
           assignment = MemberAssignment.decode(Decoder.from_string(decoder.bytes))
 
           Member.new(
data/lib/kafka/protocol/encoder.rb CHANGED
@@ -133,7 +133,7 @@ module Kafka
       int = ~int | 1 if int < 0
 
       chunks = []
-      while int & 0xff80 != 0
+      while int >> 7 != 0
         chunks << (int & 0x7f | 0x80)
         int >>= 7
       end
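This is the encoding fix behind #682/#680. The old condition masked only bits 7–15 (`0xff80`), so any zig-zagged value whose low 16 bits were zero, such as `32768 << 1 == 0x10000`, looked "finished" after zero iterations and was truncated to a single byte, corrupting the record batch; shifting right by 7 checks all remaining bits. A standalone sketch contrasting the two conditions (not the gem's `Encoder`, just the loop):

```ruby
def encode_varint(int, broken: false)
  int = int << 1            # zig-zag: n >= 0 encodes as 2n
  int = ~int | 1 if int < 0 # zig-zag: n < 0 encodes as -2n - 1

  bytes = []
  while (broken ? int & 0xff80 : int >> 7) != 0
    bytes << (int & 0x7f | 0x80) # low 7 bits plus continuation bit
    int >>= 7
  end
  bytes << int # final chunk, continuation bit clear
end

encode_varint(32768)               # => [128, 128, 4]   correct 3-byte varint
encode_varint(32768, broken: true) # => [65536]         truncated when packed as a byte
```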
data/lib/kafka/protocol/message.rb CHANGED
@@ -67,7 +67,7 @@ module Kafka
         offset = decoder.int64
         message_decoder = Decoder.from_string(decoder.bytes)
 
-        crc = message_decoder.int32
+        _crc = message_decoder.int32
         magic_byte = message_decoder.int8
         attributes = message_decoder.int8
 
data/lib/kafka/protocol/metadata_response.rb CHANGED
@@ -113,7 +113,7 @@ module Kafka
     # @param node_id [Integer] the node id of the broker.
     # @return [Kafka::BrokerInfo] information about the broker.
     def find_broker(node_id)
-      broker = @brokers.find {|broker| broker.node_id == node_id }
+      broker = @brokers.find {|b| b.node_id == node_id }
 
       raise Kafka::NoSuchBroker, "No broker with id #{node_id}" if broker.nil?
 
@@ -145,7 +145,7 @@ module Kafka
         node_id = decoder.int32
         host = decoder.string
         port = decoder.int32
-        rack = decoder.string
+        _rack = decoder.string
 
         BrokerInfo.new(
           node_id: node_id,
@@ -159,7 +159,7 @@ module Kafka
       topics = decoder.array do
         topic_error_code = decoder.int16
         topic_name = decoder.string
-        is_internal = decoder.boolean
+        _is_internal = decoder.boolean
 
         partitions = decoder.array do
           PartitionMetadata.new(
data/lib/kafka/ssl_context.rb CHANGED
@@ -6,15 +6,20 @@ module Kafka
   module SslContext
     CLIENT_CERT_DELIMITER = "\n-----END CERTIFICATE-----\n"
 
-    def self.build(ca_cert_file_path: nil, ca_cert: nil, client_cert: nil, client_cert_key: nil, client_cert_chain: nil, ca_certs_from_system: nil)
-      return nil unless ca_cert_file_path || ca_cert || client_cert || client_cert_key || client_cert_chain || ca_certs_from_system
+    def self.build(ca_cert_file_path: nil, ca_cert: nil, client_cert: nil, client_cert_key: nil, client_cert_key_password: nil, client_cert_chain: nil, ca_certs_from_system: nil)
+      return nil unless ca_cert_file_path || ca_cert || client_cert || client_cert_key || client_cert_key_password || client_cert_chain || ca_certs_from_system
 
       ssl_context = OpenSSL::SSL::SSLContext.new
 
       if client_cert && client_cert_key
+        if client_cert_key_password
+          cert_key = OpenSSL::PKey.read(client_cert_key, client_cert_key_password)
+        else
+          cert_key = OpenSSL::PKey.read(client_cert_key)
+        end
         context_params = {
           cert: OpenSSL::X509::Certificate.new(client_cert),
-          key: OpenSSL::PKey.read(client_cert_key),
+          key: cert_key
         }
         if client_cert_chain
           certs = []
@@ -33,6 +38,8 @@ module Kafka
         raise ArgumentError, "Kafka client initialized with `ssl_client_cert_chain`, but no `ssl_client_cert`. Please provide cert, key and chain."
       elsif client_cert_chain && !client_cert_key
         raise ArgumentError, "Kafka client initialized with `ssl_client_cert_chain`, but no `ssl_client_cert_key`. Please provide cert, key and chain."
+      elsif client_cert_key_password && !client_cert_key
+        raise ArgumentError, "Kafka client initialized with `ssl_client_cert_key_password`, but no `ssl_client_cert_key`. Please provide both."
       end
 
       if ca_cert || ca_cert_file_path || ca_certs_from_system
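A quick way to see the new path in action is to round-trip an encrypted key: `OpenSSL::PKey.read` accepts an optional passphrase, which is exactly what `build` now forwards (the key size and passphrase below are arbitrary):

```ruby
require "openssl"

key = OpenSSL::PKey::RSA.new(2048)
encrypted_pem = key.to_pem(OpenSSL::Cipher.new("aes-256-cbc"), "s3cr3t")

# Without the passphrase this PEM cannot be parsed; with it, the key decrypts.
decrypted = OpenSSL::PKey.read(encrypted_pem, "s3cr3t")
decrypted.private? # => true
```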
data/lib/kafka/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module Kafka
-  VERSION = "0.7.3"
+  VERSION = "0.7.4"
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: ruby-kafka
 version: !ruby/object:Gem::Version
-  version: 0.7.3
+  version: 0.7.4
 platform: ruby
 authors:
 - Daniel Schierbeck
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2018-10-15 00:00:00.000000000 Z
+date: 2018-11-13 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: digest-crc