karafka-rdkafka 0.13.5 → 0.13.6

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 460d2befd6142375c9e19c1874c2c44e749e48d91119395a0e3a2dcee37ddf67
- data.tar.gz: ca6a6f51754841d6b8bce3cd63d644242e28231c3244969264df0f00061ceae6
+ metadata.gz: 4da2961103e92a94c36f605359cc5380ffa6d740cdc667329797ce9345caa663
+ data.tar.gz: 419aff211c5bfe35c244549fa14e042e92a39ad8583af2b8844283b7f1dca5b0
  SHA512:
- metadata.gz: '09f5ee7ad4eeaed20631d12810948defc1812bce1962bdf018fc792db6fb59b82431fdc69bcea76968edd7a40b1c4537d01900d1eb084190b607c9682084517e'
- data.tar.gz: 5d525cb93564d1bedde6d4bd6f7022bf064610cab1a8cb6ad676c0fe2097946a160a87f6e3227c73b3bbd7804802130c56a17fef28cb3c06db9d796f62369714
+ metadata.gz: c0b0d1069163ece1dcf5734ebc80e952a86c3eccc96885f856dc3f4569a0aaddc12db28fda7927d4c360b6f2ba0078a782f7ee6582554159c0e0b80c975e03bf
+ data.tar.gz: 2e3ba6a6ef850bafa305f2d9f2a9d3b4268fedbafc50f2b5ce2fedffcb4bb018b8a1ed5b1ce1b3a1efbad51621a2ad74d448641774d93879da628c06d4a1260c
checksums.yaml.gz.sig CHANGED
Binary file
@@ -20,6 +20,7 @@ jobs:
  fail-fast: false
  matrix:
  ruby:
+ - '3.3.0-preview2'
  - '3.2'
  - '3.1'
  - '3.1.0'
@@ -36,6 +37,10 @@ jobs:
  - name: Install package dependencies
  run: "[ -e $APT_DEPS ] || sudo apt-get install -y --no-install-recommends $APT_DEPS"

+ - name: Start Kafka with docker-compose
+ run: |
+ docker-compose up -d || (sleep 5 && docker-compose up -d)
+
  - name: Set up Ruby
  uses: ruby/setup-ruby@v1
  with:
@@ -47,7 +52,6 @@ jobs:
  GITHUB_COVERAGE: ${{matrix.coverage}}

  run: |
- docker-compose up -d --no-recreate
  bundle install --path vendor/bundle
  cd ext && bundle exec rake && cd ..
  bundle exec rspec
data/CHANGELOG.md CHANGED
@@ -1,3 +1,9 @@
+ # 0.13.6 (2023-10-17)
+ * **[Feature]** Support transactions API in the producer
+ * [Enhancement] Add `raise_response_error` flag to the `Rdkafka::AbstractHandle`.
+ * [Enhancement] Provide `#purge` to remove any outstanding requests from the producer.
+ * [Enhancement] Fix `#flush` not handling timeout errors by making it return `true` when everything was flushed or `false` when the timeout was reached. We do **not** raise an exception here to keep it backwards compatible.
+
  # 0.13.5
  * Fix DeliveryReport `create_result#error` being nil despite an error being associated with it

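The headline feature above is the transactional producer flow. A minimal usage sketch of the new API (the broker address, transactional id, and topic name here are illustrative, not taken from the gem's docs):

```ruby
require "rdkafka"

config = Rdkafka::Config.new(
  "bootstrap.servers"      => "127.0.0.1:9092",
  "transactional.id"       => "my-app-producer", # hypothetical id
  "transaction.timeout.ms" => 10_000
)

producer = config.producer
producer.init_transactions            # run once per producer
producer.begin_transaction
handle = producer.produce(topic: "example_topic", payload: "data")
producer.commit_transaction           # or abort_transaction to discard
handle.wait(max_wait_timeout: 15)
producer.close
```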
data/docker-compose.yml CHANGED
@@ -1,24 +1,25 @@
- ---
-
  version: '2'

  services:
- zookeeper:
- image: confluentinc/cp-zookeeper:5.2.6
- environment:
- ZOOKEEPER_CLIENT_PORT: 2181
- ZOOKEEPER_TICK_TIME: 2000
-
  kafka:
- image: confluentinc/cp-kafka:5.2.5-10
- depends_on:
- - zookeeper
+ container_name: kafka
+ image: confluentinc/cp-kafka:7.5.0
+
  ports:
  - 9092:9092
+
  environment:
- KAFKA_BROKER_ID: 1
- KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
- KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:29092,PLAINTEXT_HOST://localhost:9092
- KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
+ CLUSTER_ID: kafka-docker-cluster-1
  KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
  KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+ KAFKA_PROCESS_ROLES: broker,controller
+ KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER
+ KAFKA_LISTENERS: PLAINTEXT://:9092,CONTROLLER://:9093
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT
+ KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://127.0.0.1:9092
+ KAFKA_BROKER_ID: 1
+ KAFKA_CONTROLLER_QUORUM_VOTERS: 1@127.0.0.1:9093
+ ALLOW_PLAINTEXT_LISTENER: 'yes'
+ KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
+ KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+ KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
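With this single-node KRaft setup running (`docker-compose up -d`), clients connect directly to `127.0.0.1:9092`. A quick connectivity check in Ruby, assuming the `example_topic` created by the spec helpers further down:

```ruby
require "rdkafka"

producer = Rdkafka::Config.new("bootstrap.servers" => "127.0.0.1:9092").producer

# partition_count triggers a metadata request, so this fails fast
# if the Kafka container is not reachable yet.
puts producer.partition_count("example_topic")
producer.close
```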
@@ -37,12 +37,13 @@ module Rdkafka
  #
  # @param max_wait_timeout [Numeric, nil] Amount of time to wait before timing out. If this is nil it does not time out.
  # @param wait_timeout [Numeric] Amount of time we should wait before we recheck if the operation has completed
+ # @param raise_response_error [Boolean] whether to raise an error when waiting finishes with an error response
  #
  # @raise [RdkafkaError] When the operation failed
  # @raise [WaitTimeoutError] When the timeout has been reached and the handle is still pending
  #
  # @return [Object] Operation-specific result
- def wait(max_wait_timeout: 60, wait_timeout: 0.1)
+ def wait(max_wait_timeout: 60, wait_timeout: 0.1, raise_response_error: true)
  timeout = if max_wait_timeout
  CURRENT_TIME.call + max_wait_timeout
  else
@@ -54,7 +55,7 @@ module Rdkafka
  raise WaitTimeoutError.new("Waiting for #{operation_name} timed out after #{max_wait_timeout} seconds")
  end
  sleep wait_timeout
- elsif self[:response] != 0
+ elsif self[:response] != 0 && raise_response_error
  raise_error
  else
  return create_result
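Together, these two hunks let callers opt out of raising on a failed operation. A short sketch of the new flag (topic name illustrative):

```ruby
handle = producer.produce(topic: "example_topic", payload: "msg")

# With raise_response_error: false, a failed delivery does not raise;
# the error is exposed on the returned result instead.
result = handle.wait(max_wait_timeout: 15, raise_response_error: false)
puts result.error if result.error
```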
@@ -74,7 +75,7 @@ module Rdkafka

  # Allow subclasses to override
  def raise_error
- raise RdkafkaError.new(self[:response])
+ RdkafkaError.validate!(self[:response])
  end

  # Error that is raised when waiting for the handle to complete
@@ -17,9 +17,12 @@ module Rdkafka
  end

  def raise_error
- raise RdkafkaError.new(
- self[:response],
- broker_message: CreatePartitionsReport.new(self[:error_string], self[:result_name]).error_string
+ RdkafkaError.validate!(
+ self[:response],
+ broker_message: CreatePartitionsReport.new(
+ self[:error_string],
+ self[:result_name]
+ ).error_string
  )
  end
  end
@@ -19,9 +19,12 @@ module Rdkafka
  end

  def raise_error
- raise RdkafkaError.new(
- self[:response],
- broker_message: CreateTopicReport.new(self[:error_string], self[:result_name]).error_string
+ RdkafkaError.validate!(
+ self[:response],
+ broker_message: CreateTopicReport.new(
+ self[:error_string],
+ self[:result_name]
+ ).error_string
  )
  end
  end
@@ -19,9 +19,12 @@ module Rdkafka
  end

  def raise_error
- raise RdkafkaError.new(
- self[:response],
- broker_message: DeleteTopicReport.new(self[:error_string], self[:result_name]).error_string
+ RdkafkaError.validate!(
+ self[:response],
+ broker_message: DeleteTopicReport.new(
+ self[:error_string],
+ self[:result_name]
+ ).error_string
  )
  end
  end
@@ -6,6 +6,15 @@ require "logger"

  module Rdkafka
  # @private
+ #
+ # @note
+ # There are two types of responses related to errors:
+ # - rd_kafka_error_t - a C object that we need to remap into an error or null when no error
+ # - rd_kafka_resp_err_t - a numeric response error code that we can use directly
+ #
+ # It is critical to ensure that we handle them correctly. The result type should be:
+ # - rd_kafka_error_t - :pointer
+ # - rd_kafka_resp_err_t - :int
  module Bindings
  extend FFI::Library

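Both conventions appear in the declarations added later in this diff, for example:

```ruby
# rd_kafka_commit_transaction returns an rd_kafka_error_t object -> :pointer
attach_function :rd_kafka_commit_transaction, [:pointer, :int], :pointer, blocking: true

# rd_kafka_flush returns an rd_kafka_resp_err_t code -> :int
attach_function :rd_kafka_flush, [:pointer, :int], :int, blocking: true
```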
@@ -35,7 +44,7 @@ module Rdkafka

  # Polling

- attach_function :rd_kafka_flush, [:pointer, :int], :void, blocking: true
+ attach_function :rd_kafka_flush, [:pointer, :int], :int, blocking: true
  attach_function :rd_kafka_poll, [:pointer, :int], :void, blocking: true
  attach_function :rd_kafka_outq_len, [:pointer], :int, blocking: true

@@ -96,6 +105,11 @@ module Rdkafka

  attach_function :rd_kafka_err2name, [:int], :string
  attach_function :rd_kafka_err2str, [:int], :string
+ attach_function :rd_kafka_error_is_fatal, [:pointer], :int
+ attach_function :rd_kafka_error_is_retriable, [:pointer], :int
+ attach_function :rd_kafka_error_txn_requires_abort, [:pointer], :int
+ attach_function :rd_kafka_error_destroy, [:pointer], :void
+ attach_function :rd_kafka_error_code, [:pointer], :int

  # Configuration

@@ -157,7 +171,7 @@ module Rdkafka
  :void, [:pointer, :int, :string, :pointer]
  ) do |_client_ptr, err_code, reason, _opaque|
  if Rdkafka::Config.error_callback
- error = Rdkafka::RdkafkaError.new(err_code, broker_message: reason)
+ error = Rdkafka::RdkafkaError.build(err_code, broker_message: reason)
  error.set_backtrace(caller)
  Rdkafka::Config.error_callback.call(error)
  end
@@ -255,12 +269,19 @@ module Rdkafka
  RD_KAFKA_VTYPE_TIMESTAMP = 8
  RD_KAFKA_VTYPE_HEADER = 9
  RD_KAFKA_VTYPE_HEADERS = 10
+ RD_KAFKA_PURGE_F_QUEUE = 1
+ RD_KAFKA_PURGE_F_INFLIGHT = 2

  RD_KAFKA_MSG_F_COPY = 0x2

  attach_function :rd_kafka_producev, [:pointer, :varargs], :int, blocking: true
+ attach_function :rd_kafka_purge, [:pointer, :int], :int, blocking: true
  callback :delivery_cb, [:pointer, :pointer, :pointer], :void
  attach_function :rd_kafka_conf_set_dr_msg_cb, [:pointer, :delivery_cb], :void
+ attach_function :rd_kafka_init_transactions, [:pointer, :int], :pointer, blocking: true
+ attach_function :rd_kafka_begin_transaction, [:pointer], :pointer, blocking: true
+ attach_function :rd_kafka_abort_transaction, [:pointer, :int], :pointer, blocking: true
+ attach_function :rd_kafka_commit_transaction, [:pointer, :int], :pointer, blocking: true

  # Partitioner
  PARTITIONERS = %w(random consistent consistent_random murmur2 murmur2_random fnv1a fnv1a_random).each_with_object({}) do |name, hsh|
@@ -68,9 +68,8 @@ module Rdkafka
  response = @native_kafka.with_inner do |inner|
  Rdkafka::Bindings.rd_kafka_subscribe(inner, tpl)
  end
- if response != 0
- raise Rdkafka::RdkafkaError.new(response, "Error subscribing to '#{topics.join(', ')}'")
- end
+
+ Rdkafka::RdkafkaError.validate!(response, "Error subscribing to '#{topics.join(', ')}'")
  ensure
  Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) unless tpl.nil?
  end
@@ -86,9 +85,10 @@ module Rdkafka
  response = @native_kafka.with_inner do |inner|
  Rdkafka::Bindings.rd_kafka_unsubscribe(inner)
  end
- if response != 0
- raise Rdkafka::RdkafkaError.new(response)
- end
+
+ Rdkafka::RdkafkaError.validate!(response)
+
+ nil
  end

  # Pause producing or consumption for the provided list of partitions
@@ -141,9 +141,10 @@ module Rdkafka
  response = @native_kafka.with_inner do |inner|
  Rdkafka::Bindings.rd_kafka_resume_partitions(inner, tpl)
  end
- if response != 0
- raise Rdkafka::RdkafkaError.new(response, "Error resume '#{list.to_h}'")
- end
+
+ Rdkafka::RdkafkaError.validate!(response, "Error resume '#{list.to_h}'")
+
+ nil
  ensure
  Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
  end
@@ -162,9 +163,7 @@ module Rdkafka
  Rdkafka::Bindings.rd_kafka_subscription(inner, ptr)
  end

- if response != 0
- raise Rdkafka::RdkafkaError.new(response)
- end
+ Rdkafka::RdkafkaError.validate!(response)

  native = ptr.read_pointer

@@ -193,9 +192,8 @@ module Rdkafka
  response = @native_kafka.with_inner do |inner|
  Rdkafka::Bindings.rd_kafka_assign(inner, tpl)
  end
- if response != 0
- raise Rdkafka::RdkafkaError.new(response, "Error assigning '#{list.to_h}'")
- end
+
+ Rdkafka::RdkafkaError.validate!(response, "Error assigning '#{list.to_h}'")
  ensure
  Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
  end
@@ -213,9 +211,8 @@ module Rdkafka
  response = @native_kafka.with_inner do |inner|
  Rdkafka::Bindings.rd_kafka_assignment(inner, ptr)
  end
- if response != 0
- raise Rdkafka::RdkafkaError.new(response)
- end
+
+ Rdkafka::RdkafkaError.validate!(response)

  tpl = ptr.read_pointer

@@ -263,9 +260,9 @@ module Rdkafka
  response = @native_kafka.with_inner do |inner|
  Rdkafka::Bindings.rd_kafka_committed(inner, tpl, timeout_ms)
  end
- if response != 0
- raise Rdkafka::RdkafkaError.new(response)
- end
+
+ Rdkafka::RdkafkaError.validate!(response)
+
  TopicPartitionList.from_native_tpl(tpl)
  ensure
  Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
@@ -297,9 +294,8 @@ module Rdkafka
  timeout_ms,
  )
  end
- if response != 0
- raise Rdkafka::RdkafkaError.new(response, "Error querying watermark offsets for partition #{partition} of #{topic}")
- end
+
+ Rdkafka::RdkafkaError.validate!(response, "Error querying watermark offsets for partition #{partition} of #{topic}")

  return low.read_array_of_int64(1).first, high.read_array_of_int64(1).first
  ensure
@@ -387,9 +383,10 @@ module Rdkafka
  message.partition,
  message.offset
  )
- if response != 0
- raise Rdkafka::RdkafkaError.new(response)
- end
+
+ Rdkafka::RdkafkaError.validate!(response)
+
+ nil
  ensure
  if native_topic && !native_topic.null?
  Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic)
@@ -422,9 +419,9 @@ module Rdkafka
  message.offset,
  0 # timeout
  )
- if response != 0
- raise Rdkafka::RdkafkaError.new(response)
- end
+ Rdkafka::RdkafkaError.validate!(response)
+
+ nil
  ensure
  if native_topic && !native_topic.null?
  Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic)
@@ -455,9 +452,7 @@ module Rdkafka
  )
  end

- if response != 0
- raise Rdkafka::RdkafkaError.new(response)
- end
+ Rdkafka::RdkafkaError.validate!(response)

  TopicPartitionList.from_native_tpl(tpl)
  ensure
@@ -492,9 +487,10 @@ module Rdkafka
  response = @native_kafka.with_inner do |inner|
  Rdkafka::Bindings.rd_kafka_commit(inner, tpl, async)
  end
- if response != 0
- raise Rdkafka::RdkafkaError.new(response)
- end
+
+ Rdkafka::RdkafkaError.validate!(response)
+
+ nil
  ensure
  Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
  end
@@ -519,9 +515,9 @@ module Rdkafka
  # Create struct wrapper
  native_message = Rdkafka::Bindings::Message.new(message_ptr)
  # Raise error if needed
- if native_message[:err] != 0
- raise Rdkafka::RdkafkaError.new(native_message[:err])
- end
+
+ Rdkafka::RdkafkaError.validate!(native_message[:err])
+
  # Create a message to pass out
  Rdkafka::Consumer::Message.new(native_message)
  end
data/lib/rdkafka/error.rb CHANGED
@@ -18,12 +18,59 @@ module Rdkafka
  # @return [String]
  attr_reader :broker_message

+ class << self
+ def build_from_c(response_ptr, message_prefix = nil, broker_message: nil)
+ code = Rdkafka::Bindings.rd_kafka_error_code(response_ptr)
+
+ return false if code.zero?
+
+ message = broker_message || Rdkafka::Bindings.rd_kafka_err2str(code)
+ fatal = !Rdkafka::Bindings.rd_kafka_error_is_fatal(response_ptr).zero?
+ retryable = !Rdkafka::Bindings.rd_kafka_error_is_retriable(response_ptr).zero?
+ abortable = !Rdkafka::Bindings.rd_kafka_error_txn_requires_abort(response_ptr).zero?
+
+ Rdkafka::Bindings.rd_kafka_error_destroy(response_ptr)
+
+ new(
+ code,
+ message_prefix,
+ broker_message: message,
+ fatal: fatal,
+ retryable: retryable,
+ abortable: abortable
+ )
+ end
+
+ def build(response_ptr_or_code, message_prefix = nil, broker_message: nil)
+ if response_ptr_or_code.is_a?(Integer)
+ response_ptr_or_code.zero? ? false : new(response_ptr_or_code, message_prefix, broker_message: broker_message)
+ else
+ build_from_c(response_ptr_or_code, message_prefix)
+ end
+ end
+
+ def validate!(response_ptr_or_code, message_prefix = nil, broker_message: nil)
+ error = build(response_ptr_or_code, message_prefix, broker_message: broker_message)
+ error ? raise(error) : false
+ end
+ end
+
  # @private
- def initialize(response, message_prefix=nil, broker_message: nil)
+ def initialize(
+ response,
+ message_prefix=nil,
+ broker_message: nil,
+ fatal: false,
+ retryable: false,
+ abortable: false
+ )
  raise TypeError.new("Response has to be an integer") unless response.is_a? Integer
  @rdkafka_response = response
  @message_prefix = message_prefix
  @broker_message = broker_message
+ @fatal = fatal
+ @retryable = retryable
+ @abortable = abortable
  end

  # This error's code, for example `:partition_eof`, `:msg_size_too_large`.
@@ -58,6 +105,18 @@ module Rdkafka
  def ==(another_error)
  another_error.is_a?(self.class) && (self.to_s == another_error.to_s)
  end
+
+ def fatal?
+ @fatal
+ end
+
+ def retryable?
+ @retryable
+ end
+
+ def abortable?
+ @abortable
+ end
  end

  # Error with topic partition list returned by the underlying rdkafka library.
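These helpers accept either a numeric `rd_kafka_resp_err_t` code or an `rd_kafka_error_t` pointer. A sketch of the integer path, using the `-152` (`purge_queue`) code that the specs below rely on:

```ruby
# A zero response means success, so nothing is built or raised.
Rdkafka::RdkafkaError.validate!(0) #=> false

begin
  Rdkafka::RdkafkaError.validate!(-152)
rescue Rdkafka::RdkafkaError => e
  e.code       #=> :purge_queue
  # Built from a plain integer code, the predicates keep their defaults:
  e.fatal?     #=> false
  e.retryable? #=> false
  e.abortable? #=> false
end
```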
@@ -29,8 +29,7 @@ module Rdkafka
  # Retrieve the Metadata
  result = Rdkafka::Bindings.rd_kafka_metadata(native_client, topic_flag, native_topic, ptr, timeout_ms)

- # Error Handling
- raise Rdkafka::RdkafkaError.new(result) unless result.zero?
+ Rdkafka::RdkafkaError.validate!(result)

  metadata_from_native(ptr.read_pointer)
  rescue ::Rdkafka::RdkafkaError => e
@@ -58,11 +57,12 @@ module Rdkafka

  @topics = Array.new(metadata[:topics_count]) do |i|
  topic = TopicMetadata.new(metadata[:topics_metadata] + (i * TopicMetadata.size))
- raise Rdkafka::RdkafkaError.new(topic[:rd_kafka_resp_err]) unless topic[:rd_kafka_resp_err].zero?
+
+ RdkafkaError.validate!(topic[:rd_kafka_resp_err])

  partitions = Array.new(topic[:partition_count]) do |j|
  partition = PartitionMetadata.new(topic[:partitions_metadata] + (j * PartitionMetadata.size))
- raise Rdkafka::RdkafkaError.new(partition[:rd_kafka_resp_err]) unless partition[:rd_kafka_resp_err].zero?
+ RdkafkaError.validate!(partition[:rd_kafka_resp_err])
  partition.to_h
  end
  topic.to_h.merge!(partitions: partitions)
@@ -31,7 +31,7 @@ module Rdkafka
  # For some errors we will not get a topic name reference, and in cases like this
  # we should not return it
  self[:topic_name].null? ? nil : self[:topic_name].read_string,
- RdkafkaError.new(self[:response])
+ Rdkafka::RdkafkaError.build(self[:response])
  )
  end
  end
@@ -63,6 +63,47 @@ module Rdkafka
  @delivery_callback_arity = arity(callback)
  end

+ # Init transactions
+ # Run once per producer
+ def init_transactions
+ closed_producer_check(__method__)
+
+ @native_kafka.with_inner do |inner|
+ response_ptr = Rdkafka::Bindings.rd_kafka_init_transactions(inner, -1)
+
+ Rdkafka::RdkafkaError.validate!(response_ptr) || true
+ end
+ end
+
+ def begin_transaction
+ closed_producer_check(__method__)
+
+ @native_kafka.with_inner do |inner|
+ response_ptr = Rdkafka::Bindings.rd_kafka_begin_transaction(inner)
+
+ Rdkafka::RdkafkaError.validate!(response_ptr)
+ end
+ end
+
+ def commit_transaction(timeout_ms = -1)
+ closed_producer_check(__method__)
+
+ @native_kafka.with_inner do |inner|
+ response_ptr = Rdkafka::Bindings.rd_kafka_commit_transaction(inner, timeout_ms)
+
+ Rdkafka::RdkafkaError.validate!(response_ptr)
+ end
+ end
+
+ def abort_transaction(timeout_ms = -1)
+ closed_producer_check(__method__)
+
+ @native_kafka.with_inner do |inner|
+ response_ptr = Rdkafka::Bindings.rd_kafka_abort_transaction(inner, timeout_ms)
+ Rdkafka::RdkafkaError.validate!(response_ptr)
+ end
+ end
+
  # Close this producer and wait for the internal poll queue to empty.
  def close
  return if closed?
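Because `commit_transaction` can fail in an abortable way, callers are expected to abort and retry at a higher level. A hedged sketch of that pattern (mirrored by the specs further down):

```ruby
producer.init_transactions
producer.begin_transaction
producer.produce(topic: "example_topic", payload: "data")

begin
  producer.commit_transaction
rescue Rdkafka::RdkafkaError => e
  # On abortable errors the only valid move is to abort the transaction.
  raise unless e.abortable?

  producer.abort_transaction
end
```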
@@ -79,12 +120,50 @@ module Rdkafka
  # in seconds. Call this before closing a producer to ensure delivery of all messages.
  #
  # @param timeout_ms [Integer] how long should we wait for flush of all messages
+ # @return [Boolean] true if there is no more data and everything was flushed, false in case
+ # there are still outgoing messages after the timeout
+ #
+ # @note We raise an exception for other errors because based on the librdkafka docs, there
+ # should be no other errors.
+ #
+ # @note For `timed_out` we do not raise an error to keep it backwards compatible
  def flush(timeout_ms=5_000)
  closed_producer_check(__method__)

+ error = @native_kafka.with_inner do |inner|
+ response = Rdkafka::Bindings.rd_kafka_flush(inner, timeout_ms)
+ Rdkafka::RdkafkaError.build(response)
+ end
+
+ # Early skip not to build the error message
+ return true unless error
+ return false if error.code == :timed_out
+
+ raise(error)
+ end
+
+ # Purges the outgoing queue and releases all resources.
+ #
+ # Useful when closing the producer with outgoing messages to unstable clusters or when for
+ # any other reason waiting cannot go on anymore. This purges both the queue and all the
+ # inflight requests and updates the delivery handle statuses so they can be materialized into
+ # `purge_queue` errors.
+ def purge
+ closed_producer_check(__method__)
+
  @native_kafka.with_inner do |inner|
- Rdkafka::Bindings.rd_kafka_flush(inner, timeout_ms)
+ response = Bindings.rd_kafka_purge(
+ inner,
+ Bindings::RD_KAFKA_PURGE_F_QUEUE | Bindings::RD_KAFKA_PURGE_F_INFLIGHT
+ )
+
+ Rdkafka::RdkafkaError.validate!(response)
  end
+
+ # Wait for the purge to affect everything
+ sleep(0.001) until flush(100)
+
+ true
  end

  # Partition count for a given topic.
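Together, the reworked `#flush` and the new `#purge` support a graceful-then-forced shutdown, roughly like this sketch:

```ruby
# flush now returns a boolean instead of raising on timeout
all_delivered = producer.flush(5_000)

unless all_delivered
  # Drop whatever is still queued or in flight; pending delivery handles
  # will materialize as purge_queue errors.
  producer.purge
end

producer.close
```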
@@ -208,7 +287,7 @@ module Rdkafka
  # Raise error if the produce call was not successful
  if response != 0
  DeliveryHandle.remove(delivery_handle.to_ptr.address)
- raise RdkafkaError.new(response)
+ Rdkafka::RdkafkaError.validate!(response)
  end

  delivery_handle
@@ -1,7 +1,7 @@
  # frozen_string_literal: true

  module Rdkafka
- VERSION = "0.13.5"
+ VERSION = "0.13.6"
  LIBRDKAFKA_VERSION = "2.2.0"
  LIBRDKAFKA_SOURCE_SHA256 = "af9a820cbecbc64115629471df7c7cecd40403b6c34bfdbb9223152677a47226"
  end
@@ -45,10 +45,12 @@ describe Rdkafka::Admin::CreateTopicHandle do
  describe "#raise_error" do
  let(:pending_handle) { false }

- it "should raise the appropriate error" do
+ before { subject[:response] = -1 }
+
+ it "should raise the appropriate error when there is an error" do
  expect {
  subject.raise_error
- }.to raise_exception(Rdkafka::RdkafkaError, /Success \(no_error\)/)
+ }.to raise_exception(Rdkafka::RdkafkaError, /Unknown broker error \(unknown\)/)
  end
  end
  end
@@ -45,10 +45,12 @@ describe Rdkafka::Admin::DeleteTopicHandle do
  describe "#raise_error" do
  let(:pending_handle) { false }

+ before { subject[:response] = -1 }
+
  it "should raise the appropriate error" do
  expect {
  subject.raise_error
- }.to raise_exception(Rdkafka::RdkafkaError, /Success \(no_error\)/)
+ }.to raise_exception(Rdkafka::RdkafkaError, /Unknown broker error \(unknown\)/)
  end
  end
  end
@@ -33,7 +33,7 @@ describe Rdkafka::Admin do
  }.to raise_exception { |ex|
  expect(ex).to be_a(Rdkafka::RdkafkaError)
  expect(ex.message).to match(/Broker: Invalid topic \(topic_exception\)/)
- expect(ex.broker_message).to match(/Topic name.*is illegal, it contains a character other than ASCII alphanumerics/)
+ expect(ex.broker_message).to match(/Topic name.*is invalid: .* contains one or more characters other than ASCII alphanumerics, '.', '_' and '-'/)
  }
  end
  end
@@ -31,7 +31,7 @@ describe Rdkafka::Metadata do
  it "#brokers returns our single broker" do
  expect(subject.brokers.length).to eq(1)
  expect(subject.brokers[0][:broker_id]).to eq(1)
- expect(subject.brokers[0][:broker_name]).to eq("localhost")
+ expect(subject.brokers[0][:broker_name]).to eq("127.0.0.1")
  expect(subject.brokers[0][:broker_port]).to eq(9092)
  end

@@ -54,7 +54,7 @@ describe Rdkafka::Metadata do
  it "#brokers returns our single broker" do
  expect(subject.brokers.length).to eq(1)
  expect(subject.brokers[0][:broker_id]).to eq(1)
- expect(subject.brokers[0][:broker_name]).to eq("localhost")
+ expect(subject.brokers[0][:broker_name]).to eq("127.0.0.1")
  expect(subject.brokers[0][:broker_port]).to eq(9092)
  end

@@ -559,6 +559,22 @@ describe Rdkafka::Producer do
  end
  end

+ context "when not being able to deliver the message" do
+ let(:producer) do
+ rdkafka_producer_config(
+ "bootstrap.servers": "localhost:9093",
+ "message.timeout.ms": 100
+ ).producer
+ end
+
+ it "should contain the error in the response when not deliverable" do
+ handler = producer.produce(topic: 'produce_test_topic', payload: nil)
+ # Wait for the async callbacks and delivery registry to update
+ sleep(2)
+ expect(handler.create_result.error).to be_a(Rdkafka::RdkafkaError)
+ end
+ end
+
  describe '#partition_count' do
  it { expect(producer.partition_count('example_topic')).to eq(1) }

@@ -628,19 +644,274 @@ describe Rdkafka::Producer do
  end
  end

- context "when not being able to deliver the message" do
+ describe '#flush' do
+ it "should return true when it can flush all outstanding messages or when there are no messages" do
+ producer.produce(
+ topic: "produce_test_topic",
+ payload: "payload headers",
+ key: "key headers",
+ headers: {}
+ )
+
+ expect(producer.flush(5_000)).to eq(true)
+ end
+
+ context 'when it cannot flush due to a timeout' do
+ let(:producer) do
+ rdkafka_producer_config(
+ "bootstrap.servers": "localhost:9093",
+ "message.timeout.ms": 2_000
+ ).producer
+ end
+
+ after do
+ # Allow rdkafka to evict the message, preventing a memory leak
+ sleep(2)
+ end
+
+ it "should return false on flush when it cannot deliver and the timeout has passed" do
+ producer.produce(
+ topic: "produce_test_topic",
+ payload: "payload headers",
+ key: "key headers",
+ headers: {}
+ )
+
+ expect(producer.flush(1_000)).to eq(false)
+ end
+ end
+
+ context 'when there is a different error' do
+ before { allow(Rdkafka::Bindings).to receive(:rd_kafka_flush).and_return(-199) }
+
+ it 'should raise it' do
+ expect { producer.flush }.to raise_error(Rdkafka::RdkafkaError)
+ end
+ end
+ end
+
+ describe '#purge' do
+ context 'when no outgoing messages' do
+ it { expect(producer.purge).to eq(true) }
+ end
+
+ context 'when librdkafka purge returns an error' do
+ before { expect(Rdkafka::Bindings).to receive(:rd_kafka_purge).and_return(-153) }
+
+ it 'expect to raise an error' do
+ expect { producer.purge }.to raise_error(Rdkafka::RdkafkaError, /retry/)
+ end
+ end
+
+ context 'when there are outgoing things in the queue' do
+ let(:producer) do
+ rdkafka_producer_config(
+ "bootstrap.servers": "localhost:9093",
+ "message.timeout.ms": 2_000
+ ).producer
+ end
+
+ it "should purge and move forward" do
+ producer.produce(
+ topic: "produce_test_topic",
+ payload: "payload headers"
+ )
+
+ expect(producer.purge).to eq(true)
+ expect(producer.flush(1_000)).to eq(true)
+ end
+
+ it "should materialize the delivery handles" do
+ handle = producer.produce(
+ topic: "produce_test_topic",
+ payload: "payload headers"
+ )
+
+ expect(producer.purge).to eq(true)
+
+ expect { handle.wait }.to raise_error(Rdkafka::RdkafkaError, /purge_queue/)
+ end
+
+ context "when using delivery_callback" do
+ let(:delivery_reports) { [] }
+
+ let(:delivery_callback) do
+ ->(delivery_report) { delivery_reports << delivery_report }
+ end
+
+ before { producer.delivery_callback = delivery_callback }
+
+ it "should run the callback" do
+ handle = producer.produce(
+ topic: "produce_test_topic",
+ payload: "payload headers"
+ )
+
+ expect(producer.purge).to eq(true)
+ # queue purge
+ expect(delivery_reports[0].error).to eq(-152)
+ end
+ end
+ end
+ end
+
+ context 'when working with transactions' do
  let(:producer) do
  rdkafka_producer_config(
- "bootstrap.servers": "localhost:9093",
- "message.timeout.ms": 100
+ 'transactional.id': SecureRandom.uuid,
+ 'transaction.timeout.ms': 5_000
  ).producer
  end

- it "should contain the error in the response when not deliverable" do
- handler = producer.produce(topic: 'produce_test_topic', payload: nil)
- # Wait for the async callbacks and delivery registry to update
- sleep(2)
- expect(handler.create_result.error).to be_a(Rdkafka::RdkafkaError)
+ it 'expect not to allow to produce without transaction init' do
+ expect do
+ producer.produce(topic: 'produce_test_topic', payload: 'data')
+ end.to raise_error(Rdkafka::RdkafkaError, /Erroneous state \(state\)/)
+ end
+
+ it 'expect to raise error when transactions are initialized but producing not in one' do
+ producer.init_transactions
+
+ expect do
+ producer.produce(topic: 'produce_test_topic', payload: 'data')
+ end.to raise_error(Rdkafka::RdkafkaError, /Erroneous state \(state\)/)
+ end
+
+ it 'expect to allow to produce within a transaction, finalize and ship data' do
+ producer.init_transactions
+ producer.begin_transaction
+ handle1 = producer.produce(topic: 'produce_test_topic', payload: 'data1', partition: 1)
+ handle2 = producer.produce(topic: 'example_topic', payload: 'data2', partition: 0)
+ producer.commit_transaction
+
+ report1 = handle1.wait(max_wait_timeout: 15)
+ report2 = handle2.wait(max_wait_timeout: 15)
+
+ message1 = wait_for_message(
+ topic: "produce_test_topic",
+ delivery_report: report1,
+ consumer: consumer
+ )
+
+ expect(message1.partition).to eq 1
+ expect(message1.payload).to eq "data1"
+ expect(message1.timestamp).to be_within(10).of(Time.now)
+
+ message2 = wait_for_message(
+ topic: "example_topic",
+ delivery_report: report2,
+ consumer: consumer
+ )
+
+ expect(message2.partition).to eq 0
+ expect(message2.payload).to eq "data2"
+ expect(message2.timestamp).to be_within(10).of(Time.now)
+ end
+
+ it 'expect not to send data and propagate purge queue error on abort' do
+ producer.init_transactions
+ producer.begin_transaction
+ handle1 = producer.produce(topic: 'produce_test_topic', payload: 'data1', partition: 1)
+ handle2 = producer.produce(topic: 'example_topic', payload: 'data2', partition: 0)
+ producer.abort_transaction
+
+ expect { handle1.wait(max_wait_timeout: 15) }
+ .to raise_error(Rdkafka::RdkafkaError, /Purged in queue \(purge_queue\)/)
+ expect { handle2.wait(max_wait_timeout: 15) }
+ .to raise_error(Rdkafka::RdkafkaError, /Purged in queue \(purge_queue\)/)
+ end
+
+ it 'expect to have a non-retryable, non-abortable and non-fatal error on abort' do
+ producer.init_transactions
+ producer.begin_transaction
+ handle = producer.produce(topic: 'produce_test_topic', payload: 'data1', partition: 1)
+ producer.abort_transaction
+
+ response = handle.wait(raise_response_error: false)
+
+ expect(response.error).to be_a(Rdkafka::RdkafkaError)
+ expect(response.error.retryable?).to eq(false)
+ expect(response.error.fatal?).to eq(false)
+ expect(response.error.abortable?).to eq(false)
+ end
+
+ # This may not always crash; it depends on load, but there is no other way to check it
+ context 'when timeout is too short and an error occurs and we can abort' do
+ let(:producer) do
+ rdkafka_producer_config(
+ 'transactional.id': SecureRandom.uuid,
+ 'transaction.timeout.ms': 1_000
+ ).producer
+ end
+
+ it 'expect to allow to produce within a transaction, finalize and ship data' do
+ producer.init_transactions
+ producer.begin_transaction
+
+ handle1 = producer.produce(topic: 'produce_test_topic', payload: 'data1', partition: 1)
+ handle2 = producer.produce(topic: 'example_topic', payload: 'data2', partition: 0)
+
+ begin
+ producer.commit_transaction
+ rescue Rdkafka::RdkafkaError => e
+ next unless e.abortable?
+
+ producer.abort_transaction
+
+ expect { handle1.wait(max_wait_timeout: 15) }.to raise_error(Rdkafka::RdkafkaError)
+ expect { handle2.wait(max_wait_timeout: 15) }.to raise_error(Rdkafka::RdkafkaError)
+ end
+ end
+ end
+
+ context 'fencing against a previous active producer with the same transactional id' do
+ let(:transactional_id) { SecureRandom.uuid }
+
+ let(:producer1) do
+ rdkafka_producer_config(
+ 'transactional.id': transactional_id,
+ 'transaction.timeout.ms': 10_000
+ ).producer
+ end
+
+ let(:producer2) do
+ rdkafka_producer_config(
+ 'transactional.id': transactional_id,
+ 'transaction.timeout.ms': 10_000
+ ).producer
+ end
+
+ after do
+ producer1.close
+ producer2.close
+ end
+
+ it 'expect older producer not to be able to commit when fenced out' do
+ producer1.init_transactions
+ producer1.begin_transaction
+ producer1.produce(topic: 'produce_test_topic', payload: 'data1', partition: 1)
+
+ producer2.init_transactions
+ producer2.begin_transaction
+ producer2.produce(topic: 'produce_test_topic', payload: 'data1', partition: 1)
+
+ expect { producer1.commit_transaction }
+ .to raise_error(Rdkafka::RdkafkaError, /This instance has been fenced/)
+
+ error = false
+
+ begin
+ producer1.commit_transaction
+ rescue Rdkafka::RdkafkaError => e
+ error = e
+ end
+
+ expect(error.fatal?).to eq(true)
+ expect(error.abortable?).to eq(false)
+ expect(error.retryable?).to eq(false)
+
+ expect { producer2.commit_transaction }.not_to raise_error
+ end
  end
  end
  end
data/spec/spec_helper.rb CHANGED
@@ -11,6 +11,7 @@ require "pry"
  require "rspec"
  require "rdkafka"
  require "timeout"
+ require 'securerandom'

  def rdkafka_base_config
  {
@@ -134,6 +135,7 @@ RSpec.configure do |config|
  rake_test_topic: 3,
  watermarks_test_topic: 3,
  partitioner_test_topic: 25,
+ example_topic: 1
  }.each do |topic, partitions|
  create_topic_handle = admin.create_topic(topic.to_s, partitions, 1)
  begin
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: karafka-rdkafka
  version: !ruby/object:Gem::Version
- version: 0.13.5
+ version: 0.13.6
  platform: ruby
  authors:
  - Thijs Cadier
@@ -35,7 +35,7 @@ cert_chain:
  AnG1dJU+yL2BK7vaVytLTstJME5mepSZ46qqIJXMuWob/YPDmVaBF39TDSG9e34s
  msG3BiCqgOgHAnL23+CN3Rt8MsuRfEtoTKpJVcCfoEoNHOkc
  -----END CERTIFICATE-----
- date: 2023-09-16 00:00:00.000000000 Z
+ date: 2023-10-17 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: ffi
metadata.gz.sig CHANGED
Binary file