fluent-plugin-kafka 0.19.2 → 0.19.3

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: 62d114017cabc47f58cd39db1ea21dd8cfb6d76126c4804741e25766a5b4576c
-   data.tar.gz: f5e4ea9adb96a8f2f46077c0ac89b51e1c177ea0933242ed6cbb6472e40577a2
+   metadata.gz: 8be5590390d2220ee64b54e442178369ae55be6a111cdb60c9815fe2f96e9337
+   data.tar.gz: 8a76798c2d4c0f8abaaf3b0c209c0a4e6770aeb825756ef6715861e392f6b051
  SHA512:
-   metadata.gz: 80f4d2ce4d23d95837d3b344841339fc517581bc3347e17e5352a135b088756317b970b559eb66f05e0216ce8841c9e374c5ed0fd09e48d22346249ec9aec0b9
-   data.tar.gz: 8d89ff55bf6ac92e5d2c7a3942882a6b307e6fe19ede620bf299d54feb6f2f1d70fcdece731008324a993c96c69f44456a71798c9af879666de9d017b38e39c3
+   metadata.gz: 2d8fc83684627125a52398ff3561ba4e0ac878bac7b6ab7075ab9d07f3a018ceaa132b013652290198f64333a336f614384a0b7c18392d05e6494bb3fe259ab4
+   data.tar.gz: 98257dcd81f581697a702ba85d6e2c01c497c5dc98d9dfbc3f908856e6b1d418380ce5c6fb9418fe69c2fc7ec5889b3c84c1aeb6ba4182a6cf3c08a7bd87fff5
data/.github/workflows/linux.yml CHANGED
@@ -12,12 +12,26 @@ jobs:
    strategy:
      fail-fast: false
      matrix:
-       ruby: [ '3.2', '3.1', '3.0', '2.7' ]
+       ruby: [ '3.3', '3.2', '3.1', '3.0' ]
        os:
          - ubuntu-latest
        rdkafka_versions:
          - { min: '>= 0.6.0', max: '< 0.12.0' }
-         - { min: '>= 0.12.0', max: '>= 0.12.0' }
+         - { min: '>= 0.12.0', max: '< 0.14.0' }
+         - { min: '>= 0.14.0', max: '< 0.16.0' }
+         - { min: '>= 0.16.0', max: '>= 0.16.0' }
+       bundler_version:
+         - '2.5.16'
+       # rdkafka 0.15.2 is the last version which supports Ruby 2.7
+       include:
+         - ruby: '2.7'
+           os: ubuntu-latest
+           rdkafka_versions: { min: '>= 0.6.0', max: '< 0.12.0' }
+           bundler_version: '2.4.22'
+         - ruby: '2.7'
+           os: ubuntu-latest
+           rdkafka_versions: { min: '>= 0.12.0', max: '= 0.15.2' }
+           bundler_version: '2.4.22'
    name: Ruby ${{ matrix.ruby }} unit testing on ${{ matrix.os }} with rdkafka gem version (min ${{ matrix.rdkafka_versions.min }} max ${{ matrix.rdkafka_versions.max }})
    steps:
      - uses: actions/checkout@v4
@@ -40,6 +54,7 @@ jobs:
        RDKAFKA_VERSION_MAX_RANGE: ${{ matrix.rdkafka_versions.max }}
      run: |
        sudo ./ci/prepare-kafka-server.sh
-       gem install bundler rake
-       bundle install --jobs 4 --retry 3
-       bundle exec rake test
+       gem install bundler -v ${{ matrix.bundler_version }}
+       gem install rake
+       bundle _${{ matrix.bundler_version }}_ install --jobs 4 --retry 3
+       bundle _${{ matrix.bundler_version }}_ exec rake test
data/.github/workflows/stale.yml CHANGED
@@ -7,7 +7,7 @@ jobs:
    stale:
      runs-on: ubuntu-latest
      steps:
-       - uses: actions/stale@v8
+       - uses: actions/stale@v9
          with:
            repo-token: ${{ secrets.GITHUB_TOKEN }}
            days-before-stale: 90
data/ChangeLog CHANGED
@@ -1,3 +1,13 @@
+ Release 0.19.3 - 2024/08/02
+   * out_rdkafka2: Add `unrecoverable_error_codes` parameter to handle
+     specific error codes as unrecoverable errors. `topic_authorization_failed`
+     and `msg_size_too_large` are treated as unrecoverable errors by default. (#510)
+   * out_rdkafka2: Add missing closing timeout feature to keep compatibility with
+     rdkafka-ruby 0.12.x or later. (#505)
+   * out_rdkafka2: Add `idempotent` parameter to enable idempotence in the Kafka producer. (#501)
+   * out_kafka2: Fix errors while sending data to EventHub by adding a
+     broker pool that takes care of fetching metadata (#503)
+
  Release 0.19.2 - 2023/10/13
    * out_rdkafka2: Add `discard_kafka_delivery_failed_regex`

data/README.md CHANGED
@@ -212,6 +212,7 @@ If `ruby-kafka` doesn't fit your kafka environment, check `rdkafka2` plugin inst
  discard_kafka_delivery_failed (bool) :default => false (No discard)
  partitioner_hash_function (enum) (crc32|murmur2) :default => 'crc32'
  share_producer (bool) :default => false
+ idempotent (bool) :default => false

  # If you intend to rely on AWS IAM auth to MSK with long lived credentials
  # https://docs.aws.amazon.com/msk/latest/developerguide/iam-access-control.html
@@ -509,7 +510,6 @@ You need to install rdkafka gem.
  partition_key (string) :default => 'partition'
  partition_key_key (string) :default => 'partition_key'
  message_key_key (string) :default => 'message_key'
- default_topic (string) :default => nil
  use_default_for_unknown_topic (bool) :default => false
  use_default_for_unknown_partition_error (bool) :default => false
  default_partition_key (string) :default => nil
@@ -558,6 +558,8 @@ You need to install rdkafka gem.
  # load of both Fluentd and Kafka when excessive messages are attempted
  # to send. Default is no limit.
  max_enqueue_bytes_per_second (integer) :default => nil
+ unrecoverable_error_codes (array) :default => ["topic_authorization_failed", "msg_size_too_large"]
+
  </match>

  `rdkafka2` supports `discard_kafka_delivery_failed_regex` parameter:
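The values in `unrecoverable_error_codes` are matched against the librdkafka error-code name, lower-cased. A minimal sketch, assuming rdkafka-ruby is installed and relying on its `Rdkafka::RdkafkaError` API (response code 10 is librdkafka's `MSG_SIZE_TOO_LARGE`):

    require "rdkafka"

    # RdkafkaError#code turns the librdkafka enum name into a lower-cased symbol.
    err = Rdkafka::RdkafkaError.new(10)
    p err.code       # => :msg_size_too_large
    p err.code.to_s  # => "msg_size_too_large", the form the plugin compares against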
data/fluent-plugin-kafka.gemspec CHANGED
@@ -13,7 +13,7 @@ Gem::Specification.new do |gem|
  gem.test_files = gem.files.grep(%r{^(test|spec|features)/})
  gem.name = "fluent-plugin-kafka"
  gem.require_paths = ["lib"]
- gem.version = '0.19.2'
+ gem.version = '0.19.3'
  gem.required_ruby_version = ">= 2.1.0"

  gem.add_dependency "fluentd", [">= 0.10.58", "< 2"]
data/lib/fluent/plugin/kafka_producer_ext.rb CHANGED
@@ -93,6 +93,10 @@ module Kafka
  @max_buffer_bytesize = max_buffer_bytesize
  @compressor = compressor
  @partitioner = partitioner
+
+ # The set of topics that are produced to.
+ @target_topics = Set.new
+
  # A buffer organized by topic/partition.
  @buffer = MessageBuffer.new

@@ -116,7 +120,8 @@ module Kafka
  if @transaction_manager.transactional? && !@transaction_manager.in_transaction?
    raise 'You must trigger begin_transaction before producing messages'
  end
-
+
+ @target_topics.add(topic)
  @pending_message_queue.write(message)

  nil
@@ -187,7 +192,7 @@ module Kafka
  def deliver_messages_with_retries
    attempt = 0

- #@cluster.add_target_topics(@target_topics)
+ @cluster.add_target_topics(@target_topics)

    operation = ProduceOperation.new(
      cluster: @cluster,
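The two hunks above implement the EventHub fix (#503): ruby-kafka's cluster only fetches broker and partition metadata for topics registered as targets, so topics seen only at produce time could miss metadata at delivery. A self-contained sketch of the pattern, with a hypothetical `FakeCluster` standing in for ruby-kafka's `Kafka::Cluster`:

    require "set"

    # Hypothetical stand-in for Kafka::Cluster; the real class fetches
    # broker and partition metadata for every registered target topic.
    class FakeCluster
      def initialize
        @topics = Set.new
      end

      def add_target_topics(topics)
        @topics.merge(topics)  # idempotent, safe to call before every delivery
      end

      def topics
        @topics.to_a
      end
    end

    target_topics = Set.new
    target_topics.add("events")               # recorded once per produce() call
    cluster = FakeCluster.new
    cluster.add_target_topics(target_topics)  # registered before each delivery
    p cluster.topics                          # => ["events"]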
data/lib/fluent/plugin/out_rdkafka2.rb CHANGED
@@ -5,48 +5,19 @@ require 'fluent/plugin/kafka_plugin_util'

  require 'rdkafka'

- # This is required for `rdkafka` version >= 0.12.0
- # Overriding the close method in order to provide a time limit for when it should be forcibly closed
- class Rdkafka::Producer::Client
-   # return false if producer is forcefully closed, otherwise return true
-   def close(timeout=nil)
-     return unless @native
-
-     # Indicate to polling thread that we're closing
-     @polling_thread[:closing] = true
-     # Wait for the polling thread to finish up
-     thread = @polling_thread.join(timeout)
-
-     Rdkafka::Bindings.rd_kafka_destroy(@native)
-
-     @native = nil
-
-     return !thread.nil?
-   end
- end
-
- class Rdkafka::Producer
-   # return false if producer is forcefully closed, otherwise return true
-   def close(timeout = nil)
-     rdkafka_version = Rdkafka::VERSION || '0.0.0'
-     # Rdkafka version >= 0.12.0 changed its internals
-     if Gem::Version::create(rdkafka_version) >= Gem::Version.create('0.12.0')
-       ObjectSpace.undefine_finalizer(self)
-
-       return @client.close(timeout)
-     end
-
-     @closing = true
-     # Wait for the polling thread to finish up
-     # If the broker isn't alive, the thread doesn't exit
-     if timeout
-       thr = @polling_thread.join(timeout)
-       return !!thr
-     else
-       @polling_thread.join
-       return true
-     end
+ begin
+   rdkafka_version = Gem::Version::create(Rdkafka::VERSION)
+   if rdkafka_version < Gem::Version.create('0.12.0')
+     require_relative 'rdkafka_patch/0_11_0'
+   elsif rdkafka_version == Gem::Version.create('0.12.0')
+     require_relative 'rdkafka_patch/0_12_0'
+   elsif rdkafka_version >= Gem::Version.create('0.16.0')
+     require_relative 'rdkafka_patch/0_16_0'
+   elsif rdkafka_version >= Gem::Version.create('0.14.0')
+     require_relative 'rdkafka_patch/0_14_0'
    end
+ rescue LoadError, NameError
+   raise "unable to patch rdkafka."
  end

  module Fluent::Plugin
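One thing to note about the version dispatch above: `Gem::Version` range checks must be ordered from the newest range down, since a broad `>= 0.14.0` branch tested first would also swallow 0.16.x installs. A small sketch of the comparison logic in isolation (the version strings are illustrative):

    def patch_for(version_string)
      v = Gem::Version.create(version_string)
      if    v >= Gem::Version.create("0.16.0") then "0_16_0"
      elsif v >= Gem::Version.create("0.14.0") then "0_14_0"
      elsif v == Gem::Version.create("0.12.0") then "0_12_0"
      elsif v <  Gem::Version.create("0.12.0") then "0_11_0"
      end
    end

    p patch_for("0.16.1")  # => "0_16_0" (would match "0_14_0" if 0.14 were tested first)
    p patch_for("0.11.1")  # => "0_11_0"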
@@ -71,6 +42,7 @@ DESC
  config_param :default_message_key, :string, :default => nil
  config_param :partition_key, :string, :default => 'partition', :desc => "Field for kafka partition"
  config_param :default_partition, :integer, :default => nil
+ config_param :idempotent, :bool, :default => false, :desc => 'Enable idempotent producer'
  config_param :output_data_type, :string, :default => 'json', :obsoleted => "Use <format> section instead"
  config_param :output_include_tag, :bool, :default => false, :obsoleted => "Use <inject> section instead"
  config_param :output_include_time, :bool, :default => false, :obsoleted => "Use <inject> section instead"
@@ -126,6 +98,8 @@ DESC
  config_param :max_enqueue_bytes_per_second, :size, :default => nil, :desc => 'The maximum number of enqueueing bytes per second'

  config_param :service_name, :string, :default => nil, :desc => 'Used for sasl.kerberos.service.name'
+ config_param :unrecoverable_error_codes, :array, :default => ["topic_authorization_failed", "msg_size_too_large"],
+              :desc => 'Error codes that should be treated as unrecoverable errors'

  config_section :buffer do
    config_set_default :chunk_keys, ["topic"]
@@ -313,6 +287,7 @@ DESC
  config[:"batch.num.messages"] = @rdkafka_message_max_num if @rdkafka_message_max_num
  config[:"sasl.username"] = @username if @username
  config[:"sasl.password"] = @password if @password
+ config[:"enable.idempotence"] = @idempotent if @idempotent

  @rdkafka_options.each { |k, v|
    config[k.to_sym] = v
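`enable.idempotence` is a standard librdkafka property: it turns on the idempotent producer, which prevents duplicates from internal retries and preserves per-partition ordering (librdkafka enforces `acks=all` and a bounded in-flight count when it is set). A hedged sketch of the equivalent raw rdkafka-ruby setup; the broker address is an assumption:

    require "rdkafka"

    config = Rdkafka::Config.new(
      :"bootstrap.servers"  => "localhost:9092",  # assumption: local test broker
      :"enable.idempotence" => true               # what `idempotent true` turns on
    )
    producer = config.producer
    producer.produce(topic: "events", payload: "hello").wait
    producer.close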
@@ -522,7 +497,12 @@ DESC

      raise e
    else
-     raise e
+     if unrecoverable_error_codes.include?(e.code.to_s)
+       # some error codes should be handled as unrecoverable
+       raise Fluent::UnrecoverableError, "Rejected due to #{e}"
+     else
+       raise e
+     end
    end
  end
end
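The branch above decides Fluentd's retry behavior: `Fluent::UnrecoverableError` stops retries for the chunk and routes it to a `<secondary>` output if one is configured (otherwise it is dropped), while any other `Rdkafka::RdkafkaError` follows the normal retry/backoff policy. A minimal sketch of the decision, with the default code list inlined:

    # Default list from `unrecoverable_error_codes`.
    codes = ["topic_authorization_failed", "msg_size_too_large"]

    # e.code on an Rdkafka::RdkafkaError is a symbol; to_s gives the comparable name.
    [:msg_size_too_large, :leader_not_available].each do |error_code|
      if codes.include?(error_code.to_s)
        puts "#{error_code}: unrecoverable -- no retry, chunk goes to <secondary>"
      else
        puts "#{error_code}: recoverable -- retried per <buffer> settings"
      end
    end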
data/lib/fluent/plugin/rdkafka_patch/0_11_0.rb ADDED
@@ -0,0 +1,15 @@
+ class Rdkafka::Producer
+   # return false if producer is forcefully closed, otherwise return true
+   def close(timeout = nil)
+     @closing = true
+     # Wait for the polling thread to finish up
+     # If the broker isn't alive, the thread doesn't exit
+     if timeout
+       thr = @polling_thread.join(timeout)
+       return !!thr
+     else
+       @polling_thread.join
+       return true
+     end
+   end
+ end
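All of these patches lean on the same Ruby primitive: `Thread#join(limit)` returns the thread on success and `nil` on timeout, so `!!join(limit)` answers "did the polling thread stop in time?". A tiny self-contained illustration:

    # Simulates a polling thread whose broker never answers.
    stuck = Thread.new { sleep }

    puts !!stuck.join(0.1)  # => false: join timed out, close would be "forced"
    stuck.kill

    done = Thread.new { }   # a polling thread that exits promptly
    puts !!done.join(0.1)   # => true: clean close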
data/lib/fluent/plugin/rdkafka_patch/0_12_0.rb ADDED
@@ -0,0 +1,27 @@
+ # This is required for `rdkafka` version >= 0.12.0
+ # Overriding the close method in order to provide a time limit for when it should be forcibly closed
+ class Rdkafka::Producer::Client
+   # return false if producer is forcefully closed, otherwise return true
+   def close(timeout=nil)
+     return unless @native
+
+     # Indicate to polling thread that we're closing
+     @polling_thread[:closing] = true
+     # Wait for the polling thread to finish up
+     thread = @polling_thread.join(timeout)
+
+     Rdkafka::Bindings.rd_kafka_destroy(@native)
+
+     @native = nil
+
+     return !thread.nil?
+   end
+ end
+
+ class Rdkafka::Producer
+   def close(timeout = nil)
+     ObjectSpace.undefine_finalizer(self)
+
+     return @client.close(timeout)
+   end
+ end
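The `ObjectSpace.undefine_finalizer(self)` call matters because rdkafka-ruby registers a GC finalizer that closes the native handle; after a manual `close`, letting that finalizer fire again would double-free the handle. A minimal sketch of the manual-close-wins pattern (the `Handle` class is hypothetical):

    class Handle
      # The finalizer proc must not capture `self`, or the object is never collected.
      def self.finalizer(id)
        proc { puts "finalizer: closing handle of object #{id}" }
      end

      def initialize
        ObjectSpace.define_finalizer(self, self.class.finalizer(object_id))
      end

      def close
        ObjectSpace.undefine_finalizer(self)  # manual close wins; GC stays silent
        puts "manual close"
      end
    end

    Handle.new.close  # prints "manual close" only; no finalizer output at exit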
data/lib/fluent/plugin/rdkafka_patch/0_14_0.rb ADDED
@@ -0,0 +1,44 @@
+ class Rdkafka::NativeKafka
+   # return false if producer is forcefully closed, otherwise return true
+   def close(timeout=nil, object_id=nil)
+     return true if closed?
+
+     synchronize do
+       # Indicate to the outside world that we are closing
+       @closing = true
+
+       thread_status = :unknown
+       if @polling_thread
+         # Indicate to polling thread that we're closing
+         @polling_thread[:closing] = true
+
+         # Wait for the polling thread to finish up,
+         # this can be aborted in practice if this
+         # code runs from a finalizer.
+         thread_status = @polling_thread.join(timeout)
+       end
+
+       # Destroy the client after locking both mutexes
+       @poll_mutex.lock
+
+       # This check prevents a race condition, where we would enter the close in two threads
+       # and after unlocking the primary one that held the lock but finished, ours would be unlocked
+       # and would continue to run, trying to destroy inner twice
+       if @inner
+         Rdkafka::Bindings.rd_kafka_destroy(@inner)
+         @inner = nil
+         @opaque = nil
+       end
+
+       !thread_status.nil?
+     end
+   end
+ end
+
+ class Rdkafka::Producer
+   def close(timeout = nil)
+     return true if closed?
+     ObjectSpace.undefine_finalizer(self)
+     @native_kafka.close(timeout)
+   end
+ end
data/lib/fluent/plugin/rdkafka_patch/0_16_0.rb ADDED
@@ -0,0 +1,55 @@
+ class Rdkafka::NativeKafka
+   # return false if producer is forcefully closed, otherwise return true
+   def close(timeout=nil, object_id=nil)
+     return true if closed?
+
+     synchronize do
+       # Indicate to the outside world that we are closing
+       @closing = true
+
+       thread_status = :unknown
+       if @polling_thread
+         # Indicate to polling thread that we're closing
+         @polling_thread[:closing] = true
+
+         # Wait for the polling thread to finish up,
+         # this can be aborted in practice if this
+         # code runs from a finalizer.
+         thread_status = @polling_thread.join(timeout)
+       end
+
+       # Destroy the client after locking both mutexes
+       @poll_mutex.lock
+
+       # This check prevents a race condition, where we would enter the close in two threads
+       # and after unlocking the primary one that held the lock but finished, ours would be unlocked
+       # and would continue to run, trying to destroy inner twice
+       return unless @inner
+
+       # Run the caller-supplied cleanup (topic references) before destroying the handle
+       yield if block_given?
+
+       Rdkafka::Bindings.rd_kafka_destroy(@inner)
+       @inner = nil
+       @opaque = nil
+
+       !thread_status.nil?
+     end
+   end
+ end
+
+ class Rdkafka::Producer
+   def close(timeout = nil)
+     return true if closed?
+     ObjectSpace.undefine_finalizer(self)
+
+     @native_kafka.close(timeout) do
+       # We need to remove the topic reference objects before we destroy the producer,
+       # otherwise they would leak out
+       @topics_refs_map.each_value do |refs|
+         refs.each_value do |ref|
+           Rdkafka::Bindings.rd_kafka_topic_destroy(ref)
+         end
+       end
+     end
+
+     @topics_refs_map.clear
+   end
+ end
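For rdkafka >= 0.16 the producer also caches per-topic native references in `@topics_refs_map`, and those must be destroyed before the client handle itself; passing the cleanup as a block into `NativeKafka#close` keeps that ordering in one place. A hypothetical sketch of the block-before-destroy pattern:

    # Hypothetical close helper mirroring the ordering used above:
    # caller-supplied cleanup runs first, then the handle is destroyed.
    def close_native(&cleanup)
      cleanup&.call            # e.g. rd_kafka_topic_destroy for each cached ref
      puts "rd_kafka_destroy"  # the client handle goes last
    end

    topics_refs_map = { "events" => { 0 => :native_topic_ref } }
    close_native do
      topics_refs_map.each_value do |refs|
        refs.each_value { |ref| puts "rd_kafka_topic_destroy(#{ref})" }
      end
    end
    topics_refs_map.clear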
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: fluent-plugin-kafka
  version: !ruby/object:Gem::Version
-   version: 0.19.2
+   version: 0.19.3
  platform: ruby
  authors:
  - Hidemasa Togashi
@@ -9,7 +9,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2023-10-13 00:00:00.000000000 Z
+ date: 2024-08-02 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: fluentd
@@ -171,6 +171,10 @@ files:
  - lib/fluent/plugin/out_kafka_buffered.rb
  - lib/fluent/plugin/out_rdkafka.rb
  - lib/fluent/plugin/out_rdkafka2.rb
+ - lib/fluent/plugin/rdkafka_patch/0_11_0.rb
+ - lib/fluent/plugin/rdkafka_patch/0_12_0.rb
+ - lib/fluent/plugin/rdkafka_patch/0_14_0.rb
+ - lib/fluent/plugin/rdkafka_patch/0_16_0.rb
  - test/helper.rb
  - test/plugin/test_in_kafka.rb
  - test/plugin/test_in_kafka_group.rb