karafka-rdkafka 0.14.8 → 0.14.10

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 610db3521dfe3b63d906f2d8814756c04882cc68603a218842cc41b487124133
-  data.tar.gz: d870f56e2d7fbc1ae8d14cda5757a02d01d121740da7656deab08341904f4677
+  metadata.gz: 92d61e2b492453bf19ead6abf1c9377d5222aeba36c19c25caff5641a2c8fb1b
+  data.tar.gz: 4170931c8ced8d09813d22359c36d36da09b3f39d2429daad6a342dd3df2982c
 SHA512:
-  metadata.gz: 17bca0d972de4d24d2de1c1b39ac38bab36eff5f856dd65448215fb53a88fcc8bc0b61e856cf18802ad6649938f52b2d25fdc76b0055396821866841c8067e3a
-  data.tar.gz: 87742fba542fc3df5af16e41baa36d5a7d0c33304f37e54d6ffc50121a4dcf7173a717a0e37353b77cac8828ec707aab54056e3eba53ecfd2780dd87ef9b21ec
+  metadata.gz: ceae2da64aad6589779b60160fc28b5db6bc305c289f76bbf7f0aa034afdf88277b3aa75f4c14ec8b441250e81bca7bf7afededea22c2eb0568753cf6968c2b3
+  data.tar.gz: 61a3c8a40bca6d598782b093f6a2575fcdbe247d6a31c624f95e236015d713e3d2ace38e2fae43b622b9e4c6b37be57ef0f2d508a0a57310d3eeb79404146436
checksums.yaml.gz.sig CHANGED
Binary file
data/CHANGELOG.md CHANGED
@@ -1,5 +1,12 @@
 # Rdkafka Changelog
 
+## 0.14.10 (2024-02-08)
+- [Fix] Background logger stops working after forking, causing memory leaks (mensfeld).
+
+## 0.14.9 (2024-01-29)
+- [Fix] Partition cache caches an invalid `nil` result for `PARTITIONS_COUNT_TTL`.
+- [Enhancement] Report `-1` instead of `nil` in case of `partition_count` failure.
+
 ## 0.14.8 (2024-01-24)
 - [Enhancement] Provide support for Nix OS (alexandriainfantino)
 - [Enhancement] Skip intermediate array creation on delivery report callback execution (one per message) (mensfeld)
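The 0.14.10 fix addresses a classic POSIX pitfall: `fork(2)` copies only the calling thread into the child, so any background thread, including the gem's log-draining thread, simply does not exist there, while log entries keep piling up in the unbounded queue. A minimal standalone repro of that failure mode (illustrative only, not the gem's code; requires MRI on a POSIX system):

```ruby
require 'logger'

queue  = Queue.new
logger = Logger.new($stdout)

# Background drainer spawned in the parent -- the pre-fix gem did this at load time
Thread.start { loop { logger.add(*queue.pop) } }

fork do
  # Only the forking thread survives in the child; the drainer above is dead here
  10.times { queue << [Logger::INFO, 'written in the child'] }
  sleep 0.2
  puts "child queue size: #{queue.size} (never drained => leak)"
end

queue << [Logger::INFO, 'drained in the parent']
Process.wait
```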
data/karafka-rdkafka.gemspec CHANGED
@@ -3,10 +3,10 @@
 require File.expand_path('lib/rdkafka/version', __dir__)
 
 Gem::Specification.new do |gem|
-  gem.authors = ['Thijs Cadier']
+  gem.authors = ['Thijs Cadier', 'Maciej Mensfeld']
   gem.email = ["contact@karafka.io"]
   gem.description = "Modern Kafka client library for Ruby based on librdkafka"
-  gem.summary = "The rdkafka gem is a modern Kafka client library for Ruby based on librdkafka. It wraps the production-ready C client using the ffi gem and targets Kafka 1.0+ and Ruby 2.4+."
+  gem.summary = "The rdkafka gem is a modern Kafka client library for Ruby based on librdkafka. It wraps the production-ready C client using the ffi gem and targets Kafka 1.0+ and Ruby 2.7+."
   gem.license = 'MIT'
 
   gem.files = `git ls-files`.split($\)
data/lib/rdkafka/bindings.rb CHANGED
@@ -147,6 +147,8 @@ module Rdkafka
       else
         Logger::UNKNOWN
       end
+
+      Rdkafka::Config.ensure_log_thread
       Rdkafka::Config.log_queue << [severity, "rdkafka: #{line}"]
     end
 
data/lib/rdkafka/config.rb CHANGED
@@ -15,13 +15,12 @@ module Rdkafka
   @@opaques = ObjectSpace::WeakMap.new
   # @private
   @@log_queue = Queue.new
-
-  Thread.start do
-    loop do
-      severity, msg = @@log_queue.pop
-      @@logger.add(severity, msg)
-    end
-  end
+  # @private
+  # We memoize the thread on the first log flush
+  # This also allows us to restart the logger thread on forks
+  @@log_thread = nil
+  # @private
+  @@log_mutex = Mutex.new
 
   # Returns the current logger, by default this is a logger to stdout.
   #
@@ -30,6 +29,24 @@ module Rdkafka
     @@logger
   end
 
+  # Makes sure that there is a thread consuming logs
+  # We do not spawn the thread immediately; we check that it is alive on each use so forking is supported
+  def self.ensure_log_thread
+    return if @@log_thread && @@log_thread.alive?
+
+    @@log_mutex.synchronize do
+      # Restart if dead (fork, crash)
+      @@log_thread = nil if @@log_thread && !@@log_thread.alive?
+
+      @@log_thread ||= Thread.start do
+        loop do
+          severity, msg = @@log_queue.pop
+          @@logger.add(severity, msg)
+        end
+      end
+    end
+  end
+
   # Returns a queue whose contents will be passed to the configured logger. Each entry
   # should follow the format [Logger::Severity, String]. The benefit over calling the
   # logger directly is that this is safe to use from trap contexts.
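Distilled out of the diff above, the revival pattern is: memoize the worker thread lazily and re-create it whenever the memoized thread is found dead, which is exactly its state after a fork. A self-contained sketch of the same idea, assuming a blocking `Queue` feeding a single drainer (the class and method names here are illustrative, not the gem's API):

```ruby
require 'logger'

class ForkSafeLogRelay
  QUEUE  = Queue.new
  MUTEX  = Mutex.new
  LOGGER = Logger.new($stdout)

  @thread = nil

  def self.log(severity, msg)
    ensure_thread
    QUEUE << [severity, msg]
  end

  def self.ensure_thread
    # Fast path outside the mutex; cheap enough to run once per log line
    return if @thread&.alive?

    MUTEX.synchronize do
      # Drop the dead thread (fork, crash) and start a fresh drainer
      @thread = nil if @thread && !@thread.alive?
      @thread ||= Thread.start { loop { LOGGER.add(*QUEUE.pop) } }
    end
  end
end

ForkSafeLogRelay.log(Logger::INFO, 'parent')
fork { ForkSafeLogRelay.log(Logger::INFO, 'child gets a fresh thread'); sleep 0.2 }
Process.wait
```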
data/lib/rdkafka/producer.rb CHANGED
@@ -40,10 +40,16 @@ module Rdkafka
       topic_metadata = ::Rdkafka::Metadata.new(inner, topic).topics&.first
     end
 
-      cache[topic] = [
-        monotonic_now,
-        topic_metadata ? topic_metadata[:partition_count] : nil
-      ]
+      partition_count = topic_metadata ? topic_metadata[:partition_count] : -1
+
+      # This approach caches a fetch failure for only 5 seconds. This makes sure that we do
+      # not cache the failure for too long, but also "buys" us a bit of time in case there
+      # are issues in the cluster, so we do not overload it with consecutive requests
+      cache[topic] = if partition_count.positive?
+        [monotonic_now, partition_count]
+      else
+        [monotonic_now - PARTITIONS_COUNT_TTL + 5, partition_count]
+      end
     end
   end
 
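The backdating arithmetic is worth spelling out. Assuming `PARTITIONS_COUNT_TTL` is 30 seconds (as the `@note` in the next hunk suggests), storing a failure at `monotonic_now - PARTITIONS_COUNT_TTL + 5` makes the entry look 25 seconds old on arrival, so it expires roughly 5 seconds later instead of occupying the cache for the full TTL:

```ruby
PARTITIONS_COUNT_TTL = 30 # seconds; assumed to match the gem's constant

def monotonic_now
  Process.clock_gettime(Process::CLOCK_MONOTONIC)
end

# A failure entry is stored with a timestamp backdated by TTL - 5 seconds
stored_at  = monotonic_now - PARTITIONS_COUNT_TTL + 5
expires_in = PARTITIONS_COUNT_TTL - (monotonic_now - stored_at)

puts expires_in.round # => 5: failed lookups are retried after ~5s,
                      #    successful ones keep the full 30s TTL
```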
@@ -199,14 +205,15 @@ module Rdkafka
   # Partition count for a given topic.
   #
   # @param topic [String] The topic name.
-  # @return [Integer] partition count for a given topic
+  # @return [Integer] partition count for a given topic, or `-1` if it could not be obtained.
   #
   # @note If 'allow.auto.create.topics' is set to true in the broker, the topic will be
   #   auto-created after returning nil.
   #
   # @note We cache the partition count for a given topic for given time.
   #   This prevents us in case someone uses `partition_key` from querying for the count with
-  #   each message. Instead we query once every 30 seconds at most
+  #   each message. Instead we query at most once every 30 seconds if we have a valid partition
+  #   count, or every 5 seconds in case we were not able to obtain the number of partitions
   def partition_count(topic)
     closed_producer_check(__method__)
 
@@ -256,7 +263,7 @@ module Rdkafka
     if partition_key
       partition_count = partition_count(topic)
       # If the topic is not present, set to -1
-      partition = Rdkafka::Bindings.partitioner(partition_key, partition_count, @partitioner_name) if partition_count
+      partition = Rdkafka::Bindings.partitioner(partition_key, partition_count, @partitioner_name) if partition_count.positive?
     end
 
     # If partition is nil, use -1 to let librdafka set the partition randomly or
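The guard change from `if partition_count` to `if partition_count.positive?` is forced by the new `-1` sentinel: in Ruby only `nil` and `false` are falsey, so once failures return `-1` instead of `nil`, the old truthiness check would pass and the partitioner would be invoked with an invalid count. A quick demonstration:

```ruby
partition_count = -1 # the new failure sentinel

puts 'old guard would run the partitioner' if partition_count           # prints: -1 is truthy
puts 'new guard skips it'                  if partition_count.positive? # prints nothing
```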
data/lib/rdkafka/version.rb CHANGED
@@ -1,7 +1,7 @@
 # frozen_string_literal: true
 
 module Rdkafka
-  VERSION = "0.14.8"
+  VERSION = "0.14.10"
   LIBRDKAFKA_VERSION = "2.3.0"
   LIBRDKAFKA_SOURCE_SHA256 = "2d49c35c77eeb3d42fa61c43757fcbb6a206daa560247154e60642bcdcc14d12"
 end
data/spec/rdkafka/config_spec.rb CHANGED
@@ -22,6 +22,7 @@ describe Rdkafka::Config do
   it "supports logging queue" do
     log = StringIO.new
     Rdkafka::Config.logger = Logger.new(log)
+    Rdkafka::Config.ensure_log_thread
 
     Rdkafka::Config.log_queue << [Logger::FATAL, "I love testing"]
     20.times do
@@ -31,6 +32,25 @@ describe Rdkafka::Config do
 
     expect(log.string).to include "FATAL -- : I love testing"
   end
+
+  it "expect to start new logger thread after fork and work" do
+    reader, writer = IO.pipe
+
+    pid = fork do
+      $stdout.reopen(writer)
+      Rdkafka::Config.logger = Logger.new($stdout)
+      reader.close
+      producer = rdkafka_producer_config(debug: 'all').producer
+      producer.close
+      writer.close
+      sleep(1)
+    end
+
+    writer.close
+    Process.wait(pid)
+    output = reader.read
+    expect(output.split("\n").size).to be >= 20
+  end
 end
 
 context "statistics callback"
data/spec/rdkafka/consumer_spec.rb CHANGED
@@ -211,6 +211,11 @@ describe Rdkafka::Consumer do
 
   # 7. ensure same message is read again
   message2 = consumer.poll(timeout)
+
+  # This is needed because `enable.auto.offset.store` is true, but when running in an
+  # overloaded CI the offset store lags
+  sleep(1)
+
   consumer.commit
   expect(message1.offset).to eq message2.offset
   expect(message1.payload).to eq message2.payload
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,10 +1,11 @@
 --- !ruby/object:Gem::Specification
 name: karafka-rdkafka
 version: !ruby/object:Gem::Version
-  version: 0.14.8
+  version: 0.14.10
 platform: ruby
 authors:
 - Thijs Cadier
+- Maciej Mensfeld
 autorequire:
 bindir: bin
 cert_chain:
@@ -35,7 +36,7 @@ cert_chain:
   AnG1dJU+yL2BK7vaVytLTstJME5mepSZ46qqIJXMuWob/YPDmVaBF39TDSG9e34s
   msG3BiCqgOgHAnL23+CN3Rt8MsuRfEtoTKpJVcCfoEoNHOkc
   -----END CERTIFICATE-----
-date: 2024-01-24 00:00:00.000000000 Z
+date: 2024-02-08 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: ffi
@@ -282,7 +283,7 @@ signing_key:
 specification_version: 4
 summary: The rdkafka gem is a modern Kafka client library for Ruby based on librdkafka.
   It wraps the production-ready C client using the ffi gem and targets Kafka 1.0+
-  and Ruby 2.4+.
+  and Ruby 2.7+.
 test_files:
 - spec/rdkafka/abstract_handle_spec.rb
 - spec/rdkafka/admin/create_acl_handle_spec.rb
metadata.gz.sig CHANGED
Binary file