karafka-rdkafka 0.19.0 → 0.19.2.rc1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/.github/CODEOWNERS +3 -0
- data/.github/workflows/ci.yml +26 -10
- data/.github/workflows/verify-action-pins.yml +16 -0
- data/.ruby-version +1 -1
- data/CHANGELOG.md +8 -0
- data/README.md +13 -11
- data/docker-compose.yml +1 -1
- data/lib/rdkafka/bindings.rb +25 -1
- data/lib/rdkafka/config.rb +8 -4
- data/lib/rdkafka/consumer/headers.rb +14 -3
- data/lib/rdkafka/native_kafka.rb +4 -2
- data/lib/rdkafka/producer/partitions_count_cache.rb +216 -0
- data/lib/rdkafka/producer.rb +52 -35
- data/lib/rdkafka/version.rb +1 -1
- data/lib/rdkafka.rb +1 -0
- data/renovate.json +13 -1
- data/spec/rdkafka/admin_spec.rb +12 -10
- data/spec/rdkafka/bindings_spec.rb +0 -9
- data/spec/rdkafka/config_spec.rb +17 -15
- data/spec/rdkafka/consumer/headers_spec.rb +26 -10
- data/spec/rdkafka/producer/partitions_count_cache_spec.rb +359 -0
- data/spec/rdkafka/producer/partitions_count_spec.rb +359 -0
- data/spec/rdkafka/producer_spec.rb +156 -3
- data/spec/spec_helper.rb +9 -0
- data.tar.gz.sig +1 -2
- metadata +10 -3
- metadata.gz.sig +0 -0
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: d274be74f9ce13d34e8b4151809a030c7965328f39929a7dd9edcda480c38b82
+  data.tar.gz: 2a31bab8e00b6b37479e65d717ba797bd1fa5588f6f22304396bd0f760e08782
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 21b2985059a41f125539f3921b81c2768da2b48a86e1370e81925641dfd353fa36c9f8f0f10cb5f60972eb6ad544b210e566f0f2f9f7fcc27680b64fe6ddff20
+  data.tar.gz: 26c74433fa18d9ffdd1e516bcaa9a8fa3c17cbbf1afe7efb2f749a8219d2ce5540adf440635ce1bf104d1e7776ccd01a598b2ff3e1d2570eb29de9e2b079fe08
checksums.yaml.gz.sig
CHANGED
Binary file
data/.github/CODEOWNERS
ADDED
data/.github/workflows/ci.yml
CHANGED
@@ -6,9 +6,14 @@ concurrency:
 
 on:
   pull_request:
+    branches: [ main, master ]
   push:
+    branches: [ main, master ]
   schedule:
-    - cron:
+    - cron: '0 1 * * *'
+
+permissions:
+  contents: read
 
 env:
   BUNDLE_RETRY: 6
@@ -26,20 +31,27 @@ jobs:
           - '3.3'
           - '3.2'
           - '3.1'
+          - 'jruby-10.0'
         include:
           - ruby: '3.4'
             coverage: 'true'
+          - ruby: 'jruby-10.0'
+            continue-on-error: true
+
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          fetch-depth: 0
+
       - name: Install package dependencies
         run: "[ -e $APT_DEPS ] || sudo apt-get install -y --no-install-recommends $APT_DEPS"
 
-      - name: Start Kafka with
+      - name: Start Kafka with Docker Compose
         run: |
           docker compose up -d || (sleep 5 && docker compose up -d)
 
       - name: Set up Ruby
-        uses: ruby/setup-ruby@v1
+        uses: ruby/setup-ruby@eaecf785f6a34567a6d97f686bbb7bccc1ac1e5c # v1.237.0
         with:
           ruby-version: ${{matrix.ruby}}
           bundler-cache: true
@@ -47,15 +59,14 @@ jobs:
       - name: Run all specs
         env:
           GITHUB_COVERAGE: ${{matrix.coverage}}
-
+        continue-on-error: ${{ matrix.continue-on-error || false }} # Use the matrix value if present
         run: |
           set -e
-          bundle install --
+          bundle install --jobs 4 --retry 3
           cd ext && bundle exec rake
           cd ..
           bundle exec rspec
 
-
   macos_build:
     timeout-minutes: 30
     runs-on: macos-latest
@@ -67,17 +78,22 @@ jobs:
           - '3.3'
           - '3.2'
           - '3.1'
+          - 'jruby-9.4'
+        include:
+          - ruby: 'jruby-10.0'
+            continue-on-error: true
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
 
       - name: Set up Ruby
-        uses: ruby/setup-ruby@v1
+        uses: ruby/setup-ruby@eaecf785f6a34567a6d97f686bbb7bccc1ac1e5c # v1.237.0
         with:
           ruby-version: ${{matrix.ruby}}
           bundler-cache: false
 
       - name: Build rdkafka-ruby
+        continue-on-error: ${{ matrix.continue-on-error || false }}
         run: |
           set -e
-          bundle install --
+          bundle install --jobs 4 --retry 3
          cd ext && bundle exec rake
data/.github/workflows/verify-action-pins.yml
ADDED
@@ -0,0 +1,16 @@
+name: Verify Action Pins
+on:
+  pull_request:
+    paths:
+      - '.github/workflows/**'
+jobs:
+  verify:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - name: Check SHA pins
+        run: |
+          if grep -E -r "uses: .*/.*@(v[0-9]+|main|master)($|[[:space:]]|$)" --include="*.yml" --include="*.yaml" .github/workflows/ | grep -v "#"; then
+            echo "::error::Actions should use SHA pins, not tags or branch names"
+            exit 1
+          fi
data/.ruby-version
CHANGED
@@ -1 +1 @@
-3.4.
+3.4.3
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,13 @@
 # Rdkafka Changelog
 
+## 0.19.2 (Unreleased)
+- [Enhancement] Replace TTL-based partition count cache with a global cache that reuses `librdkafka` statistics data when possible.
+- [Enhancement] Roll out experimental jruby support.
+
+## 0.19.1 (2025-04-07)
+- [Enhancement] Support producing and consuming of headers with multiple values (KIP-82).
+- [Enhancement] Allow native Kafka customization poll time.
+
 ## 0.19.0 (2025-01-20)
 - **[Breaking]** Deprecate and remove `#each_batch` due to data consistency concerns.
 - [Enhancement] Bump librdkafka to 2.8.0
data/README.md
CHANGED
@@ -163,14 +163,16 @@ bundle exec rake produce_messages
 
 | rdkafka-ruby | librdkafka | patches |
 |-|-|-|
-| 0.19.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
+| 0.19.2 (Unreleased) | 2.8.0 (2025-01-07) | yes |
+| 0.19.1 (2025-04-07) | 2.8.0 (2025-01-07) | yes |
+| 0.19.0 (2025-01-20) | 2.8.0 (2025-01-07) | yes |
+| 0.18.0 (2024-11-26) | 2.6.1 (2024-11-18) | yes |
+| 0.17.4 (2024-09-02) | 2.5.3 (2024-09-02) | yes |
+| 0.17.0 (2024-08-01) | 2.5.0 (2024-07-10) | yes |
+| 0.16.0 (2024-06-13) | 2.4.0 (2024-05-07) | no |
+| 0.15.0 (2023-12-03) | 2.3.0 (2023-10-25) | no |
+| 0.14.0 (2023-11-21) | 2.2.0 (2023-07-12) | no |
+| 0.13.0 (2023-07-24) | 2.0.2 (2023-01-20) | no |
+| 0.12.0 (2022-06-17) | 1.9.0 (2022-06-16) | no |
+| 0.11.0 (2021-11-17) | 1.8.2 (2021-10-18) | no |
+| 0.10.0 (2021-09-07) | 1.5.0 (2020-07-20) | no |
data/docker-compose.yml
CHANGED
data/lib/rdkafka/bindings.rb
CHANGED
@@ -35,6 +35,8 @@ module Rdkafka
     RD_KAFKA_OFFSET_STORED = -1000
     RD_KAFKA_OFFSET_INVALID = -1001
 
+    EMPTY_HASH = {}.freeze
+
     class SizePtr < FFI::Struct
       layout :value, :size_t
     end
@@ -215,9 +217,31 @@ module Rdkafka
     StatsCallback = FFI::Function.new(
       :int, [:pointer, :string, :int, :pointer]
     ) do |_client_ptr, json, _json_len, _opaque|
-      # Pass the stats hash to callback in config
       if Rdkafka::Config.statistics_callback
         stats = JSON.parse(json)
+
+        # If user requested statistics callbacks, we can use the statistics data to get the
+        # partitions count for each topic when this data is published. That way we do not have
+        # to query this information when user is using `partition_key`. This takes around 0.02ms
+        # every statistics interval period (most likely every 5 seconds) and saves us from making
+        # any queries to the cluster for the partition count.
+        #
+        # One edge case is if user would set the `statistics.interval.ms` much higher than the
+        # default current partition count refresh (30 seconds). This is taken care of as the lack
+        # of reporting to the partitions cache will cause cache expire and blocking refresh.
+        #
+        # If user sets `topic.metadata.refresh.interval.ms` too high this is on the user.
+        #
+        # Since this cache is shared, having few consumers and/or producers in one process will
+        # automatically improve the querying times even with low refresh times.
+        (stats['topics'] || EMPTY_HASH).each do |topic_name, details|
+          partitions_count = details['partitions'].keys.reject { |k| k == '-1' }.size
+
+          next unless partitions_count.positive?
+
+          Rdkafka::Producer.partitions_count_cache.set(topic_name, partitions_count)
+        end
+
         Rdkafka::Config.statistics_callback.call(stats)
       end
 
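For context, a minimal sketch (not part of the diff) of how an application opts into the statistics flow that feeds this shared partition count cache; broker address, topic name, interval, and key are illustrative:

    require "rdkafka"

    # Any statistics callback enables the stats emission that the StatsCallback above hooks into.
    Rdkafka::Config.statistics_callback = ->(stats) do
      # Partition counts from this payload are already cached before this callback runs.
      puts "librdkafka stats for #{stats['name']}"
    end

    config = Rdkafka::Config.new(
      "bootstrap.servers"      => "localhost:9092",
      # Emit statistics every 5 seconds; each emission refreshes the partition count cache.
      "statistics.interval.ms" => 5_000
    )

    producer = config.producer
    # partition_key lookups can now be served from the cache instead of blocking metadata queries.
    producer.produce(topic: "example-topic", payload: "hello", partition_key: "user-1")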
data/lib/rdkafka/config.rb
CHANGED
@@ -233,11 +233,12 @@ module Rdkafka
     #
     # @param native_kafka_auto_start [Boolean] should the native kafka operations be started
     #   automatically. Defaults to true. Set to false only when doing complex initialization.
+    # @param native_kafka_poll_timeout_ms [Integer] ms poll time of the native Kafka
     # @return [Producer] The created producer
     #
     # @raise [ConfigError] When the configuration contains invalid options
     # @raise [ClientCreationError] When the native client cannot be created
-    def producer(native_kafka_auto_start: true)
+    def producer(native_kafka_auto_start: true, native_kafka_poll_timeout_ms: 100)
       # Create opaque
       opaque = Opaque.new
       # Create Kafka config
@@ -254,7 +255,8 @@ module Rdkafka
           kafka,
           run_polling_thread: true,
           opaque: opaque,
-          auto_start: native_kafka_auto_start
+          auto_start: native_kafka_auto_start,
+          timeout_ms: native_kafka_poll_timeout_ms
         ),
         partitioner_name
       ).tap do |producer|
@@ -266,11 +268,12 @@ module Rdkafka
     #
     # @param native_kafka_auto_start [Boolean] should the native kafka operations be started
     #   automatically. Defaults to true. Set to false only when doing complex initialization.
+    # @param native_kafka_poll_timeout_ms [Integer] ms poll time of the native Kafka
     # @return [Admin] The created admin instance
     #
     # @raise [ConfigError] When the configuration contains invalid options
     # @raise [ClientCreationError] When the native client cannot be created
-    def admin(native_kafka_auto_start: true)
+    def admin(native_kafka_auto_start: true, native_kafka_poll_timeout_ms: 100)
       opaque = Opaque.new
       config = native_config(opaque)
       Rdkafka::Bindings.rd_kafka_conf_set_background_event_cb(config, Rdkafka::Callbacks::BackgroundEventCallbackFunction)
@@ -282,7 +285,8 @@ module Rdkafka
           kafka,
           run_polling_thread: true,
           opaque: opaque,
-          auto_start: native_kafka_auto_start
+          auto_start: native_kafka_auto_start,
+          timeout_ms: native_kafka_poll_timeout_ms
         )
       )
     end
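A minimal usage sketch (not part of the diff) of the new keyword argument; the 50ms value is illustrative:

    config = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092")

    # Poll the underlying native client every 50ms instead of the default 100ms.
    producer = config.producer(native_kafka_poll_timeout_ms: 50)
    admin    = config.admin(native_kafka_poll_timeout_ms: 50)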
data/lib/rdkafka/consumer/headers.rb
CHANGED
@@ -7,11 +7,13 @@ module Rdkafka
       EMPTY_HEADERS = {}.freeze
 
       # Reads a librdkafka native message's headers and returns them as a Ruby Hash
+      # where each key maps to either a String (single value) or Array<String> (multiple values)
+      # to support duplicate headers per KIP-82
       #
       # @private
       #
       # @param [Rdkafka::Bindings::Message] native_message
-      # @return [Hash<String, String
+      # @return [Hash<String, String|Array<String>>] headers Hash for the native_message
       # @raise [Rdkafka::RdkafkaError] when fail to read headers
       def self.from_native(native_message)
         headers_ptrptr = FFI::MemoryPointer.new(:pointer)
@@ -53,10 +55,19 @@ module Rdkafka
           size = size_ptr[:value]
 
           value_ptr = value_ptrptr.read_pointer
-
           value = value_ptr.read_string(size)
 
-          headers
+          if headers.key?(name)
+            # If we've seen this header before, convert to array if needed and append
+            if headers[name].is_a?(Array)
+              headers[name] << value
+            else
+              headers[name] = [headers[name], value]
+            end
+          else
+            # First occurrence - store as single value
+            headers[name] = value
+          end
 
           idx += 1
         end
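A hedged sketch of the resulting behavior (not part of the diff), assuming the 0.19.1 producer accepts an Array as a header value per the changelog entry; topic and values are illustrative. Repeated header keys come back from Headers.from_native as an Array<String>, single occurrences as a String:

    producer = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").producer

    producer.produce(
      topic: "example-topic",
      payload: "hello",
      headers: { "request-id" => "abc", "retry-host" => ["host-a", "host-b"] }
    ).wait

    # A consumer reading this message would then see:
    #   message.headers #=> { "request-id" => "abc", "retry-host" => ["host-a", "host-b"] }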
data/lib/rdkafka/native_kafka.rb
CHANGED
@@ -4,7 +4,7 @@ module Rdkafka
   # @private
   # A wrapper around a native kafka that polls and cleanly exits
   class NativeKafka
-    def initialize(inner, run_polling_thread:, opaque:, auto_start: true)
+    def initialize(inner, run_polling_thread:, opaque:, auto_start: true, timeout_ms: 100)
       @inner = inner
       @opaque = opaque
       # Lock around external access
@@ -30,6 +30,8 @@ module Rdkafka
 
       @run_polling_thread = run_polling_thread
 
+      @timeout_ms = timeout_ms
+
       start if auto_start
 
       @closing = false
@@ -50,7 +52,7 @@ module Rdkafka
       @polling_thread = Thread.new do
         loop do
           @poll_mutex.synchronize do
-            Rdkafka::Bindings.rd_kafka_poll(@inner,
+            Rdkafka::Bindings.rd_kafka_poll(@inner, @timeout_ms)
           end
 
           # Exit thread if closing and the poll queue is empty
data/lib/rdkafka/producer/partitions_count_cache.rb
ADDED
@@ -0,0 +1,216 @@
+# frozen_string_literal: true
+
+module Rdkafka
+  class Producer
+    # Caching mechanism for Kafka topic partition counts to avoid frequent cluster queries
+    #
+    # This cache is designed to optimize the process of obtaining partition counts for topics.
+    # It uses several strategies to minimize Kafka cluster queries:
+    #
+    # @note Design considerations:
+    #
+    # 1. Statistics-based updates
+    #    When statistics callbacks are enabled (via `statistics.interval.ms`), we leverage
+    #    this data to proactively update the partition counts cache. This approach costs
+    #    approximately 0.02ms of processing time during each statistics interval (typically
+    #    every 5 seconds) but eliminates the need for explicit blocking metadata queries.
+    #
+    # 2. Edge case handling
+    #    If a user configures `statistics.interval.ms` much higher than the default cache TTL
+    #    (30 seconds), the cache will still function correctly. When statistics updates don't
+    #    occur frequently enough, the cache entries will expire naturally, triggering a
+    #    blocking refresh when needed.
+    #
+    # 3. User configuration awareness
+    #    The cache respects user-defined settings. If `topic.metadata.refresh.interval.ms` is
+    #    set very high, the responsibility for potentially stale data falls on the user. This
+    #    is an explicit design choice to honor user configuration preferences and align with
+    #    librdkafka settings.
+    #
+    # 4. Process-wide efficiency
+    #    Since this cache is shared across all Rdkafka producers and consumers within a process,
+    #    having multiple clients improves overall efficiency. Each client contributes to keeping
+    #    the cache updated, benefiting all other clients.
+    #
+    # 5. Thread-safety approach
+    #    The implementation uses fine-grained locking with per-topic mutexes to minimize
+    #    contention in multi-threaded environments while ensuring data consistency.
+    #
+    # 6. Topic recreation handling
+    #    If a topic is deleted and recreated with fewer partitions, the cache will continue to
+    #    report the higher count until either the TTL expires or the process is restarted. This
+    #    design choice simplifies the implementation while relying on librdkafka's error handling
+    #    for edge cases. In production environments, topic recreation with different partition
+    #    counts is typically accompanied by application restarts to handle structural changes.
+    #    This also aligns with the previous cache implementation.
+    class PartitionsCountCache
+      include Helpers::Time
+
+      # Default time-to-live for cached partition counts in seconds
+      #
+      # @note This default was chosen to balance freshness of metadata with performance
+      #   optimization. Most Kafka cluster topology changes are planned operations, making 30
+      #   seconds a reasonable compromise.
+      DEFAULT_TTL = 30
+
+      # Creates a new partition count cache
+      #
+      # @param ttl [Integer] Time-to-live in seconds for cached values
+      def initialize(ttl = DEFAULT_TTL)
+        @counts = {}
+        @mutex_hash = {}
+        # Used only for @mutex_hash access to ensure thread-safety when creating new mutexes
+        @mutex_for_hash = Mutex.new
+        @ttl = ttl
+      end
+
+      # Reads partition count for a topic with automatic refresh when expired
+      #
+      # This method will return the cached partition count if available and not expired.
+      # If the value is expired or not available, it will execute the provided block
+      # to fetch the current value from Kafka.
+      #
+      # @param topic [String] Kafka topic name
+      # @yield Block that returns the current partition count when cache needs refreshing
+      # @yieldreturn [Integer] Current partition count retrieved from Kafka
+      # @return [Integer] Partition count for the topic
+      #
+      # @note The implementation prioritizes read performance over write consistency
+      #   since partition counts typically only increase during normal operation.
+      def get(topic)
+        current_info = @counts[topic]
+
+        if current_info.nil? || expired?(current_info[0])
+          new_count = yield
+
+          if current_info.nil?
+            # No existing data, create a new entry with mutex
+            set(topic, new_count)
+
+            return new_count
+          else
+            current_count = current_info[1]
+
+            if new_count > current_count
+              # Higher value needs mutex to update both timestamp and count
+              set(topic, new_count)
+
+              return new_count
+            else
+              # Same or lower value, just update timestamp without mutex
+              refresh_timestamp(topic)
+
+              return current_count
+            end
+          end
+        end
+
+        current_info[1]
+      end
+
+      # Update partition count for a topic when needed
+      #
+      # This method updates the partition count for a topic in the cache.
+      # It uses a mutex to ensure thread-safety during updates.
+      #
+      # @param topic [String] Kafka topic name
+      # @param new_count [Integer] New partition count value
+      #
+      # @note We prioritize higher partition counts and only accept them when using
+      #   a mutex to ensure consistency. This design decision is based on the fact that
+      #   partition counts in Kafka only increase during normal operation.
+      def set(topic, new_count)
+        # First check outside mutex to avoid unnecessary locking
+        current_info = @counts[topic]
+
+        # For lower values, we don't update count but might need to refresh timestamp
+        if current_info && new_count < current_info[1]
+          refresh_timestamp(topic)
+
+          return
+        end
+
+        # Only lock the specific topic mutex
+        mutex_for(topic).synchronize do
+          # Check again inside the lock as another thread might have updated
+          current_info = @counts[topic]
+
+          if current_info.nil?
+            # Create new entry
+            @counts[topic] = [monotonic_now, new_count]
+          else
+            current_count = current_info[1]
+
+            if new_count > current_count
+              # Update to higher count value
+              current_info[0] = monotonic_now
+              current_info[1] = new_count
+            else
+              # Same or lower count, update timestamp only
+              current_info[0] = monotonic_now
+            end
+          end
+        end
+      end
+
+      # @return [Hash] hash with ttls and partitions counts array
+      def to_h
+        @counts
+      end
+
+      private
+
+      # Get or create a mutex for a specific topic
+      #
+      # This method ensures that each topic has its own mutex,
+      # allowing operations on different topics to proceed in parallel.
+      #
+      # @param topic [String] Kafka topic name
+      # @return [Mutex] Mutex for the specified topic
+      #
+      # @note We use a separate mutex (@mutex_for_hash) to protect the creation
+      #   of new topic mutexes. This pattern allows fine-grained locking while
+      #   maintaining thread-safety.
+      def mutex_for(topic)
+        mutex = @mutex_hash[topic]
+
+        return mutex if mutex
+
+        # Use a separate mutex to protect the creation of new topic mutexes
+        @mutex_for_hash.synchronize do
+          # Check again in case another thread created it
+          @mutex_hash[topic] ||= Mutex.new
+        end
+
+        @mutex_hash[topic]
+      end
+
+      # Update the timestamp without acquiring the mutex
+      #
+      # This is an optimization that allows refreshing the TTL of existing entries
+      # without the overhead of mutex acquisition.
+      #
+      # @param topic [String] Kafka topic name
+      #
+      # @note This method is safe for refreshing existing data regardless of count
+      #   because it only updates the timestamp, which doesn't affect the correctness
+      #   of concurrent operations.
+      def refresh_timestamp(topic)
+        current_info = @counts[topic]
+
+        return unless current_info
+
+        # Update the timestamp in-place
+        current_info[0] = monotonic_now
+      end
+
+      # Check if a timestamp has expired based on the TTL
+      #
+      # @param timestamp [Float] Monotonic timestamp to check
+      # @return [Boolean] true if expired, false otherwise
+      def expired?(timestamp)
+        monotonic_now - timestamp > @ttl
+      end
+    end
+  end
+end
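A minimal usage sketch (hypothetical caller, not part of the diff) of how the cache is meant to be consulted: get returns the cached value and only runs its block on a miss or once the TTL has expired, while the statistics callback (see bindings.rb above) pushes fresh counts in via set:

    cache = Rdkafka::Producer.partitions_count_cache

    count = cache.get("example-topic") do
      # Runs only on a cache miss or after the 30-second TTL expires; in the gem this
      # is where a blocking metadata query for the partition count would happen.
      12 # illustrative partition count
    end

    # Statistics-driven refresh path; a lower value never overwrites a higher cached count.
    cache.set("example-topic", 12)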