karafka 1.2.3 → 1.2.4
- checksums.yaml +4 -4
- data/CHANGELOG.md +3 -0
- data/Gemfile.lock +3 -3
- data/lib/karafka/attributes_map.rb +1 -1
- data/lib/karafka/schemas/consumer_group.rb +1 -0
- data/lib/karafka/setup/config.rb +10 -6
- data/lib/karafka/version.rb +1 -1
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 76f863235897ae7dc92ee3d8353acb6cc3593cffe609dab00b70af78b4895d49
+  data.tar.gz: 8aaa8ee35a193b16fd34029e44e9a4063255e765f236d8268b22d644fcde4675
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 26f5aa227da31b07612cd363bcdcccc4fec974061874fdd9878fd6afa9cd952a8901d83d19dfea9130f7c34e3704bb57b3e5d974b7875ad6c0027f50c7ab78ce
+  data.tar.gz: 9bae8c9fc22f27d4e1713d269a37b490f3d461ca2b3fe6fdfda3ae2adebc69d284441860d33c0ceb7a67be7e215e05896da434cbaaebd069c25e4ca90865c7df
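These checksums describe the two archives packed inside the published .gem file. If you want to double-check a downloaded copy, a rough Ruby sketch along these lines should work (the gem path is illustrative; this is only a verification helper, not part of Karafka):

require 'digest'
require 'rubygems/package'

gem_path = 'karafka-1.2.4.gem' # assumed to be already downloaded

File.open(gem_path, 'rb') do |io|
  # A .gem is a tar archive; metadata.gz and data.tar.gz are the entries
  # that checksums.yaml describes.
  Gem::Package::TarReader.new(io) do |tar|
    tar.each do |entry|
      next unless %w[metadata.gz data.tar.gz].include?(entry.full_name)
      puts "#{entry.full_name}: #{Digest::SHA256.hexdigest(entry.read)}"
    end
  end
end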
data/CHANGELOG.md
CHANGED
data/Gemfile.lock
CHANGED
@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    karafka (1.2.3)
+    karafka (1.2.4)
       activesupport (>= 4.0)
       dry-configurable (~> 0.7)
       dry-inflector (~> 0.1.1)
@@ -73,7 +73,7 @@ GEM
       concurrent-ruby (~> 1.0)
     inflecto (0.0.2)
     json (2.1.0)
-    king_konf (0.2
+    king_konf (0.3.2)
     minitest (5.11.3)
     multi_json (1.13.1)
     null-logger (0.1.5)
@@ -93,7 +93,7 @@ GEM
       diff-lcs (>= 1.2.0, < 2.0)
       rspec-support (~> 3.7.0)
     rspec-support (3.7.1)
-    ruby-kafka (0.6.
+    ruby-kafka (0.6.3)
    simplecov (0.16.1)
       docile (~> 1.1)
       json (>= 1.8, < 3)
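Besides bumping karafka itself, the lockfile picks up newer king_konf and ruby-kafka releases. For an application, taking the release is just a matter of allowing (or pinning) the new version and re-resolving with bundler; a minimal Gemfile sketch, with an assumed source and constraint:

# Gemfile (illustrative)
source 'https://rubygems.org'

gem 'karafka', '~> 1.2.4'

followed by `bundle update karafka`.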
data/lib/karafka/attributes_map.rb
CHANGED
@@ -18,7 +18,7 @@ module Karafka
        {
          consumer: %i[
            session_timeout offset_commit_interval offset_commit_threshold
-           offset_retention_time heartbeat_interval
+           offset_retention_time heartbeat_interval fetcher_max_queue_size
          ],
          subscribe: %i[start_from_beginning max_bytes_per_partition],
          consumption: %i[min_bytes max_bytes max_wait_time],
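With fetcher_max_queue_size added to the :consumer group, the setting is now forwarded to ruby-kafka when the consumer is built. As a rough illustration (not Karafka's actual client code) of where these consumer-group attributes end up, assuming the ruby-kafka 0.6 API:

require 'kafka'

# The attributes in the :consumer group become keyword arguments of
# Kafka::Client#consumer; broker address and group name are illustrative.
kafka = Kafka.new(%w[127.0.0.1:9092], client_id: 'example_app')

consumer = kafka.consumer(
  group_id: 'example_group',
  session_timeout: 30,
  offset_commit_interval: 10,
  offset_commit_threshold: 0,
  heartbeat_interval: 10,
  offset_retention_time: nil,
  fetcher_max_queue_size: 100 # newly mapped by Karafka 1.2.4
)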
data/lib/karafka/schemas/consumer_group.rb
CHANGED
@@ -36,6 +36,7 @@ module Karafka
       required(:offset_commit_threshold).filled(:int?)
       required(:offset_retention_time) { none?.not > int? }
       required(:heartbeat_interval).filled { (int? | float?) & gteq?(0) }
+      required(:fetcher_max_queue_size).filled(:int?, gt?: 0)
       required(:connect_timeout).filled { (int? | float?) & gt?(0) }
       required(:socket_timeout).filled { (int? | float?) & gt?(0) }
       required(:min_bytes).filled(:int?, gt?: 0)
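The new rule simply requires fetcher_max_queue_size to be a positive integer. A minimal, standalone sketch of the predicate, assuming the dry-validation 0.x API that Karafka 1.2 builds its schemas on (this is not the full Karafka::Schemas::ConsumerGroup contract):

require 'dry-validation'

sketch = Dry::Validation.Schema do
  required(:fetcher_max_queue_size).filled(:int?, gt?: 0)
end

sketch.call(fetcher_max_queue_size: 100).success?  # => true
sketch.call(fetcher_max_queue_size: 0).success?    # => false (must be > 0)
sketch.call(fetcher_max_queue_size: '10').success? # => false (must be an integer)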
data/lib/karafka/setup/config.rb
CHANGED
@@ -69,14 +69,14 @@ module Karafka
       setting :seed_brokers
       # option session_timeout [Integer] the number of seconds after which, if a client
       # hasn't contacted the Kafka cluster, it will be kicked out of the group.
+      setting :pause_timeout, 10
+      # option offset_commit_interval [Integer] the interval between offset commits,
+      # in seconds.
       setting :session_timeout, 30
       # Time that a given partition will be paused from fetching messages, when message
       # consumption fails. It allows us to process other partitions, while the error is being
       # resolved and also "slows" things down, so it prevents from "eating" up all messages and
       # consuming them with failed code. Use `nil` if you want to pause forever and never retry.
-      setting :pause_timeout, 10
-      # option offset_commit_interval [Integer] the interval between offset commits,
-      # in seconds.
       setting :offset_commit_interval, 10
       # option offset_commit_threshold [Integer] the number of messages that can be
       # processed before their offsets are committed. If zero, offset commits are
@@ -85,6 +85,13 @@ module Karafka
       # option heartbeat_interval [Integer] the interval between heartbeats; must be less
       # than the session window.
       setting :heartbeat_interval, 10
+      # option offset_retention_time [Integer] The length of the retention window, known as
+      # offset retention time
+      setting :offset_retention_time, nil
+      # option fetcher_max_queue_size [Integer] max number of items in the fetch queue that
+      # are stored for further processing. Note, that each item in the queue represents a
+      # response from a single broker
+      setting :fetcher_max_queue_size, 100
       # option max_bytes_per_partition [Integer] the maximum amount of data fetched
       # from a single partition at a time.
       setting :max_bytes_per_partition, 1_048_576
@@ -111,9 +118,6 @@ module Karafka
       # option reconnect_timeout [Integer] How long should we wait before trying to reconnect to
       # Kafka cluster that went down (in seconds)
       setting :reconnect_timeout, 5
-      # option offset_retention_time [Integer] The length of the retention window, known as
-      # offset retention time
-      setting :offset_retention_time, nil
       # option connect_timeout [Integer] Sets the number of seconds to wait while connecting to
       # a broker for the first time. When ruby-kafka initializes, it needs to connect to at
       # least one host.
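For applications, the practical effect is a new tunable with a default of 100, plus offset_retention_time now grouped with the other consumer settings. Assuming these options sit under the kafka scope shown above, overriding them in a Karafka 1.2 app would look roughly like this (class name and broker address are illustrative):

# frozen_string_literal: true

class KarafkaApp < Karafka::App
  setup do |config|
    config.client_id = 'example_app'
    config.kafka.seed_brokers = %w[kafka://127.0.0.1:9092]
    # Buffer fewer broker responses if fetched batches are large
    config.kafka.fetcher_max_queue_size = 10
    # nil leaves committed-offset retention to the broker default
    config.kafka.offset_retention_time = nil
  end
end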
data/lib/karafka/version.rb
CHANGED
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: karafka
 version: !ruby/object:Gem::Version
-  version: 1.2.3
+  version: 1.2.4
 platform: ruby
 authors:
 - Maciej Mensfeld
@@ -10,7 +10,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2018-05-
+date: 2018-05-11 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: activesupport