karafka 1.4.8 → 1.4.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/CHANGELOG.md +4 -1
- data/Gemfile.lock +14 -16
- data/README.md +23 -24
- data/lib/karafka/setup/config.rb +55 -55
- data/lib/karafka/version.rb +1 -1
- data.tar.gz.sig +0 -0
- metadata +3 -4
- metadata.gz.sig +0 -0
- data/.github/FUNDING.yml +0 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 54b5bde8c7a61dbf95021a53f40a5789eb29a5b297e6639a616eac5a0883391d
+  data.tar.gz: faae48e60cba546b503a222b4c02e5165e7b6934bfab4414d0116e30b4f79089
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: ca53d510bd6a0e2c6efb7cec8a4b298c5668ef738943b1fa97acace0b7f9a4a89e9e2f1e45ba0ae3e3202613b4259ff8e1408a7ebf727566f11863bf4f7c637b
+  data.tar.gz: a5f735ca604ef83fb324c1c011ad4f3f9cdfa57009c3eb88a852b1a9c41bffdad7f87fd936c1b5b95076b38535fcaa38dd3aa9afdac355098f071578fef81dad
checksums.yaml.gz.sig
CHANGED
Binary file
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,8 @@
 # Karafka framework changelog
 
+## 1.4.9 (2021-09-29)
+- fix `dry-configurable` deprecation warnings for default value as positional argument
+
 ## 1.4.8 (2021-09-08)
 - Allow 'rails' in Gemfile to enable rails-aware generator (rewritten)
 
@@ -31,7 +34,7 @@
 
 ## 1.4.0 (2020-09-05)
 - Rename `Karafka::Params::Metadata` to `Karafka::Params::BatchMetadata`
-
+- Rename consumer `#metadata` to `#batch_metadata`
 - Separate metadata (including Karafka native metadata) from the root of params (backwards compatibility preserved thanks to rabotyaga)
 - Remove metadata hash dependency
 - Remove params dependency on a hash in favour of PORO
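The 1.4.9 entry above concerns dry-configurable 0.13, which deprecated passing a setting's default as a positional argument. A minimal standalone sketch of the old and new forms (illustrative code, not part of this diff; `AppConfig` is a hypothetical class):

```ruby
require 'dry-configurable'

class AppConfig
  # dry-configurable provides a class-level #setting macro via extend
  extend Dry::Configurable

  # Deprecated positional form (emits a warning under dry-configurable 0.13):
  #   setting :backend, :inline
  # Keyword form that this release migrates to:
  setting :backend, default: :inline
end

AppConfig.config.backend # => :inline
```

The config.rb diff below shows this same migration applied to every Karafka setting.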
data/Gemfile.lock
CHANGED
@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    karafka (1.4.8)
+    karafka (1.4.9)
       dry-configurable (~> 0.8)
       dry-inflector (~> 0.1)
       dry-monitor (~> 0.3)
@@ -31,15 +31,14 @@ GEM
     digest-crc (0.6.4)
       rake (>= 12.0.0, < 14.0.0)
     docile (1.4.0)
-    dry-configurable (0.
+    dry-configurable (0.13.0)
       concurrent-ruby (~> 1.0)
-      dry-core (~> 0.
-    dry-container (0.
+      dry-core (~> 0.6)
+    dry-container (0.9.0)
       concurrent-ruby (~> 1.0)
-      dry-configurable (~> 0.
+      dry-configurable (~> 0.13, >= 0.13.0)
     dry-core (0.7.1)
       concurrent-ruby (~> 1.0)
-    dry-equalizer (0.3.0)
     dry-events (0.3.0)
       concurrent-ruby (~> 1.0)
       dry-core (~> 0.5, >= 0.5)
@@ -48,13 +47,13 @@ GEM
     dry-logic (1.2.0)
       concurrent-ruby (~> 1.0)
       dry-core (~> 0.5, >= 0.5)
-    dry-monitor (0.
-      dry-configurable (~> 0.
+    dry-monitor (0.5.0)
+      dry-configurable (~> 0.13, >= 0.13.0)
       dry-core (~> 0.5, >= 0.5)
       dry-events (~> 0.2)
-    dry-schema (1.
+    dry-schema (1.8.0)
       concurrent-ruby (~> 1.0)
-      dry-configurable (~> 0.
+      dry-configurable (~> 0.13, >= 0.13.0)
       dry-core (~> 0.5, >= 0.5)
       dry-initializer (~> 3.0)
       dry-logic (~> 1.0)
@@ -65,13 +64,12 @@ GEM
       dry-core (~> 0.5, >= 0.5)
       dry-inflector (~> 0.1, >= 0.1.2)
       dry-logic (~> 1.0, >= 1.0.2)
-    dry-validation (1.
+    dry-validation (1.7.0)
       concurrent-ruby (~> 1.0)
       dry-container (~> 0.7, >= 0.7.1)
-      dry-core (~> 0.
-      dry-equalizer (~> 0.2)
+      dry-core (~> 0.5, >= 0.5)
       dry-initializer (~> 3.0)
-      dry-schema (~> 1.
+      dry-schema (~> 1.8, >= 1.8.0)
     envlogic (1.1.3)
       dry-inflector (~> 0.1)
     factory_bot (6.2.0)
@@ -110,7 +108,7 @@ GEM
     thor (1.1.0)
     tzinfo (2.0.4)
       concurrent-ruby (~> 1.0)
-    waterdrop (1.4.
+    waterdrop (1.4.3)
       delivery_boy (>= 0.2, < 2.x)
       dry-configurable (~> 0.8)
       dry-monitor (~> 0.3)
@@ -133,4 +131,4 @@ DEPENDENCIES
   simplecov
 
 BUNDLED WITH
-   2.2.
+   2.2.28
data/README.md
CHANGED
@@ -2,7 +2,7 @@
 
 [](https://github.com/karafka/karafka/actions/workflows/ci.yml)
 [](http://badge.fury.io/rb/karafka)
-
+[](https://slack.karafka.io)
 
 **Note**: We're finishing the new Karafka `2.0` but for now, please use `1.4`. All the documentation presented here refers to `1.4`
 
@@ -10,6 +10,24 @@
 
 Framework used to simplify Apache Kafka based Ruby applications development.
 
+```ruby
+# Define what topics you want to consume with which consumers
+Karafka::App.consumer_groups.draw do
+  topic 'system_events' do
+    consumer EventsConsumer
+  end
+end
+
+# And create your consumers, within which your messages will be processed
+class EventsConsumer < ApplicationConsumer
+  # Example that utilizes ActiveRecord#insert_all and Karafka batch processing
+  def consume
+    # Store all of the incoming Kafka events locally in an efficient way
+    Event.insert_all params_batch.payloads
+  end
+end
+```
+
 Karafka allows you to capture everything that happens in your systems in large scale, providing you with a seamless and stable core for consuming and processing this data, without having to focus on things that are not your business domain.
 
 Karafka not only handles incoming messages but also provides tools for building complex data-flow applications that receive and send messages.
@@ -35,7 +53,7 @@ Karafka based applications can be easily deployed to any type of infrastructure,
 
 ## Support
 
-Karafka has
+Karafka has [Wiki pages](https://github.com/karafka/karafka/wiki) for almost everything and a pretty decent [FAQ](https://github.com/karafka/karafka/wiki/FAQ). It covers the whole installation, setup, and deployment along with other useful details on how to run Karafka.
 
 If you have any questions about using Karafka, feel free to join our [Gitter](https://gitter.im/karafka/karafka) chat channel.
 
@@ -65,10 +83,6 @@ and follow the instructions from the [example app Wiki](https://github.com/karaf
 
 If you need more details and know how on how to start Karafka with a clean installation, read the [Getting started page](https://github.com/karafka/karafka/wiki/Getting-started) section of our Wiki.
 
-## Notice
-
-Karafka framework and Karafka team are __not__ related to Kafka streaming service called CloudKarafka in any matter. We don't recommend nor discourage usage of their platform.
-
 ## References
 
 * [Karafka framework](https://github.com/karafka/karafka)
@@ -77,23 +91,8 @@ Karafka framework and Karafka team are __not__ related to Kafka streaming servic
 
 ## Note on contributions
 
-First, thank you for considering contributing to Karafka! It's people like you that make the open source community such a great community!
-
-Each pull request must pass all the RSpec specs and meet our quality requirements.
-
-To check if everything is as it should be, we use [Coditsu](https://coditsu.io) that combines multiple linters and code analyzers for both code and documentation. Once you're done with your changes, submit a pull request.
-
-Coditsu will automatically check your work against our quality standards. You can find your commit check results on the [builds page](https://app.coditsu.io/karafka/commit_builds) of Karafka organization.
-
-[](https://app.coditsu.io/karafka/commit_builds)
-
-## Contributors
-
-This project exists thanks to all the people who contribute.
-<a href="https://github.com/karafka/karafka/graphs/contributors"><img src="https://opencollective.com/karafka/contributors.svg?width=890" /></a>
-
-## Sponsors
+First, thank you for considering contributing to the Karafka ecosystem! It's people like you that make the open source community such a great community!
 
-
+Each pull request must pass all the RSpec specs, integration tests and meet our quality requirements.
 
-
+Fork it, update and wait for the Github Actions results.
data/lib/karafka/setup/config.rb
CHANGED
@@ -24,89 +24,89 @@ module Karafka
       # default Kafka groups namespaces and identify that app in kafka
       setting :client_id
       # What backend do we want to use to process messages
-      setting :backend, :inline
+      setting :backend, default: :inline
       # option logger [Instance] logger that we want to use
-      setting :logger, ::Karafka::Instrumentation::Logger.new
+      setting :logger, default: ::Karafka::Instrumentation::Logger.new
       # option monitor [Instance] monitor that we will to use (defaults to Karafka::Monitor)
-      setting :monitor, ::Karafka::Instrumentation::Monitor.new
+      setting :monitor, default: ::Karafka::Instrumentation::Monitor.new
       # Mapper used to remap consumer groups ids, so in case users migrate from other tools
       # or they need to maintain their own internal consumer group naming conventions, they
      # can easily do it, replacing the default client_id + consumer name pattern concept
-      setting :consumer_mapper, Routing::ConsumerMapper.new
+      setting :consumer_mapper, default: Routing::ConsumerMapper.new
       # Mapper used to remap names of topics, so we can have a clean internal topic naming
       # despite using any Kafka provider that uses namespacing, etc
       # It needs to implement two methods:
       # - #incoming - for remapping from the incoming message to our internal format
       # - #outgoing - for remapping from internal topic name into outgoing message
-      setting :topic_mapper, Routing::TopicMapper.new
+      setting :topic_mapper, default: Routing::TopicMapper.new
       # Default serializer for converting whatever we want to send to kafka to json
-      setting :serializer, Karafka::Serialization::Json::Serializer.new
+      setting :serializer, default: Karafka::Serialization::Json::Serializer.new
       # Default deserializer for converting incoming data into ruby objects
-      setting :deserializer, Karafka::Serialization::Json::Deserializer.new
+      setting :deserializer, default: Karafka::Serialization::Json::Deserializer.new
       # If batch_fetching is true, we will fetch kafka messages in batches instead of 1 by 1
       # @note Fetching does not equal consuming, see batch_consuming description for details
-      setting :batch_fetching, true
+      setting :batch_fetching, default: true
       # If batch_consuming is true, we will have access to #params_batch instead of #params.
       # #params_batch will contain params received from Kafka (may be more than 1) so we can
       # process them in batches
-      setting :batch_consuming, false
+      setting :batch_consuming, default: false
       # option shutdown_timeout [Integer, nil] the number of seconds after which Karafka no
       # longer wait for the consumers to stop gracefully but instead we force terminate
       # everything.
-      setting :shutdown_timeout, 60
+      setting :shutdown_timeout, default: 60
 
       # option kafka [Hash] - optional - kafka configuration options
       setting :kafka do
         # Array with at least one host
-        setting :seed_brokers, %w[kafka://127.0.0.1:9092]
+        setting :seed_brokers, default: %w[kafka://127.0.0.1:9092]
         # option session_timeout [Integer] the number of seconds after which, if a client
         # hasn't contacted the Kafka cluster, it will be kicked out of the group.
-        setting :session_timeout, 30
+        setting :session_timeout, default: 30
         # Time that a given partition will be paused from fetching messages, when message
         # consumption fails. It allows us to process other partitions, while the error is being
         # resolved and also "slows" things down, so it prevents from "eating" up all messages and
         # consuming them with failed code. Use `nil` if you want to pause forever and never retry.
-        setting :pause_timeout, 10
+        setting :pause_timeout, default: 10
         # option pause_max_timeout [Integer, nil] the maximum number of seconds to pause for,
         # or `nil` if no maximum should be enforced.
-        setting :pause_max_timeout, nil
+        setting :pause_max_timeout, default: nil
         # option pause_exponential_backoff [Boolean] whether to enable exponential backoff
-        setting :pause_exponential_backoff, false
+        setting :pause_exponential_backoff, default: false
         # option offset_commit_interval [Integer] the interval between offset commits,
         # in seconds.
-        setting :offset_commit_interval, 10
+        setting :offset_commit_interval, default: 10
         # option offset_commit_threshold [Integer] the number of messages that can be
         # processed before their offsets are committed. If zero, offset commits are
         # not triggered by message consumption.
-        setting :offset_commit_threshold, 0
+        setting :offset_commit_threshold, default: 0
         # option heartbeat_interval [Integer] the interval between heartbeats; must be less
         # than the session window.
-        setting :heartbeat_interval, 10
+        setting :heartbeat_interval, default: 10
         # option offset_retention_time [Integer] The length of the retention window, known as
         # offset retention time
-        setting :offset_retention_time, nil
+        setting :offset_retention_time, default: nil
         # option fetcher_max_queue_size [Integer] max number of items in the fetch queue that
         # are stored for further processing. Note, that each item in the queue represents a
         # response from a single broker
-        setting :fetcher_max_queue_size, 10
+        setting :fetcher_max_queue_size, default: 10
         # option assignment_strategy [Object] a strategy determining the assignment of
         # partitions to the consumers.
-        setting :assignment_strategy, Karafka::AssignmentStrategies::RoundRobin.new
+        setting :assignment_strategy, default: Karafka::AssignmentStrategies::RoundRobin.new
         # option max_bytes_per_partition [Integer] the maximum amount of data fetched
         # from a single partition at a time.
-        setting :max_bytes_per_partition, 1_048_576
+        setting :max_bytes_per_partition, default: 1_048_576
         # whether to consume messages starting at the beginning or to just consume new messages
-        setting :start_from_beginning, true
+        setting :start_from_beginning, default: true
        # option resolve_seed_brokers [Boolean] whether to resolve each hostname of the seed
         # brokers
-        setting :resolve_seed_brokers, false
+        setting :resolve_seed_brokers, default: false
         # option min_bytes [Integer] the minimum number of bytes to read before
         # returning messages from the server; if `max_wait_time` is reached, this
         # is ignored.
-        setting :min_bytes, 1
+        setting :min_bytes, default: 1
         # option max_bytes [Integer] the maximum number of bytes to read before returning messages
         # from each broker.
-        setting :max_bytes, 10_485_760
+        setting :max_bytes, default: 10_485_760
         # option max_wait_time [Integer, Float] max_wait_time is the maximum number of seconds to
         # wait before returning data from a single message fetch. By setting this high you also
         # increase the fetching throughput - and by setting it low you set a bound on latency.
@@ -114,65 +114,65 @@ module Karafka
         # time specified. The default value is one second. If you want to have at most five
         # seconds of latency, set `max_wait_time` to 5. You should make sure
         # max_wait_time * num brokers + heartbeat_interval is less than session_timeout.
-        setting :max_wait_time, 1
+        setting :max_wait_time, default: 1
         # option automatically_mark_as_consumed [Boolean] should we automatically mark received
         # messages as consumed (processed) after non-error consumption
-        setting :automatically_mark_as_consumed, true
+        setting :automatically_mark_as_consumed, default: true
         # option reconnect_timeout [Integer] How long should we wait before trying to reconnect to
         # Kafka cluster that went down (in seconds)
-        setting :reconnect_timeout, 5
+        setting :reconnect_timeout, default: 5
         # option connect_timeout [Integer] Sets the number of seconds to wait while connecting to
         # a broker for the first time. When ruby-kafka initializes, it needs to connect to at
         # least one host.
-        setting :connect_timeout, 10
+        setting :connect_timeout, default: 10
         # option socket_timeout [Integer] Sets the number of seconds to wait when reading from or
         # writing to a socket connection to a broker. After this timeout expires the connection
         # will be killed. Note that some Kafka operations are by definition long-running, such as
         # waiting for new messages to arrive in a partition, so don't set this value too low
-        setting :socket_timeout, 30
+        setting :socket_timeout, default: 30
         # option partitioner [Object, nil] the partitioner that should be used by the client
-        setting :partitioner, nil
+        setting :partitioner, default: nil
 
         # SSL authentication related settings
         # option ca_cert [String, nil] SSL CA certificate
-        setting :ssl_ca_cert, nil
+        setting :ssl_ca_cert, default: nil
         # option ssl_ca_cert_file_path [String, nil] SSL CA certificate file path
-        setting :ssl_ca_cert_file_path, nil
+        setting :ssl_ca_cert_file_path, default: nil
         # option ssl_ca_certs_from_system [Boolean] Use the CA certs from your system's default
         # certificate store
-        setting :ssl_ca_certs_from_system, false
+        setting :ssl_ca_certs_from_system, default: false
         # option ssl_verify_hostname [Boolean] Verify the hostname for client certs
-        setting :ssl_verify_hostname, true
+        setting :ssl_verify_hostname, default: true
         # option ssl_client_cert [String, nil] SSL client certificate
-        setting :ssl_client_cert, nil
+        setting :ssl_client_cert, default: nil
         # option ssl_client_cert_key [String, nil] SSL client certificate password
-        setting :ssl_client_cert_key, nil
+        setting :ssl_client_cert_key, default: nil
         # option sasl_gssapi_principal [String, nil] sasl principal
-        setting :sasl_gssapi_principal, nil
+        setting :sasl_gssapi_principal, default: nil
         # option sasl_gssapi_keytab [String, nil] sasl keytab
-        setting :sasl_gssapi_keytab, nil
+        setting :sasl_gssapi_keytab, default: nil
         # option sasl_plain_authzid [String] The authorization identity to use
-        setting :sasl_plain_authzid, ''
+        setting :sasl_plain_authzid, default: ''
         # option sasl_plain_username [String, nil] The username used to authenticate
-        setting :sasl_plain_username, nil
+        setting :sasl_plain_username, default: nil
         # option sasl_plain_password [String, nil] The password used to authenticate
-        setting :sasl_plain_password, nil
+        setting :sasl_plain_password, default: nil
         # option sasl_scram_username [String, nil] The username used to authenticate
-        setting :sasl_scram_username, nil
+        setting :sasl_scram_username, default: nil
         # option sasl_scram_password [String, nil] The password used to authenticate
-        setting :sasl_scram_password, nil
+        setting :sasl_scram_password, default: nil
         # option sasl_scram_mechanism [String, nil] Scram mechanism, either 'sha256' or 'sha512'
-        setting :sasl_scram_mechanism, nil
+        setting :sasl_scram_mechanism, default: nil
         # option sasl_over_ssl [Boolean] whether to enforce SSL with SASL
-        setting :sasl_over_ssl, true
+        setting :sasl_over_ssl, default: true
         # option ssl_client_cert_chain [String, nil] client cert chain or nil if not used
-        setting :ssl_client_cert_chain, nil
+        setting :ssl_client_cert_chain, default: nil
         # option ssl_client_cert_key_password [String, nil] the password required to read
         # the ssl_client_cert_key
-        setting :ssl_client_cert_key_password, nil
+        setting :ssl_client_cert_key_password, default: nil
         # @param sasl_oauth_token_provider [Object, nil] OAuthBearer Token Provider instance that
         # implements method token.
-        setting :sasl_oauth_token_provider, nil
+        setting :sasl_oauth_token_provider, default: nil
       end
 
       # Namespace for internal settings that should not be modified
@@ -180,18 +180,18 @@ module Karafka
       # non global state
       setting :internal do
         # option routing_builder [Karafka::Routing::Builder] builder instance
-        setting :routing_builder, Routing::Builder.new
+        setting :routing_builder, default: Routing::Builder.new
         # option status [Karafka::Status] app status
-        setting :status, Status.new
+        setting :status, default: Status.new
         # option process [Karafka::Process] process status
         # @note In the future, we need to have a single process representation for all the karafka
         # instances
-        setting :process, Process.new
+        setting :process, default: Process.new
         # option fetcher [Karafka::Fetcher] fetcher instance
-        setting :fetcher, Fetcher.new
+        setting :fetcher, default: Fetcher.new
         # option configurators [Array<Object>] all configurators that we want to run after
         # the setup
-        setting :configurators, [Configurators::WaterDrop.new]
+        setting :configurators, default: [Configurators::WaterDrop.new]
       end
 
       class << self
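All of the defaults above remain overridable from application code; the keyword-argument migration only changes how defaults are declared, not how apps set them. A minimal, hypothetical sketch assuming the standard Karafka 1.4 setup API (`MyApp` and the chosen values are illustrative, not from this diff):

```ruby
# frozen_string_literal: true

require 'karafka'

# Hypothetical application class; the setting names match config.rb above
class MyApp < Karafka::App
  setup do |config|
    config.client_id = 'my_app'
    # Opt into batch processing (#params_batch), as in the README example
    config.batch_consuming = true
    config.kafka.seed_brokers = %w[kafka://localhost:9092]
  end
end
```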
data/lib/karafka/version.rb
CHANGED
data.tar.gz.sig
CHANGED
Binary file
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: karafka
 version: !ruby/object:Gem::Version
-  version: 1.4.8
+  version: 1.4.9
 platform: ruby
 authors:
 - Maciej Mensfeld
@@ -36,7 +36,7 @@ cert_chain:
   R2P11bWoCtr70BsccVrN8jEhzwXngMyI2gVt750Y+dbTu1KgRqZKp/ECe7ZzPzXj
   pIy9vHxTANKYVyI4qj8OrFdEM5BQNu8oQpL0iQ==
   -----END CERTIFICATE-----
-date: 2021-09-08 00:00:00.000000000 Z
+date: 2021-09-29 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: dry-configurable
@@ -191,7 +191,6 @@ files:
 - ".coditsu/ci.yml"
 - ".console_irbrc"
 - ".diffend.yml"
-- ".github/FUNDING.yml"
 - ".github/ISSUE_TEMPLATE/bug_report.md"
 - ".github/ISSUE_TEMPLATE/feature_request.md"
 - ".github/workflows/ci.yml"
@@ -306,7 +305,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.2.
+rubygems_version: 3.2.28
 signing_key:
 specification_version: 4
 summary: Ruby based framework for working with Apache Kafka
metadata.gz.sig
CHANGED
Binary file
data/.github/FUNDING.yml
DELETED