karafka 0.5.0.1 → 0.5.0.2
- checksums.yaml +4 -4
- data/.gitignore +0 -1
- data/.rspec +1 -0
- data/.ruby-version +1 -1
- data/.travis.yml +3 -0
- data/CHANGELOG.md +19 -1
- data/Gemfile.lock +15 -15
- data/README.md +79 -24
- data/karafka.gemspec +2 -2
- data/lib/karafka/base_controller.rb +2 -2
- data/lib/karafka/base_responder.rb +20 -7
- data/lib/karafka/capistrano/karafka.cap +4 -3
- data/lib/karafka/cli/info.rb +2 -1
- data/lib/karafka/connection/topic_consumer.rb +41 -13
- data/lib/karafka/monitor.rb +13 -2
- data/lib/karafka/params/params.rb +1 -1
- data/lib/karafka/parsers/json.rb +36 -0
- data/lib/karafka/patches/dry_configurable.rb +33 -0
- data/lib/karafka/routing/builder.rb +2 -1
- data/lib/karafka/routing/route.rb +27 -8
- data/lib/karafka/setup/config.rb +30 -2
- data/lib/karafka/setup/configurators/water_drop.rb +2 -2
- data/lib/karafka/templates/app.rb.example +2 -1
- data/lib/karafka/version.rb +1 -1
- metadata +10 -8
- data/lib/karafka/patches/dry/configurable/config.rb +0 -37
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: b15329e4a83277e01ced5cb6e7e0419d392707a6
+  data.tar.gz: 6080d3348e67cd407e37472e97a400f8d648bdb0
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 109523139cb10a65641300fec95edd09b55a190ae912df450d614b016c2e76988511dbcd29510400bc7d0ce3fac139035736628dc5347e2919b927f7e0284e2d
+  data.tar.gz: ba2958568a92b148c956355b164f35ec6408f35a2efe55dcb880ac8fd8009acb951ed89904d71b2341ee66304def6f125b10aa4536a620c849512432d9c94e50
data/.gitignore
CHANGED
data/.rspec
ADDED
@@ -0,0 +1 @@
+--require spec_helper
data/.ruby-version
CHANGED
@@ -1 +1 @@
-2.
+2.4.0
data/.travis.yml
CHANGED
data/CHANGELOG.md
CHANGED
@@ -1,8 +1,26 @@
 # Karafka framework changelog

+## 0.5.0.2
+- Gems update x3
+- Default Ruby set to 2.3.3
+- ~~Default Ruby set to 2.4.0~~
+- Readme updates to match bug fixes and resolved issues
+- #95 - Allow options into responder
+- #98 - Use parser when responding on a topic
+- #114 - Option to configure waterdrop connection pool timeout and concurrency
+- #118 - Added dot in topic validation format
+- #119 - Add support for authentication using SSL
+- #121 - JSON as a default for standalone responders usage
+- #122 - Allow on capistrano role customization
+- #125 - Add support to batch incoming messages
+- #130 - start_from_beginning flag on routes and default
+- #128 - Monitor caller_label not working with super on inheritance
+- Renamed *inline* to *inline_mode* to stay consistent with flags that change the way karafka works (#125)
+- Dry-configurable bump to 0.5 with fixed proc value evaluation on retrieve patch (internal change)
+
 ## 0.5.0.1
 - Fixed inconsistency in responders non-required topic definition. Now only required: false available
-- #101
+- #101 - Responders fail when multiple_usage true and required false
 - fix error on startup from waterdrop #102
 - Waterdrop 0.3.2.1 with kafka.hosts instead of kafka_hosts
 - #105 - Karafka::Monitor#caller_label not working with inherited monitors
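The practical upshot of this list: upgrading from 0.5.0.1 means renaming the *inline* setting to *inline_mode* and optionally opting into the new flags. A minimal sketch of a 0.5.0.2 setup block (the application name, broker and Redis values are placeholders):

```ruby
class App < Karafka::App
  setup do |config|
    config.name = 'example_app'                         # placeholder name
    config.kafka.hosts = %w( 127.0.0.1:9092 )           # placeholder broker
    config.redis = { url: 'redis://localhost:6379/0' }  # placeholder Redis
    config.inline_mode = false          # renamed from config.inline (#125)
    config.batch_mode = false           # new in 0.5.0.2 (#125)
    config.start_from_beginning = true  # new in 0.5.0.2 (#130)
  end
end
```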
data/Gemfile.lock
CHANGED
@@ -1,22 +1,22 @@
 PATH
   remote: .
   specs:
-    karafka (0.5.0.1)
+    karafka (0.5.0.2)
       activesupport (~> 5.0)
       celluloid (~> 0.17)
-      dry-configurable (~> 0.
+      dry-configurable (~> 0.5)
       envlogic (~> 1.0)
       rake (~> 11.3)
       ruby-kafka (= 0.3.15)
       sidekiq (~> 4.2)
       thor (~> 0.19)
-      waterdrop (~> 0.3)
+      waterdrop (~> 0.3.2.1)
       worker-glass (~> 0.2)

 GEM
   remote: https://rubygems.org/
   specs:
-    activesupport (5.0.
+    activesupport (5.0.1)
       concurrent-ruby (~> 1.0, >= 1.0.2)
       i18n (~> 0.7)
       minitest (~> 5.1)
@@ -54,13 +54,13 @@ GEM
     coercible (1.0.0)
       descendants_tracker (~> 0.0.1)
     colorize (0.8.1)
-    concurrent-ruby (1.0.
-    connection_pool (2.2.
+    concurrent-ruby (1.0.4)
+    connection_pool (2.2.1)
     descendants_tracker (0.0.4)
       thread_safe (~> 0.3, >= 0.3.1)
     diff-lcs (1.2.5)
     docile (1.1.5)
-    dry-configurable (0.
+    dry-configurable (0.5.0)
       concurrent-ruby (~> 1.0)
     envlogic (1.0.3)
       activesupport
@@ -91,7 +91,7 @@ GEM
     launchy (2.4.3)
       addressable (~> 2.3)
     method_source (0.8.2)
-    minitest (5.
+    minitest (5.10.1)
     null-logger (0.1.3)
     parser (2.3.1.2)
       ast (~> 2.2)
@@ -120,7 +120,7 @@ GEM
       rack
     rainbow (2.1.0)
     rake (11.3.0)
-    redis (3.3.
+    redis (3.3.3)
     reek (4.1.0)
       codeclimate-engine-rb (~> 0.3.1)
       parser (~> 2.3.1, >= 2.3.1.2)
@@ -166,7 +166,7 @@ GEM
     shoulda-context (1.2.1)
     shoulda-matchers (2.8.0)
       activesupport (>= 3.0.0)
-    sidekiq (4.2.
+    sidekiq (4.2.9)
       concurrent-ruby (~> 1.0)
       connection_pool (~> 2.2, >= 2.2.0)
       rack-protection (>= 1.5.0)
@@ -178,11 +178,11 @@ GEM
     simplecov-html (0.10.0)
     slop (3.6.0)
     sysexits (1.2.0)
-    thor (0.19.
+    thor (0.19.4)
     thread_safe (0.3.5)
     tilt (2.0.5)
     timecop (0.8.1)
-    timers (4.1.
+    timers (4.1.2)
       hitimes
     tzinfo (1.2.2)
       thread_safe (~> 0.1)
@@ -192,10 +192,10 @@ GEM
       coercible (~> 1.0)
       descendants_tracker (~> 0.0, >= 0.0.3)
       equalizer (~> 0.0, >= 0.0.9)
-    waterdrop (0.3.2.1)
+    waterdrop (0.3.2.2)
       bundler
       connection_pool
-      dry-configurable (~> 0.
+      dry-configurable (~> 0.5)
       null-logger
       rake
       ruby-kafka
@@ -214,4 +214,4 @@ DEPENDENCIES
   timecop

 BUNDLED WITH
-   1.
+   1.13.7
data/README.md
CHANGED
@@ -31,7 +31,8 @@ Karafka not only handles incoming messages but also provides tools for building
   - [Parser](#parser)
   - [Interchanger](#interchanger)
   - [Responder](#responder)
-  - [Inline flag](#inline-flag)
+  - [Inline mode flag](#inline-mode-flag)
+  - [Batch mode flag](#batch-mode-flag)
 - [Receiving messages](#receiving-messages)
   - [Processing messages directly (without Sidekiq)](#processing-messages-directly-without-sidekiq)
 - [Sending messages from Karafka](#sending-messages-from-karafka)
@@ -45,6 +46,7 @@ Karafka not only handles incoming messages but also provides tools for building
   - [Registering topics](#registering-topics)
   - [Responding on topics](#responding-on-topics)
   - [Response validation](#response-validation)
+  - [Response partitioning](#response-partitioning)
 - [Monitoring and logging](#monitoring-and-logging)
   - [Example monitor with Errbit/Airbrake support](#example-monitor-with-errbitairbrake-support)
   - [Example monitor with NewRelic support](#example-monitor-with-newrelic-support)
@@ -110,8 +112,10 @@ Karafka has following configuration options:
 | Option | Required | Value type | Description |
 |-------------------------------|----------|-------------------|------------------------------------------------------------------------------------------------------------|
 | name | true | String | Application name |
-| inline | false | Boolean | Do we want to perform logic without enqueuing it with Sidekiq (directly and asap) |
 | redis | true | Hash | Hash with Redis configuration options |
+| inline_mode | false | Boolean | Do we want to perform logic without enqueuing it with Sidekiq (directly and asap) |
+| batch_mode | false | Boolean | Should the incoming messages be consumed in batches, or one at a time |
+| start_from_beginning | false | Boolean | Consume messages from the beginning of the topic, or only new messages produced after the first run |
 | monitor | false | Object | Monitor instance (defaults to Karafka::Monitor) |
 | logger | false | Object | Logger instance (defaults to Karafka::Logger) |
 | kafka.hosts | false | Array<String> | Kafka server hosts. If 1 provided, Karafka will discover cluster structure automatically |
@@ -119,6 +123,11 @@ Karafka has following configuration options:
 | kafka.offset_commit_interval | false | Integer | The interval between offset commits in seconds |
 | kafka.offset_commit_threshold | false | Integer | The number of messages that can be processed before their offsets are committed |
 | kafka.heartbeat_interval | false | Integer | The interval between heartbeats |
+| kafka.ssl.ca_cert | false | String | SSL CA certificate |
+| kafka.ssl.client_cert | false | String | SSL client certificate |
+| kafka.ssl.client_cert_key | false | String | SSL client certificate password |
+| connection_pool.size | false | Integer | Connection pool size for the message producers connection pool |
+| connection_pool.timeout | false | Integer | Connection pool timeout for the message producers connection pool |

 To apply this configuration, you need to use a *setup* method from the Karafka::App class (app.rb):
@@ -126,7 +135,8 @@ To apply this configuration, you need to use a *setup* method from the Karafka::
 class App < Karafka::App
   setup do |config|
     config.kafka.hosts = %w( 127.0.0.1:9092 )
-    config.inline = false
+    config.inline_mode = false
+    config.batch_mode = false
     config.redis = {
       url: 'redis://redis.example.com:7372/1'
     }
@@ -140,12 +150,14 @@ Note: You can use any library like [Settingslogic](https://github.com/binarylogi
 ### Configurators

-
+For additional setup and/or configuration tasks you can create custom configurators. Similar to Rails these are added to a `config/initializers` directory and run after app initialization.
+
+Your new configurator class must inherit from `Karafka::Setup::Configurators::Base` and implement a `setup` method.

 Example configuration class:

 ```ruby
-class ExampleConfigurator < Base
+class ExampleConfigurator < Karafka::Setup::Configurators::Base
   def setup
     ExampleClass.logger = Karafka.logger
     ExampleClass.redis = config.redis
@@ -216,7 +228,8 @@ There are also several other methods available (optional):
 - *parser* - Class name - name of a parser class that we want to use to parse incoming data
 - *interchanger* - Class name - name of an interchanger class that we want to use to format data that we put/fetch into/from *#perform_async*
 - *responder* - Class name - name of a responder that we want to use to generate responses to other Kafka topics based on our processed data
-- *inline* - Boolean - Do we want to perform logic without enqueuing it with Sidekiq (directly and asap) - overwrites global app setting
+- *inline_mode* - Boolean - Do we want to perform logic without enqueuing it with Sidekiq (directly and asap) - overwrites global app setting
+- *batch_mode* - Boolean - Handle the incoming messages in batch, or one at a time - overwrites global app setting
@@ -227,7 +240,8 @@ App.routes.draw do
     parser Parsers::BinaryToJson
     interchanger Interchangers::Binary
     responder BinaryVideoProcessingResponder
-    inline true
+    inline_mode true
+    batch_mode true
   end

   topic :new_videos do
@@ -297,9 +311,14 @@ Keep in mind, that params might be in two states: parsed or unparsed when passed
 ##### Parser

-- *parser* - Class name - name of a parser class that we want to use to parse incoming data
+- *parser* - Class name - name of a parser class that we want to use to serialize and deserialize incoming and outgoing data.
+
+Karafka by default will parse messages with a JSON parser. If you want to change this behaviour you need to set a custom parser for each route. The parser needs to have the following class methods:
+
+- *parse* - method used to parse an incoming string into an object/hash
+- *generate* - method used in responders in order to convert objects into strings that have the desired format

-
+and raise an error that is a ::Karafka::Errors::ParserError descendant when a problem appears during the parsing process.
@@ -310,6 +329,10 @@ class XmlParser
   rescue REXML::ParseException
     raise ParserError
   end
+
+  def self.generate(object)
+    object.to_xml
+  end
 end

 App.routes.draw do
@@ -320,13 +343,13 @@ App.routes.draw do
   end
 ```

-Note that parsing failure won't stop the application flow. Instead, Karafka will assign the raw message inside the :message key of params. That way you can handle raw message inside the Sidekiq worker (you can implement error detection, etc - any "heavy" parsing logic can and should be implemented there).
+Note that parsing failure won't stop the application flow. Instead, Karafka will assign the raw message inside the :message key of params. That way you can handle the raw message inside the Sidekiq worker (you can implement error detection, etc. - any "heavy" parsing logic can and should be implemented there).

 ##### Interchanger

 - *interchanger* - Class name - name of an interchanger class that we want to use to format data that we put/fetch into/from #perform_async.

-Custom interchangers target issues with non-standard (binary, etc) data that we want to store when we do #perform_async. This data might be corrupted when fetched in a worker (see [this](https://github.com/karafka/karafka/issues/30) issue). With custom interchangers, you can encode/compress data before it is being passed to scheduling and decode/decompress it when it gets into the worker.
+Custom interchangers target issues with non-standard (binary, etc.) data that we want to store when we do #perform_async. This data might be corrupted when fetched in a worker (see [this](https://github.com/karafka/karafka/issues/30) issue). With custom interchangers, you can encode/compress data before it is being passed to scheduling and decode/decompress it when it gets into the worker.

 **Warning**: if you decide to use slow interchangers, they might significantly slow down Karafka.
@@ -353,7 +376,7 @@ end

 - *responder* - Class name - name of a responder that we want to use to generate responses to other Kafka topics based on our processed data.

-Responders are used to design the response that should be generated and sent to proper Kafka topics, once processing is done. It allows programmers to build not only data-consuming apps, but to build apps that consume data and, then, based on the business logic output send this processed data onwards (
+Responders are used to design the response that should be generated and sent to proper Kafka topics, once processing is done. It allows programmers to build not only data-consuming apps, but to build apps that consume data and, then, based on the business logic output send this processed data onwards (similarly to how Bash pipelines work).

 ```ruby
 class Responder < ApplicationResponder
@@ -369,9 +392,9 @@ end
 For more details about responders, please go to the [using responders](#using-responders) section.

-##### Inline flag
+##### Inline mode flag

-Inline flag allows you to disable Sidekiq usage by performing your #perform method business logic in the main Karafka server process.
+Inline mode flag allows you to disable Sidekiq usage by performing your #perform method business logic in the main Karafka server process.

 This flag can be useful when you want to:
@@ -380,6 +403,12 @@ This flag be useful when you want to:

 Note: Keep in mind, that by using this, you can significantly slow down Karafka. You also lose all the advantages of Sidekiq processing (reentrancy, retries, etc).

+##### Batch mode flag
+
+Batch mode allows you to increase the overall throughput of your Kafka consumer by handling incoming messages in batches, instead of one at a time.
+
+Note: The downside of increasing throughput is a slight increase in latency. Also keep in mind, that the client commits the offset of the batch's messages only **after** the entire batch has been scheduled into Sidekiq (or processed in case of inline mode).
+
 ### Receiving messages

 Karafka framework has a long running server process that is responsible for receiving messages.
@@ -403,7 +432,7 @@ If you don't want to use Sidekiq for processing and you would rather process mes
 ```ruby
 class App < Karafka::App
   setup do |config|
-    config.inline = true
+    config.inline_mode = true
     # Rest of the config
   end
 end
@@ -415,12 +444,12 @@ or per route (when you want to treat some routes in a different way):
 App.routes.draw do
   topic :binary_video_details do
     controller Videos::DetailsController
-    inline true
+    inline_mode true
   end
 end
 ```

-Note: it can slow Karafka significantly if you do heavy stuff that way.
+Note: it can slow Karafka down significantly if you do heavy stuff that way.
@@ -507,20 +536,20 @@ end
 #### Controllers callbacks

 You can add any number of *before_enqueue* callbacks. It can be a method or a block.
-before_enqueue acts in a similar way to Rails before_action so it should perform "lightweight" operations. You have access to params inside. Based on
+before_enqueue acts in a similar way to Rails before_action so it should perform "lightweight" operations. You have access to params inside. Based on them you can define which data you want to receive and which you do not.

-**Warning**: keep in mind, that all *before_enqueue* blocks/methods are executed after messages are received. This is not executed in Sidekiq, but right after receiving the incoming message. This means, that if you perform "heavy duty" operations there, Karafka might
+**Warning**: keep in mind, that all *before_enqueue* blocks/methods are executed after messages are received. This is not executed in Sidekiq, but right after receiving the incoming message. This means, that if you perform "heavy duty" operations there, Karafka might slow down significantly.

 If any of the callbacks throws :abort - the *perform* method will not be enqueued to the worker (the execution chain will stop).

-Once you run consumer - messages from Kafka server will be send to a proper controller (based on topic name).
+Once you run a consumer - messages from the Kafka server will be sent to a proper controller (based on topic name).

 Presented example controller will accept incoming messages from a Kafka topic named :karafka_topic

 ```ruby
 class TestController < ApplicationController
   # before_enqueue has access to received params.
-  # You can modify them before
+  # You can modify them before enqueuing it to sidekiq.
   before_enqueue {
     params.merge!(received_time: Time.now.to_s)
   }
@@ -577,6 +606,8 @@ class ExampleResponder < ApplicationResponder
 end
 ```

+When passing data back to Kafka, the responder uses the parser **#generate** method to convert the message object to a string. It will use the parser of the route to which the current message was directed. By default it uses the Karafka::Parsers::Json parser.
+
 Note: You can use responders outside of controllers scope, however it is not recommended because then, they won't be listed when executing **karafka flow** CLI command.

 #### Registering topics
@@ -604,7 +635,7 @@ When you receive a single HTTP request, you generate a single HTTP response. Thi

 To handle responding, you need to define *#respond* instance method. This method should accept the same amount of arguments passed into *#respond_with* method.

-In order to send a message to a given topic, you have to use
+In order to send a message to a given topic, you have to use the **#respond_to** method that accepts two arguments:

 - topic name (Symbol)
 - data you want to send (if data is not a string, the responder will try to run the #to_json method on the incoming data)
@@ -630,12 +661,36 @@ end

 In order to ensure the dataflow is as intended, the responder will validate what and where was sent, making sure that:

-- Only topics that were registered were used (no typos, etc)
+- Only topics that were registered were used (no typos, etc.)
 - Only a single message was sent to a topic that was registered without a **multiple_usage** flag
 - Any topic that was registered with the **required** flag (default behavior) has been used

 This is an automatic process and does not require any triggers.

+#### Response partitioning
+
+Kafka topics are partitioned, which means that you can assign messages to partitions based on your business logic. To do so from responders, you can pass one of the following keyword arguments as the last option of a **#respond_to** method:
+
+* partition - use it when you want to send a given message to a certain partition
+* partition_key - use it when you want to ensure that a certain group of messages is delivered to the same partition, but you don't know which partition it will be
+
+```ruby
+class ExampleResponder < ApplicationResponder
+  topic :regular_topic
+  topic :different_topic
+
+  def respond(user, profile)
+    respond_to :regular_topic, user, partition: 12
+    # This will send user details to a partition based on the first letter
+    # of login which means that for example all users with login starting
+    # with "a" will go to the same partition on the different_topic
+    respond_to :different_topic, user, partition_key: user.login[0].downcase
+  end
+end
+```
+
+If no keys are passed, the producer will randomly assign a partition.
+
 ## Monitoring and logging

 Karafka provides a simple monitor (Karafka::Monitor) with a really small API. You can use it to develop your own monitoring system (using for example NewRelic). By default, the only thing that is hooked up to this monitoring is a Karafka logger (Karafka::Logger). It is based on a standard [Ruby logger](http://ruby-doc.org/stdlib-2.2.3/libdoc/logger/rdoc/Logger.html).
@@ -647,7 +702,7 @@ class App < Karafka::App
   setup do |config|
     # Other setup stuff...
     config.logger = MyCustomLogger.new
-    config.monitor = CustomMonitor.
+    config.monitor = CustomMonitor.instance
   end
 end
 ```
data/karafka.gemspec
CHANGED
@@ -21,11 +21,11 @@ Gem::Specification.new do |spec|
   spec.add_dependency 'worker-glass', '~> 0.2'
   spec.add_dependency 'celluloid', '~> 0.17'
   spec.add_dependency 'envlogic', '~> 1.0'
-  spec.add_dependency 'waterdrop', '~> 0.3'
+  spec.add_dependency 'waterdrop', '~> 0.3.2.1'
   spec.add_dependency 'rake', '~> 11.3'
   spec.add_dependency 'thor', '~> 0.19'
   spec.add_dependency 'activesupport', '~> 5.0'
-  spec.add_dependency 'dry-configurable', '~> 0.
+  spec.add_dependency 'dry-configurable', '~> 0.5'
   spec.required_ruby_version = '>= 2.3.0'

   spec.files = `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(spec)/}) }
data/lib/karafka/base_controller.rb
CHANGED
@@ -103,7 +103,7 @@ module Karafka
     # will schedule a perform task in sidekiq
     def schedule
       run_callbacks :schedule do
-        inline ? perform_inline : perform_async
+        inline_mode ? perform_inline : perform_async
       end
     end
@@ -149,7 +149,7 @@ module Karafka
       raise(Errors::ResponderMissing, self.class) unless responder

       Karafka.monitor.notice(self.class, data: data)
-      responder.new.call(*data)
+      responder.new(parser).call(*data)
     end

     # Executes perform code immediately (without enqueuing)
data/lib/karafka/base_responder.rb
CHANGED
@@ -16,6 +16,15 @@ module Karafka
   #   end
   # end
   #
+  # @example Responding to a topic with extra options
+  #   class Responder < BaseResponder
+  #     topic :new_action
+  #
+  #     def respond(data)
+  #       respond_to :new_action, data, partition_key: 'thing'
+  #     end
+  #   end
+  #
   # @example Marking topic as not required (we won't have to use it)
   #   class Responder < BaseResponder
   #     topic :required_topic
@@ -65,8 +74,11 @@ module Karafka
     end

     # Creates a responder object
+    # @param parser_class [Class] parser class that we can use to generate appropriate string
+    #   or nothing if we want to default to Karafka::Parsers::Json
     # @return [Karafka::BaseResponder] base responder descendant responder
-    def initialize
+    def initialize(parser_class = Karafka::Parsers::Json)
+      @parser_class = parser_class
       @messages_buffer = {}
     end
@@ -94,14 +106,13 @@ module Karafka
     # as many times as we need. Especially when we have 1:n flow
     # @param topic [Symbol, String] topic to which we want to respond
     # @param data [String, Object] string or object that we want to send
-    # @
-    #   on it.
+    # @param options [Hash] options for waterdrop (e.g. partition_key)
     # @note Respond to does not accept multiple data arguments.
-    def respond_to(topic, data)
-      Karafka.monitor.notice(self.class, topic: topic, data: data)
+    def respond_to(topic, data, options = {})
+      Karafka.monitor.notice(self.class, topic: topic, data: data, options: options)

       messages_buffer[topic.to_s] ||= []
-      messages_buffer[topic.to_s] << (data
+      messages_buffer[topic.to_s] << [@parser_class.generate(data), options]
     end
@@ -122,7 +133,9 @@ module Karafka
     # what we send is legit and it will go to proper topics
     def deliver!
       messages_buffer.each do |topic, data_elements|
-        data_elements.each
+        data_elements.each do |(data, options)|
+          ::WaterDrop::Message.new(topic, data, options).send!
+        end
       end
     end
   end
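Taken together, these responder changes let a responder be built with an explicit parser class and forward per-message options to WaterDrop. A sketch of standalone usage based on the API shown above (the responder name, topic and payload are illustrative; XmlParser refers to the example parser from the README portion of this diff):

```ruby
class UserResponder < Karafka::BaseResponder
  topic :users_updated

  def respond(user)
    # the options hash is passed through to WaterDrop::Message (#95)
    respond_to :users_updated, user, partition_key: user['id'].to_s
  end
end

# Defaults to Karafka::Parsers::Json for standalone usage (#121)...
UserResponder.new.call('id' => 1, 'name' => 'Jane')
# ...or inject any class implementing .generate (#98)
UserResponder.new(XmlParser).call('id' => 1, 'name' => 'Jane')
```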
data/lib/karafka/capistrano/karafka.cap
CHANGED
@@ -2,6 +2,7 @@
 # @see https://github.com/seuros/capistrano-puma/blob/master/lib/capistrano/tasks/puma.rake
 namespace :load do
   task :defaults do
+    set :karafka_role, :app
     set :karafka_default_hooks, -> { true }
     set :karafka_env, -> { fetch(:karafka_env, fetch(:environment)) }
     set :karafka_pid, -> { File.join(shared_path, 'tmp', 'pids', 'karafka.pid') }
|
|
17
18
|
namespace :karafka do
|
18
19
|
desc 'Stop Karafka'
|
19
20
|
task :stop do
|
20
|
-
on roles(:
|
21
|
+
on roles(fetch(:karafka_role)) do |host|
|
21
22
|
within shared_path do
|
22
23
|
# If there's no pidfile it means that Karafka is not running
|
23
24
|
next unless test "cat #{fetch(:karafka_pid)}"
|
@@ -40,7 +41,7 @@ namespace :karafka do

   desc 'Start Karafka'
   task :start do
-    on roles(:app) do |host|
+    on roles(fetch(:karafka_role)) do |host|
       within current_path do
         # We use all 3 because when combined with Sinatra/Rails it will use their parts as well
         # so we want to set proper env for any of them
@@ -63,7 +64,7 @@ namespace :karafka do

   desc 'Status Karafka'
   task :status do
-    on roles(:app) do |host|
+    on roles(fetch(:karafka_role)) do |host|
       if test "cat #{fetch(:karafka_pid)}"
         pid = capture "cat #{fetch(:karafka_pid)}"
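Since all three tasks now read fetch(:karafka_role), a deployment can pin Karafka to dedicated hosts (#122). A minimal sketch for config/deploy.rb (the role name and host are examples; the default remains :app):

```ruby
# config/deploy.rb
set :karafka_role, :karafka_server

# only hosts carrying that role will run karafka:start/stop/status
server 'worker-1.example.com', user: 'deploy', roles: %w(karafka_server)
```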
data/lib/karafka/cli/info.rb
CHANGED
@@ -12,7 +12,8 @@ module Karafka
     info = [
       "Karafka framework version: #{Karafka::VERSION}",
       "Application name: #{config.name}",
-      "Inline mode: #{config.inline}",
+      "Inline mode: #{config.inline_mode}",
+      "Batch mode: #{config.batch_mode}",
       "Number of threads: #{config.concurrency}",
       "Boot file: #{Karafka.boot_file}",
       "Environment: #{Karafka.env}",
data/lib/karafka/connection/topic_consumer.rb
CHANGED
@@ -15,7 +15,9 @@ module Karafka
     # @yieldparam [Kafka::FetchedMessage] kafka fetched message
     # @note This will yield with a raw message - no preprocessing or reformatting
     def fetch_loop
-      kafka_consumer.each_message do |message|
+      send(
+        @route.batch_mode ? :consume_each_batch : :consume_each_message
+      ) do |message|
         yield(message)
       end
     end
@@ -28,27 +30,53 @@ module Karafka

     private

+    # Consumes messages from Kafka in batches
+    # @yieldparam [Kafka::FetchedMessage] kafka fetched message
+    def consume_each_batch
+      kafka_consumer.each_batch do |batch|
+        batch.messages.each do |message|
+          yield(message)
+        end
+      end
+    end
+
+    # Consumes messages from Kafka one by one
+    # @yieldparam [Kafka::FetchedMessage] kafka fetched message
+    def consume_each_message
+      kafka_consumer.each_message do |message|
+        yield(message)
+      end
+    end
+
     # @return [Kafka::Consumer] returns a ready to consume Kafka consumer
     # that is set up to consume a given routes topic
     def kafka_consumer
-      kafka = Kafka.new(
-        seed_brokers: ::Karafka::App.config.kafka.hosts,
-        logger: ::Karafka.logger,
-        client_id: ::Karafka::App.config.name
-      )
-
-      @kafka_consumer = kafka.consumer(
+      @kafka_consumer ||= kafka.consumer(
         group_id: @route.group,
         session_timeout: ::Karafka::App.config.kafka.session_timeout,
         offset_commit_interval: ::Karafka::App.config.kafka.offset_commit_interval,
         offset_commit_threshold: ::Karafka::App.config.kafka.offset_commit_threshold,
         heartbeat_interval: ::Karafka::App.config.kafka.heartbeat_interval
-      )
+      ).tap do |consumer|
+        consumer.subscribe(
+          @route.topic,
+          start_from_beginning: @route.start_from_beginning
+        )
+      end
     end

+    # @return [Kafka] returns a Kafka
+    # @note We don't cache it internally because we cache kafka_consumer that uses kafka
+    #   object instance
+    def kafka
+      Kafka.new(
+        seed_brokers: ::Karafka::App.config.kafka.hosts,
+        logger: ::Karafka.logger,
+        client_id: ::Karafka::App.config.name,
+        ssl_ca_cert: ::Karafka::App.config.kafka.ssl.ca_cert,
+        ssl_client_cert: ::Karafka::App.config.kafka.ssl.client_cert,
+        ssl_client_cert_key: ::Karafka::App.config.kafka.ssl.client_cert_key
+      )
+    end
   end
 end
data/lib/karafka/monitor.rb
CHANGED
@@ -73,8 +73,19 @@ module Karafka
     def caller_label
       # We need to calculate ancestors because if someone inherits
       # from this class, the caller chain is longer
-      index = self.class.ancestors.index(Karafka::Monitor)
-      caller_locations
+      index = self.class.ancestors.index(Karafka::Monitor)
+      # caller_locations differs in result depending on whether this is a subclass of
+      # Karafka::Monitor, the basic Karafka::Monitor itself or a super for a subclass.
+      # So to cover all the cases we need to differentiate.
+      # @see https://github.com/karafka/karafka/issues/128
+      # @note It won't work if the monitor caller_label caller class is defined using
+      #   define method
+      super_execution = caller_locations(1, 2)[0].label == caller_locations(1, 2)[1].label
+
+      scope = super_execution ? 1 : nil
+      scope ||= index.positive? ? 0 : 1
+
+      caller_locations(index + 1, 2)[scope].label
     end

     # @return [Logger] logger instance
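The rework above fixes #128: a monitor subclass can now call super without caller_label resolving to the wrong stack frame. A sketch of the pattern this enables (the subclass and its logging line are illustrative):

```ruby
class AppMonitor < Karafka::Monitor
  def notice(caller_class, options = {})
    # caller_label now points at the method that triggered the notice,
    # even though we pass through super in a subclass
    Karafka.logger.info("#{caller_class}##{caller_label}")
    super
  end
end
```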
data/lib/karafka/params/params.rb
CHANGED
@@ -92,7 +92,7 @@ module Karafka
     def parse(content)
       self[:parser].parse(content)
     # We catch both of them, because for default JSON - we use JSON parser directly
-    rescue ::Karafka::Errors::ParserError
+    rescue ::Karafka::Errors::ParserError => e
       Karafka.monitor.notice_error(self.class, e)
       return { message: content }
     ensure
data/lib/karafka/parsers/json.rb
ADDED
@@ -0,0 +1,36 @@
+module Karafka
+  # Module for all supported by default parsers for incoming/outgoing data
+  module Parsers
+    # Default Karafka Json parser for serializing and deserializing data
+    class Json
+      # @param content [String] content based on which we want to get our hash
+      # @return [Hash] hash with parsed JSON data
+      # @example
+      #   Json.parse("{\"a\":1}") #=> { 'a' => 1 }
+      def self.parse(content)
+        ::JSON.parse(content)
+      rescue ::JSON::ParserError => e
+        raise ::Karafka::Errors::ParserError, e
+      end
+
+      # @param content [Object] any object that we want to convert to a json string
+      # @return [String] Valid JSON string containing serialized data
+      # @raise [Karafka::Errors::ParserError] raised when we don't have a way to parse
+      #   given content to a json string format
+      # @note When a string is passed to this method, we assume that it is already a json
+      #   string and we don't serialize it again. This allows us to serialize data before
+      #   it is being forwarded to a parser if we want to have a custom (not that simple)
+      #   json serialization
+      #
+      # @example From an ActiveRecord object
+      #   Json.generate(Repository.first) #=> "{\"repository\":{\"id\":\"04b504e0\"}}"
+      # @example From a string (no changes)
+      #   Json.generate("{\"a\":1}") #=> "{\"a\":1}"
+      def self.generate(content)
+        return content if content.is_a?(String)
+        return content.to_json if content.respond_to?(:to_json)
+        raise Karafka::Errors::ParserError, content
+      end
+    end
+  end
+end
data/lib/karafka/patches/dry_configurable.rb
ADDED
@@ -0,0 +1,33 @@
+module Karafka
+  # Namespace for patches of external gems/libraries
+  module Patches
+    # Patch that will allow to use proc based lazy evaluated settings with Dry Configurable
+    # @see https://github.com/dry-rb/dry-configurable/blob/master/lib/dry/configurable.rb
+    module DryConfigurable
+      # We overwrite ::Dry::Configurable::Config to change the on proc behaviour
+      # Unfortunately it does not provide an on call proc evaluation, so
+      # this feature had to be added here on demand.
+      # @param args Any arguments that DryConfigurable::Config accepts
+      def initialize(*args)
+        super
+
+        @config.each do |key, _value|
+          rebuild(key)
+        end
+      end
+
+      private
+
+      # Method that rebuilds a given accessor, so when it contains a proc value, it will
+      #   evaluate it upon return
+      # @param method_name [Symbol] name of an accessor that we want to rebuild
+      def rebuild(method_name)
+        define_singleton_method method_name do
+          super().is_a?(Proc) ? super().call : super()
+        end
+      end
+    end
+  end
+end
+
+::Dry::Configurable::Config.prepend(Karafka::Patches::DryConfigurable)
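The point of the patch: a setting assigned a proc is now evaluated when it is read, not when it is defined. A sketch of the behaviour it enables (values are illustrative; the connection_pool.size default is the actual use case from config.rb in this release):

```ruby
Karafka::App.setup do |config|
  # stored as a proc and evaluated on each read, so Sidekiq.options
  # does not need to be finalized at definition time
  config.connection_pool.size = lambda do
    [Karafka::App.routes.count, Sidekiq.options[:concurrency]].max
  end
end

Karafka::App.config.connection_pool.size #=> an Integer, not a Proc
```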
data/lib/karafka/routing/route.rb
CHANGED
@@ -9,8 +9,9 @@ module Karafka
   # - parser - What parser do we want to use to unparse the data (optional)
   # - interchanger - What interchanger to encode/decode data do we want to use (optional)
   class Route
-    # Only ASCII alphanumeric characters
-    NAME_FORMAT = /\A(\w|\-)+\z/
+    # Only ASCII alphanumeric characters, underscore, dash and dots
+    # are allowed in topics and groups
+    NAME_FORMAT = /\A(\w|\-|\.)+\z/

     # Options that we can set per each route
     ATTRIBUTES = %i(
@@ -20,7 +21,9 @@ module Karafka
       parser
       interchanger
       responder
-      inline
+      inline_mode
+      batch_mode
+      start_from_beginning
     ).freeze

     ATTRIBUTES.each { |attr| attr_writer(attr) }
@@ -63,9 +66,9 @@ module Karafka
     end

     # @return [Class] Parser class (not instance) that we want to use to unparse Kafka messages
-    # @note If not provided - will use
+    # @note If not provided - will use Json as default
     def parser
-      @parser ||=
+      @parser ||= Karafka::Parsers::Json
     end

     # @return [Class] Interchanger class (not an instance) that we want to use to interchange
@@ -77,9 +80,25 @@ module Karafka
     # @return [Boolean] Should we perform execution in the background (default) or
     #   inline. This can be set globally and overwritten by a per route setting
     # @note This method can be set to false, so direct assignment ||= would not work
-    def inline
-      return @inline unless @inline.nil?
-      @inline = Karafka::App.config.inline
+    def inline_mode
+      return @inline_mode unless @inline_mode.nil?
+      @inline_mode = Karafka::App.config.inline_mode
+    end
+
+    # @return [Boolean] Should the consumer handle incoming events one at a time, or in batch
+    def batch_mode
+      return @batch_mode unless @batch_mode.nil?
+      @batch_mode = Karafka::App.config.batch_mode
+    end
+
+    # For each topic subscription it's possible to decide whether to consume messages starting
+    # at the beginning of the topic or to just consume new messages that are produced to
+    # the topic.
+    # @return [Boolean] Should we consume from the beginning or only new incoming messages on
+    #   the first run
+    def start_from_beginning
+      return @start_from_beginning unless @start_from_beginning.nil?
+      @start_from_beginning = Karafka::App.config.start_from_beginning
     end

     # Checks if topic and group have proper format (acceptable by Kafka)
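With these accessors, each of the three flags can be set per route and falls back to the app-level setting when left unset. A sketch (topic and controller names are placeholders):

```ruby
App.routes.draw do
  topic :realtime_events do
    controller RealtimeEventsController # placeholder controller
    inline_mode true           # overrides config.inline_mode for this route
    batch_mode true            # consume via ruby-kafka each_batch
    start_from_beginning false # first run picks up only new messages
  end
end
```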
data/lib/karafka/setup/config.rb
CHANGED
@@ -15,8 +15,8 @@ module Karafka
     # Available settings
     # option name [String] current app name - used to provide default Kafka groups namespaces
     setting :name
-    # If inline is set to true, we won't enqueue jobs, instead we will run them immediately
-    setting :inline, false
+    # If inline_mode is set to true, we won't enqueue jobs, instead we will run them immediately
+    setting :inline_mode, false
     # option logger [Instance] logger that we want to use
     setting :logger, ::Karafka::Logger.instance
     # option monitor [Instance] monitor that we want to use (defaults to Karafka::Monitor)
@@ -25,6 +25,24 @@ module Karafka
     # Note that redis could be rewritten using nested options, but it is a sidekiq specific
     # stuff and we don't want to touch it
     setting :redis
+    # If batch_mode is true, incoming messages will be handled in batch, otherwise one at a time.
+    setting :batch_mode, false
+    # whether to consume messages starting at the beginning or to just consume new messages
+    setting :start_from_beginning, true
+
+    # Connection pool options are used for the producer (WaterDrop)
+    # They are configured automatically based on the Sidekiq concurrency and the number of routes
+    # The bigger one is selected as we need to be able to send messages from both places
+    setting :connection_pool do
+      # Connection pool size for producers. Note that we take a bigger number because there
+      # are cases when we might have more sidekiq threads than Karafka routes (small app)
+      # or the opposite for bigger systems
+      setting :size, -> { [::Karafka::App.routes.count, Sidekiq.options[:concurrency]].max }
+      # How long should we wait for a working resource from the pool before raising a timeout
+      # With a proper connection pool size, this should never happen
+      setting :timeout, 5
+    end
+
     # option kafka [Hash] - optional - kafka configuration options (hosts)
     setting :kafka do
       # Array with at least one host
@@ -42,6 +60,16 @@ module Karafka
       # option heartbeat_interval [Integer] the interval between heartbeats; must be less
       #   than the session window.
       setting :heartbeat_interval, 10
+
+      # SSL authentication related settings
+      setting :ssl do
+        # option ca_cert [String] SSL CA certificate
+        setting :ca_cert, nil
+        # option client_cert [String] SSL client certificate
+        setting :client_cert, nil
+        # option client_cert_key [String] SSL client certificate password
+        setting :client_cert_key, nil
+      end
     end

     # This is configured automatically, don't overwrite it!
data/lib/karafka/setup/configurators/water_drop.rb
CHANGED
@@ -7,8 +7,8 @@ module Karafka
       def setup
         ::WaterDrop.setup do |water_config|
           water_config.send_messages = true
-          water_config.connection_pool_size = config.
-          water_config.connection_pool_timeout =
+          water_config.connection_pool_size = config.connection_pool.size
+          water_config.connection_pool_timeout = config.connection_pool.timeout
           water_config.kafka.hosts = config.kafka.hosts
           water_config.raise_on_failure = true
         end
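Putting the new settings together, an app talking to an SSL-protected cluster with a tuned producer pool might be configured like this sketch (hosts and certificate paths are placeholders):

```ruby
class App < Karafka::App
  setup do |config|
    config.kafka.hosts = %w( kafka1.example.com:9093 )           # placeholder
    config.kafka.ssl.ca_cert = File.read('certs/ca_cert.pem')    # placeholder path
    config.kafka.ssl.client_cert = File.read('certs/client.pem') # placeholder path
    config.kafka.ssl.client_cert_key = File.read('certs/client.key')
    config.connection_pool.size = 25 # override the auto-computed default
    config.connection_pool.timeout = 5
  end
end
```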
data/lib/karafka/version.rb
CHANGED
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: karafka
 version: !ruby/object:Gem::Version
-  version: 0.5.0.1
+  version: 0.5.0.2
 platform: ruby
 authors:
 - Maciej Mensfeld
@@ -10,7 +10,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2017-02-09 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler
@@ -102,14 +102,14 @@ dependencies:
   requirements:
   - - "~>"
     - !ruby/object:Gem::Version
-      version: '0.3'
+      version: 0.3.2.1
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-      version: '0.3'
+      version: 0.3.2.1
 - !ruby/object:Gem::Dependency
   name: rake
   requirement: !ruby/object:Gem::Requirement
@@ -158,14 +158,14 @@ dependencies:
   requirements:
   - - "~>"
     - !ruby/object:Gem::Version
-      version: 0.
+      version: '0.5'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-      version: 0.
+      version: '0.5'
 description: " Framework used to simplify Apache Kafka based Ruby applications development "
 email:
 - maciej@mensfeld.pl
@@ -177,6 +177,7 @@ extensions: []
 extra_rdoc_files: []
 files:
 - ".gitignore"
+- ".rspec"
 - ".ruby-gemset"
 - ".ruby-version"
 - ".travis.yml"
@@ -217,7 +218,8 @@ files:
 - lib/karafka/monitor.rb
 - lib/karafka/params/interchanger.rb
 - lib/karafka/params/params.rb
-- lib/karafka/patches/dry/configurable/config.rb
+- lib/karafka/parsers/json.rb
+- lib/karafka/patches/dry_configurable.rb
 - lib/karafka/process.rb
 - lib/karafka/responders/builder.rb
 - lib/karafka/responders/topic.rb
@@ -262,7 +264,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     version: '0'
 requirements: []
 rubyforge_project:
-rubygems_version: 2.
+rubygems_version: 2.6.10
 signing_key:
 specification_version: 4
 summary: Ruby based framework for working with Apache Kafka
data/lib/karafka/patches/dry/configurable/config.rb
DELETED
@@ -1,37 +0,0 @@
-# Patch that will allow to use proc based lazy evaluated settings with Dry Configurable
-# @see https://github.com/dry-rb/dry-configurable/blob/master/lib/dry/configurable.rb
-module Dry
-  # Configurable module for Dry-Configurable
-  module Configurable
-    # Config node instance struct
-    class Config
-      # @param args [Array] All arguments that a Struct accepts
-      def initialize(*args)
-        super
-        setup_dynamics
-      end
-
-      private
-
-      # Method that sets up all the proc based lazy evaluated dynamic config values
-      def setup_dynamics
-        each_pair do |key, value|
-          next unless value.is_a?(Proc)
-
-          rebuild(key)
-        end
-      end
-
-      # Method that rebuilds a given accessor, so when it consists a proc value, it will
-      #   evaluate it upon return
-      # @param method_name [Symbol] name of an accessor that we want to rebuild
-      def rebuild(method_name)
-        metaclass = class << self; self; end
-
-        metaclass.send(:define_method, method_name) do
-          super().is_a?(Proc) ? super().call : super()
-        end
-      end
-    end
-  end
-end