deimos-ruby 1.3.0.pre.beta5 → 1.4.0.pre.beta1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: 5bf921e758c4cab2c5d35334e1f7dc99382ce414ce1c7492d42175f4c874ae54
-   data.tar.gz: f1d2c561854e12adf325ee3a6805f3ddac40f185fe5420932438e38ffee43688
+   metadata.gz: 8b4c2a6f41276ada5bf402ac01c6cf445a014a155bbcc6fe48e83bbb9aa1d8a2
+   data.tar.gz: 1041a4c5051900134f66893e86564c0ef896df269d0af9973aaf7f1f7cecb733
  SHA512:
-   metadata.gz: b9fe23ad5ecb545ad6a8743bfd5bfb735d9b7d672ae5dfee16d2456b02397c7f8ef3adc71a43a4043aa02974bfa1d6a7a60cd373e8f522e03eba739e877c82cb
-   data.tar.gz: df9ea2796a368360ff79ada8735060f7585408d7c59043b488a31bf9fcbb4e7bcd52017882f2f458b8b48d30ff8807e93c640820fef264d859bc1be329bb668e
+   metadata.gz: fdea52b89a1b788d9bdd5b6244d4f278139fb01e10b201720dd2afe052a418251a784d2756624cb3896d705016dbca0252206b27163a2c5e2c7eb200294b5979
+   data.tar.gz: 5df060deb523e4b4879f47a18034960afb905edde95da9ac98f7dd5f5e0a022e1e8be3b81c13685c26c6837ed3ef6505394607469b075b8e2b1068ddabf61bcb
data/CHANGELOG.md CHANGED
@@ -7,15 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
  ## UNRELEASED
 
- # [1.3.0-beta5] - 2020-01-14
- - Added `db_producer.insert` and `db_producer.process` metrics.
-
- # [1.3.0-beta4] - 2019-12-02
- - Fixed bug whereby running `rake deimos:start` without
-   specifying a producer backend would crash.
-
- # [1.3.0-beta3] - 2019-11-26
- - Fixed bug in TestHelpers where key_decoder was not stubbed out.
+ # [1.4.0-beta1] - 2019-11-22
+ - Complete revamp of configuration method.
 
  # [1.3.0-beta2] - 2019-11-22
  - Fixed bug where consumers would require a key config in all cases
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
  PATH
    remote: .
    specs:
-     deimos-ruby (1.3.0.pre.beta4)
+     deimos-ruby (1.4.0.pre.beta1)
        avro-patches (~> 0.3)
        avro_turf (~> 0.8)
        phobos (~> 1.8.2.pre.beta2)
@@ -10,32 +10,6 @@ PATH
  GEM
    remote: https://rubygems.org/
    specs:
-     actioncable (5.2.0)
-       actionpack (= 5.2.0)
-       nio4r (~> 2.0)
-       websocket-driver (>= 0.6.1)
-     actionmailer (5.2.0)
-       actionpack (= 5.2.0)
-       actionview (= 5.2.0)
-       activejob (= 5.2.0)
-       mail (~> 2.5, >= 2.5.4)
-       rails-dom-testing (~> 2.0)
-     actionpack (5.2.0)
-       actionview (= 5.2.0)
-       activesupport (= 5.2.0)
-       rack (~> 2.0)
-       rack-test (>= 0.6.3)
-       rails-dom-testing (~> 2.0)
-       rails-html-sanitizer (~> 1.0, >= 1.0.2)
-     actionview (5.2.0)
-       activesupport (= 5.2.0)
-       builder (~> 3.1)
-       erubi (~> 1.4)
-       rails-dom-testing (~> 2.0)
-       rails-html-sanitizer (~> 1.0, >= 1.0.3)
-     activejob (5.2.0)
-       activesupport (= 5.2.0)
-       globalid (>= 0.3.6)
      activemodel (5.2.0)
        activesupport (= 5.2.0)
      activerecord (5.2.0)
@@ -44,10 +18,6 @@ GEM
        arel (>= 9.0)
      activerecord-import (0.28.1)
        activerecord (>= 3.2)
-     activestorage (5.2.0)
-       actionpack (= 5.2.0)
-       activerecord (= 5.2.0)
-       marcel (~> 0.3.1)
      activesupport (5.2.0)
        concurrent-ruby (~> 1.0, >= 1.0.2)
        i18n (>= 0.7, < 2)
@@ -62,24 +32,19 @@ GEM
      avro_turf (0.11.0)
        avro (>= 1.7.7, < 1.10)
        excon (~> 0.45)
-     builder (3.2.3)
      coderay (1.1.2)
      concurrent-ruby (1.1.3)
      concurrent-ruby-ext (1.1.3)
        concurrent-ruby (= 1.1.3)
-     crass (1.0.5)
      ddtrace (0.22.0)
        msgpack
      diff-lcs (1.3)
      digest-crc (0.4.1)
      dogstatsd-ruby (4.2.0)
-     erubi (1.9.0)
-     excon (0.71.0)
+     excon (0.69.1)
      exponential-backoff (0.0.4)
      ffi (1.11.1)
      formatador (0.2.5)
-     globalid (0.4.2)
-       activesupport (>= 4.2.0)
      guard (2.14.2)
        formatador (>= 0.2.4)
        listen (>= 2.7, < 4.0)
@@ -108,26 +73,13 @@ GEM
      logging (2.2.2)
        little-plugger (~> 1.1)
        multi_json (~> 1.10)
-     loofah (2.4.0)
-       crass (~> 1.0.2)
-       nokogiri (>= 1.5.9)
      lumberjack (1.0.12)
-     mail (2.7.1)
-       mini_mime (>= 0.1.1)
-     marcel (0.3.3)
-       mimemagic (~> 0.3.2)
      method_source (0.9.0)
-     mimemagic (0.3.3)
-     mini_mime (1.0.2)
-     mini_portile2 (2.4.0)
      minitest (5.11.3)
      msgpack (1.2.10)
      multi_json (1.14.1)
      mysql2 (0.5.2)
      nenv (0.3.0)
-     nio4r (2.5.2)
-     nokogiri (1.10.5)
-       mini_portile2 (~> 2.4.0)
      notiffany (0.1.1)
        nenv (~> 0.1)
        shellany (~> 0.0)
@@ -146,33 +98,6 @@ GEM
      pry (0.11.3)
        coderay (~> 1.1.0)
        method_source (~> 0.9.0)
-     rack (2.0.7)
-     rack-test (1.1.0)
-       rack (>= 1.0, < 3)
-     rails (5.2.0)
-       actioncable (= 5.2.0)
-       actionmailer (= 5.2.0)
-       actionpack (= 5.2.0)
-       actionview (= 5.2.0)
-       activejob (= 5.2.0)
-       activemodel (= 5.2.0)
-       activerecord (= 5.2.0)
-       activestorage (= 5.2.0)
-       activesupport (= 5.2.0)
-       bundler (>= 1.3.0)
-       railties (= 5.2.0)
-       sprockets-rails (>= 2.0.0)
-     rails-dom-testing (2.0.3)
-       activesupport (>= 4.2.0)
-       nokogiri (>= 1.6)
-     rails-html-sanitizer (1.3.0)
-       loofah (~> 2.3)
-     railties (5.2.0)
-       actionpack (= 5.2.0)
-       activesupport (= 5.2.0)
-       method_source
-       rake (>= 0.8.7)
-       thor (>= 0.18.1, < 2.0)
      rainbow (3.0.0)
      rake (10.5.0)
      rb-fsevent (0.10.2)
@@ -207,22 +132,12 @@ GEM
      ruby-progressbar (1.10.1)
      ruby_dep (1.5.0)
      shellany (0.0.1)
-     sprockets (4.0.0)
-       concurrent-ruby (~> 1.0)
-       rack (> 1, < 3)
-     sprockets-rails (3.2.1)
-       actionpack (>= 4.0)
-       activesupport (>= 4.0)
-       sprockets (>= 3.0.0)
      sqlite3 (1.3.13)
      thor (0.20.3)
      thread_safe (0.3.6)
      tzinfo (1.2.5)
        thread_safe (~> 0.1)
      unicode-display_width (1.6.0)
-     websocket-driver (0.7.1)
-       websocket-extensions (>= 0.1.0)
-     websocket-extensions (0.1.4)
 
  PLATFORMS
    ruby
@@ -239,7 +154,6 @@ DEPENDENCIES
    guard-rubocop (~> 1)
    mysql2 (~> 0.5)
    pg (~> 1.1)
-   rails (~> 5.2)
    rake (~> 10)
    rspec (~> 3)
    rspec_junit_formatter (~> 0.3)
data/README.md CHANGED
@@ -58,88 +58,7 @@ gem 'deimos-ruby', '~> 1.1'
 
  # Configuration
 
- To configure the gem, use `configure` in an initializer:
-
- ```ruby
- Deimos.configure do |config|
-   # Configure logger
-   config.logger = Rails.logger
-
-   # Phobos settings
-   config.phobos_config_file = 'config/phobos.yml'
-   config.schema_registry_url = 'https://my-schema-registry.com'
-   config.seed_broker = 'my.seed.broker.0.net:9093,my.seed.broker.1.net:9093'
-   config.ssl_enabled = ENV['KAFKA_SSL_ENABLED']
-   if config.ssl_enabled
-     config.ssl_ca_cert = File.read(ENV['SSL_CA_CERT'])
-     config.ssl_client_cert = File.read(ENV['SSL_CLIENT_CERT'])
-     config.ssl_client_cert_key = File.read(ENV['SSL_CLIENT_CERT_KEY'])
-   end
-
-   # Other settings
-
-   # Local path to find schemas, for publishing and testing consumers
-   config.schema_path = "#{Rails.root}/app/schemas"
-
-   # Default namespace for producers to use
-   config.producer_schema_namespace = 'com.deimos.my_app'
-
-   # Prefix for all topics, e.g. environment name
-   config.producer_topic_prefix = 'myenv.'
-
-   # Disable all producers - e.g. when doing heavy data lifting and events
-   # would be fired a different way
-   config.disable_producers = true
-
-   # Default behavior is to swallow uncaught exceptions and log to DataDog.
-   # Set this to true to instead raise all errors. Note that raising an error
-   # will ensure that the message cannot be processed - if there is a bad
-   # message which will always raise that error, your consumer will not
-   # be able to proceed past it and will be stuck forever until you fix
-   # your code.
-   config.reraise_consumer_errors = true
-
-   # Another way to handle errors is to set reraise_consumer_errors to false
-   # but to set a global "fatal error" block that determines when to reraise:
-   config.fatal_error do |exception, payload, metadata|
-     exception.is_a?(BadError)
-   end
-   # Another example would be to check the database connection and fail
-   # if the DB is down entirely.
-
-   # Set to true to send consumer lag metrics
-   config.report_lag = %w(production staging).include?(Rails.env)
-
-   # Change the default backend. See Backends, below.
-   config.backend = :db
-
-   # Database Backend producer configuration
-
-   # Logger for DB producer
-   config.db_producer.logger = Logger.new('/db_producer.log')
-
-   # List of topics to print full messages for, or :all to print all
-   # topics. This can introduce slowdown since it needs to decode
-   # each message using the schema registry.
-   config.db_producer.log_topics = ['topic1', 'topic2']
-
-   # List of topics to compact before sending, i.e. only send the
-   # last message with any given key in a batch. This is an optimization
-   # which mirrors what Kafka itself will do with compaction turned on
-   # but only within a single batch. You can also specify :all to
-   # compact all topics.
-   config.db_producer.compact_topics = ['topic1', 'topic2']
-
-   # Configure the metrics provider (see below).
-   config.metrics = Deimos::Metrics::Mock.new({ tags: %w(env:prod my_tag:another_1) })
-
-   # Configure the tracing provider (see below).
-   config.tracer = Deimos::Tracing::Mock.new({service_name: 'my-service'})
- end
- ```
-
- Note that the configuration options from Phobos (seed_broker and the SSL settings)
- can be removed from `phobos.yml` since Deimos will load them instead.
+ For a full configuration reference, please see [the configuration docs](docs/CONFIGURATION.md).
 
  # Producers
 
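For orientation, the removed initializer above maps onto the new nested syntax roughly like this; a sketch based on the tables in docs/CONFIGURATION.md (broker and registry hostnames are carried over from the old example):

```ruby
# config/initializers/deimos.rb
Deimos.configure do
  logger Rails.logger

  # Formerly config.schema_registry_url / config.schema_path:
  schema.registry_url 'https://my-schema-registry.com'
  schema.path "#{Rails.root}/app/schemas"

  # Formerly config.seed_broker:
  kafka.seed_brokers ['my.seed.broker.0.net:9093', 'my.seed.broker.1.net:9093']

  # Formerly config.reraise_consumer_errors / config.report_lag:
  consumers.reraise_errors true
  consumers.report_lag %w(production staging).include?(Rails.env)

  # Formerly config.producer_topic_prefix / config.backend:
  producers.topic_prefix 'myenv.'
  producers.backend :db
end
```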
@@ -148,15 +67,6 @@ Producers will look like this:
  ```ruby
  class MyProducer < Deimos::Producer
 
-   # Can override default namespace.
-   namespace 'com.deimos.my-app-special'
-   topic 'MyApp.MyTopic'
-   schema 'MySchema'
-   key_config field: 'my_field' # see Kafka Message Keys, below
-
-   # If config.schema_path is app/schemas, assumes there is a file in
-   # app/schemas/com/deimos/my-app-special/MySchema.avsc
-
    class << self
 
      # Optionally override the default partition key logic, which is to use
@@ -342,18 +252,6 @@ Here is a sample consumer:
  ```ruby
  class MyConsumer < Deimos::Consumer
 
-   # These are optional but strongly recommended for testing purposes; this
-   # will validate against a local schema file used as the reader schema,
-   # as well as being able to write tests against this schema.
-   # This is recommended since it ensures you are always getting the values
-   # you expect.
-   schema 'MySchema'
-   namespace 'com.my-namespace'
-   # This directive works identically to the producer - see Kafka Keys, above.
-   # This only affects the `decode_key` method below. You need to provide
-   # `schema` and `namespace`, above, for this to work.
-   key_config field: :my_id
-
    # Optionally overload this to consider a particular exception
    # "fatal" only for this consumer. This is considered in addition
    # to the global `fatal_error` configuration block.
@@ -381,7 +279,7 @@ what downstream systems are doing with it) is causing it. If you do
  not continue on past this message, your consumer will essentially be
  stuck forever unless you take manual action to skip the offset.
 
- Use `config.reraise_consumer_errors = false` to swallow errors. You
+ Use `config.consumers.reraise_errors = false` to swallow errors. You
  can use instrumentation to handle errors you receive. You can also
  specify "fatal errors" either via global configuration (`config.fatal_error`)
  or via overriding a method on an individual consumer (`def fatal_error`).
@@ -393,15 +291,17 @@ messages as an array and then process them together. This can improve
  consumer throughput, depending on the use case. Batch consumers behave like
  other consumers in regards to key and payload decoding, etc.
 
- To enable batch consumption, create a listener in `phobos.yml` and ensure that
- the `delivery` property is set to `inline_batch`. For example:
+ To enable batch consumption, ensure that the `delivery` property is set to `inline_batch`. For example:
 
- ```yaml
- listeners:
-   - handler: Consumers::MyBatchConsumer
-     topic: my_batched_topic
-     group_id: my_group_id
-     delivery: inline_batch
+ ```ruby
+ Deimos.configure do
+   consumer do
+     class_name 'Consumers::MyBatchConsumer'
+     topic 'my_batched_topic'
+     group_id 'my_group_id'
+     delivery :inline_batch
+   end
+ end
  ```
 
  Batch consumers must inherit from the Deimos::BatchConsumer class as in
@@ -410,11 +310,6 @@ this sample:
  ```ruby
  class MyBatchConsumer < Deimos::BatchConsumer
 
-   # See the Consumer sample in the previous section
-   schema 'MySchema'
-   namespace 'com.my-namespace'
-   key_config field: :my_id
-
    def consume_batch(payloads, metadata)
      # payloads is an array of Avro-decoded hashes.
      # metadata is a hash that contains information like :keys and :topic.
@@ -441,10 +336,6 @@ An example would look like this:
  ```ruby
  class MyProducer < Deimos::ActiveRecordProducer
 
-   topic 'MyApp.MyTopic'
-   schema 'MySchema'
-   key_config field: 'my_field'
-
    # The record class should be set on every ActiveRecordProducer.
    # By default, if you give the producer a hash, it will re-fetch the
    # record itself for use in the payload generation. This can be useful
@@ -479,7 +370,7 @@ MyProducer.send_events([{foo: 1}, {foo: 2}])
 
  You can disable producers globally or inside a block. Globally:
  ```ruby
- Deimos.config.disable_producers = true
+ Deimos.config.producers.disabled = true
  ```
 
  For the duration of a block:
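The hunk ends before the block form is shown; for context, a sketch of that form using the gem's `disable_producers` helper (`Producer1` and `Producer2` are placeholders):

```ruby
# Disable all producers for the duration of the block:
Deimos.disable_producers do
  # messages produced here are dropped instead of sent
end

# Or disable only specific producer classes:
Deimos.disable_producers(Producer1, Producer2) do
  # only Producer1 and Producer2 are disabled here
end
```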
@@ -560,7 +451,7 @@ To enable this, first generate the migration to create the relevant tables:
 
  You can now set the following configuration:
 
-     config.publish_backend = :db
+     config.producers.backend = :db
 
  This will save all your Kafka messages to the `kafka_messages` table instead
  of immediately sending to Kafka. Now, you just need to call
@@ -615,9 +506,6 @@ A sample consumer would look as follows:
 
  ```ruby
  class MyConsumer < Deimos::ActiveRecordConsumer
-
-   schema 'MySchema'
-   key_config field: 'my_field'
    record_class Widget
 
    # Optional override of the way to fetch records based on payload and
@@ -665,7 +553,7 @@ Deimos includes some metrics reporting out the box. It ships with DataDog suppor
  The following metrics are reported:
  * `consumer_lag` - for each partition, the number of messages
    it's behind the tail of the partition (a gauge). This is only sent if
-   `config.report_lag` is set to true.
+   `config.consumers.report_lag` is set to true.
  * `handler` - a count of the number of messages received. Tagged
    with the following:
    * `topic:{topic_name}`
@@ -693,15 +581,10 @@ The following metrics are reported:
    with the database backend. Tagged with the topic that is waiting.
    Will send a value of 0 with no topics tagged if there are no messages
    waiting.
- * `db_producer.insert` - the number of messages inserted into the database
-   for publishing. Tagged with `topic:{topic_name}`
- * `db_producer.process` - the number of DB messages processed. Note that this
-   is *not* the same as the number of messages *published* if those messages
-   are compacted. Tagged with `topic:{topic_name}`
 
  ### Configuring Metrics Providers
 
- See the `# Configure Metrics Provider` section under [Configuration](#configuration)
+ See the `metrics` field under [Configuration](docs/CONFIGURATION.md).
  View all available Metrics Providers [here](lib/deimos/metrics/metrics_providers)
 
  ### Custom Metrics Providers
@@ -726,7 +609,7 @@ separate span for message consume logic.
 
  ### Configuring Tracing Providers
 
- See the `# Configure Tracing Provider` section under [Configuration](#configuration)
+ See the `tracer` field under [Configuration](docs/CONFIGURATION.md).
  View all available Tracing Providers [here](lib/deimos/tracing)
 
  ### Custom Tracing Providers
@@ -768,7 +651,7 @@ test_consume_message(MyConsumer,
  end
 
  # You can also pass a topic name instead of the consumer class as long
- # as the topic is configured in your phobos.yml configuration:
+ # as the topic is configured in your Deimos configuration:
  test_consume_message('my-topic-name',
                       { 'some-payload' => 'some-value' }) do |payload, metadata|
    # do some expectation handling here
data/Rakefile CHANGED
@@ -10,4 +10,4 @@ rescue LoadError # rubocop:disable Lint/HandleExceptions
  # no rspec available
  end
 
- import('./lib/tasks/deimos.rake')
+ import('./lib/tasks/phobos.rake')
data/deimos-ruby.gemspec CHANGED
@@ -33,7 +33,6 @@ Gem::Specification.new do |spec|
  spec.add_development_dependency('guard-rubocop', '~> 1')
  spec.add_development_dependency('mysql2', '~> 0.5')
  spec.add_development_dependency('pg', '~> 1.1')
- spec.add_development_dependency('rails', '~> 5.2')
  spec.add_development_dependency('rake', '~> 10')
  spec.add_development_dependency('rspec', '~> 3')
  spec.add_development_dependency('rspec_junit_formatter', '~>0.3')
data/docs/CONFIGURATION.md ADDED
@@ -0,0 +1,200 @@
+ # Configuration
+
+ Deimos supports a succinct, readable syntax which uses
+ pure Ruby to allow flexible configuration.
+
+ You can access any configuration value via a simple `Deimos.config.whatever`.
+
+ Nested configuration is denoted in simple dot notation:
+ `kafka.ssl.enabled`. Headings below will follow the nested
+ configurations.
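For example (both settings appear in the tables below):

```ruby
Deimos.config.logger            # a top-level value
Deimos.config.kafka.ssl.enabled # a nested value, written as kafka.ssl.enabled below
```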
+
+ ## Base Configuration
+ Config name|Default|Description
+ -----------|-------|-----------
+ logger|`Logger.new(STDOUT)`|The logger that Deimos will use.
+ phobos_logger|`Deimos.config.logger`|The logger passed to Phobos.
+ metrics|`Deimos::Metrics::Mock.new`|The metrics backend used for reporting.
+ tracer|`Deimos::Tracing::Mock.new`|The tracer backend used for debugging.
+
+ ## Defining Producers
+
+ You can define a new producer as follows:
+ ```ruby
+ Deimos.configure do
+   producer do
+     class_name 'MyProducer'
+     topic 'MyTopic'
+     schema 'MyTopicSchema'
+     namespace 'my.namespace'
+     key_config field: :id
+
+     # If config.schema.path is app/schemas, assumes there is a file in
+     # app/schemas/my/namespace/MyTopicSchema.avsc
+   end
+ end
+ ```
+
+ You can have as many `producer` blocks as you like to define more producers.
+
+ Config name|Default|Description
+ -----------|-------|-----------
+ class_name|nil|Class name of the producer class (subclass of `Deimos::Producer`).
+ topic|nil|Topic to produce to.
+ schema|nil|Name of the schema to use to encode data before producing.
+ namespace|nil|Namespace of the schema to use when finding it locally.
+ key_config|nil|Configuration hash for message keys. See [Kafka Message Keys](../README.md#installation)
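As an illustration, `key_config` accepts the forms described in the README's Kafka Message Keys section (the key schema name here is a placeholder):

```ruby
key_config none: true            # messages have no key
key_config plain: true           # key is sent as a plain string
key_config schema: 'MyKeySchema' # key is encoded using its own schema
key_config field: :id            # key is taken from the payload's id field
```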
+
+ ## Defining Consumers
+
+ Consumers are defined almost identically to producers:
+
+ ```ruby
+ Deimos.configure do
+   consumer do
+     class_name 'MyConsumer'
+     topic 'MyTopic'
+     schema 'MyTopicSchema'
+     namespace 'my.namespace'
+     key_config field: :id
+
+     # If config.schema.path is app/schemas, assumes there is a file in
+     # app/schemas/my/namespace/MyTopicSchema.avsc
+   end
+ end
+ ```
+
+ In addition to the producer configs, you can define a number of overrides
+ to the basic consumer configuration for each consumer. This is analogous to
+ the `listener` config in `phobos.yml`.
+
+ Config name|Default|Description
+ -----------|-------|-----------
+ class_name|nil|Class name of the consumer class (subclass of `Deimos::Consumer`).
+ topic|nil|Topic to consume from.
+ schema|nil|This is optional but strongly recommended for testing purposes; this will validate against a local schema file used as the reader schema, as well as being able to write tests against this schema. This is recommended since it ensures you are always getting the values you expect.
+ namespace|nil|Namespace of the schema to use when finding it locally.
+ key_config|nil|Configuration hash for message keys. See [Kafka Message Keys](../README.md#installation)
+ group_id|nil|ID of the consumer group.
+ max_concurrency|1|Number of threads created for this listener. Each thread will behave as an independent consumer. They don't share any state.
+ start_from_beginning|true|Once the consumer group has checkpointed its progress in the topic's partitions, the consumers will always start from the checkpointed offsets, regardless of config. As such, this setting only applies when the consumer initially starts consuming from a topic.
+ max_bytes_per_partition|512.kilobytes|Maximum amount of data fetched from a single partition at a time.
+ min_bytes|1|Minimum number of bytes to read before returning messages from the server; if `max_wait_time` is reached, this is ignored.
+ max_wait_time|5|Maximum duration of time to wait before returning messages from the server, in seconds.
+ force_encoding|nil|Apply this encoding to the message payload. If blank it uses the original encoding. This property accepts values defined by the Ruby Encoding class (https://ruby-doc.org/core-2.3.0/Encoding.html), e.g. UTF_8, ASCII_8BIT.
+ delivery|`:batch`|The delivery mode for the consumer. Possible values: `:message`, `:batch`, `:inline_batch`. See Phobos documentation for more details.
+ session_timeout|300|Number of seconds after which, if a client hasn't contacted the Kafka cluster, it will be kicked out of the group.
+ offset_commit_interval|10|Interval between offset commits, in seconds.
+ offset_commit_threshold|0|Number of messages that can be processed before their offsets are committed. If zero, offset commits are not triggered by message processing.
+ heartbeat_interval|10|Interval between heartbeats; must be less than the session window.
+ backoff|`(1000..60_000)`|Range representing the minimum and maximum number of milliseconds to back off after a consumer error.
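For instance, a consumer block that overrides a few of these listener settings (values are illustrative):

```ruby
Deimos.configure do
  consumer do
    class_name 'Consumers::MyBatchConsumer'
    topic 'my_batched_topic'
    group_id 'my_group_id'
    delivery :inline_batch      # receive batches instead of single messages
    max_concurrency 3           # three independent consumer threads
    start_from_beginning false  # skip messages produced before the group existed
  end
end
```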
+
+ ## Kafka Configuration
+ Config name|Default|Description
+ -----------|-------|-----------
+ kafka.logger|`Deimos.config.logger`|Logger passed to RubyKafka.
+ kafka.seed_brokers|`['localhost:9092']`|URLs for the Kafka brokers.
+ kafka.client_id|`phobos`|Identifier for this application.
+ kafka.connect_timeout|15|The socket timeout for connecting to the broker, in seconds.
+ kafka.socket_timeout|15|The socket timeout for reading and writing to the broker, in seconds.
+ kafka.ssl.enabled|false|Whether SSL is enabled on the brokers.
+ kafka.ssl.ca_cert|nil|A PEM encoded CA cert, a file path to the cert, or an Array of certs to use with an SSL connection.
+ kafka.ssl.client_cert|nil|A PEM encoded client cert to use with an SSL connection, or a file path to the cert.
+ kafka.ssl.client_cert_key|nil|A PEM encoded client cert key to use with an SSL connection.
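These settings correspond to the SSL example that was removed from the README; a sketch using the same environment variable names:

```ruby
Deimos.configure do
  kafka.seed_brokers ['my.seed.broker.0.net:9093', 'my.seed.broker.1.net:9093']
  if ENV['KAFKA_SSL_ENABLED']
    kafka.ssl.enabled true
    kafka.ssl.ca_cert File.read(ENV['SSL_CA_CERT'])
    kafka.ssl.client_cert File.read(ENV['SSL_CLIENT_CERT'])
    kafka.ssl.client_cert_key File.read(ENV['SSL_CLIENT_CERT_KEY'])
  end
end
```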
+
+ ## Consumer Configuration
+
+ These are top-level configuration settings, but they can be overridden
+ by individual consumers.
+
+ Config name|Default|Description
+ -----------|-------|-----------
+ consumers.session_timeout|300|Number of seconds after which, if a client hasn't contacted the Kafka cluster, it will be kicked out of the group.
+ consumers.offset_commit_interval|10|Interval between offset commits, in seconds.
+ consumers.offset_commit_threshold|0|Number of messages that can be processed before their offsets are committed. If zero, offset commits are not triggered by message processing.
+ consumers.heartbeat_interval|10|Interval between heartbeats; must be less than the session window.
+ consumers.backoff|`(1000..60_000)`|Range representing the minimum and maximum number of milliseconds to back off after a consumer error.
+ consumers.reraise_errors|false|Default behavior is to swallow uncaught exceptions and log to the metrics provider. Set this to true to instead raise all errors. Note that raising an error will ensure that the message cannot be processed - if there is a bad message which will always raise that error, your consumer will not be able to proceed past it and will be stuck forever until you fix your code. See also the `fatal_error` configuration. This is automatically set to true when using the `TestHelpers` module in RSpec.
+ consumers.report_lag|false|Whether to send the `consumer_lag` metric. This requires an extra thread per consumer.
+ consumers.fatal_error|`proc { false }`|Block taking an exception, payload and metadata and returning true if this should be considered a fatal error and false otherwise. E.g. you can use this to always fail if the database is unavailable. Not needed if reraise_errors is set to true.
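A sketch of setting these globally, assuming proc-valued settings are passed like any other value (`BadError` is a placeholder exception class):

```ruby
Deimos.configure do
  consumers do
    report_lag %w(production staging).include?(Rails.env)
    reraise_errors false
    # Reraise only for errors we consider fatal:
    fatal_error(proc { |exception, _payload, _metadata| exception.is_a?(BadError) })
  end
end
```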
+
+ ## Producer Configuration
+
+ Config name|Default|Description
+ -----------|-------|-----------
+ producers.ack_timeout|5|Number of seconds a broker can wait for replicas to acknowledge a write before responding with a timeout.
+ producers.required_acks|1|Number of replicas that must acknowledge a write, or `:all` if all in-sync replicas must acknowledge.
+ producers.max_retries|2|Number of retries that should be attempted before giving up sending messages to the cluster. Does not include the original attempt.
+ producers.retry_backoff|1|Number of seconds to wait between retries.
+ producers.max_buffer_size|10_000|Number of messages allowed in the buffer before new writes will raise `BufferOverflow` exceptions.
+ producers.max_buffer_bytesize|10_000_000|Maximum size of the buffer in bytes. Attempting to produce messages when the buffer reaches this size will result in `BufferOverflow` being raised.
+ producers.compression_codec|nil|Name of the compression codec to use, or nil if no compression should be performed. Valid codecs: `:snappy` and `:gzip`.
+ producers.compression_threshold|1|Number of messages that need to be in a message set before it should be compressed. Note that message sets are per-partition rather than per-topic or per-producer.
+ producers.max_queue_size|10_000|Maximum number of messages allowed in the queue. Only used for async_producer.
+ producers.delivery_threshold|0|If greater than zero, the number of buffered messages that will automatically trigger a delivery. Only used for async_producer.
+ producers.delivery_interval|0|If greater than zero, the number of seconds between automatic message deliveries. Only used for async_producer.
+ producers.persistent_connections|false|Set this to true to keep the producer connection between publish calls. This can speed up subsequent messages by around 30%, but it does mean that you need to manually call `sync_producer_shutdown` before exiting, similar to `async_producer_shutdown`.
+ producers.schema_namespace|nil|Default namespace for all producers. Can remain nil. Individual producers can override.
+ producers.topic_prefix|nil|Add a prefix to all topic names. This can be useful if you're using the same Kafka broker for different environments that are producing the same topics.
+ producers.disabled|false|Disable all actual message producing. Generally more useful to use the `disable_producers` method instead.
+ producers.backend|`:kafka_async`|Currently can be set to `:db`, `:kafka`, or `:kafka_async`. If using Kafka directly, a good pattern is to set to async in your user-facing app, and sync in your consumers or delayed workers.
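Following the note on `producers.backend`, the suggested split looks like this (a sketch; each process sets its own value in its initializer):

```ruby
# In the user-facing app:
Deimos.configure do
  producers.backend :kafka_async  # buffer and deliver in the background
end

# In consumers or delayed workers:
Deimos.configure do
  producers.backend :kafka        # deliver synchronously
end
```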
138
+
139
+ ## Schema Configuration
140
+ |Config name|Default|Description
141
+ -----------|-------|-----------
142
+ schema.registry_url|`http://localhost:8081`|URL of the Confluent schema registry.
143
+ schema.path|nil|Local path to find your schemas.
144
+
145
+ ## Database Producer Configuration
146
+ |Config name|Default|Description
147
+ -----------|-------|-----------
148
+ db_producer.logger|`Deimos.config.logger`|Logger to use inside the DB producer.
149
+ db_producer.log_topics|`[]`|List of topics to print full messages for, or `:all` to print all topics. This can introduce slowdown since it needs to decode each message using the schema registry.
150
+ db_producer.compact_topics|`[]`|List of topics to compact before sending, i.e. only send the last message with any given key in a batch. This is an optimization which mirrors what Kafka itself will do with compaction turned on but only within a single batch. You can also specify `:all` to compact all topics.
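A combined sketch of the schema and DB producer settings (the registry URL and topic name are placeholders, and the `db_producer` block is assumed to work like the `consumers` block above):

```ruby
Deimos.configure do
  schema.registry_url 'https://my-schema-registry.com'
  schema.path "#{Rails.root}/app/schemas"

  db_producer do
    log_topics ['topic1']  # decode and log full messages for this topic
    compact_topics :all    # keep only the last message per key within a batch
  end
end
```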
151
+
152
+ ## Configuration Syntax
153
+
154
+ Sample:
155
+
156
+ ```ruby
157
+ Deimos.configure do
158
+ logger Logger.new(STDOUT)
159
+ # Nested config field
160
+ kafka.seed_brokers ['my.kafka.broker:9092']
161
+
162
+ # Multiple nested config fields via block
163
+ consumers do
164
+ session_timeout 30
165
+ offset_commit_interval 10
166
+ end
167
+
168
+ # Define a new producer
169
+ producer do
170
+ class_name 'MyProducer'
171
+ topic 'MyTopic'
172
+ schema 'MyTopicSchema'
173
+ key_config field: :id
174
+ end
175
+
176
+ # Define another new producer
177
+ producer do
178
+ class_name 'AnotherProducer'
179
+ topic 'AnotherTopic'
180
+ schema 'AnotherSchema'
181
+ key_config plain: true
182
+ end
183
+
184
+ # Define a consumer
185
+ consumer do
186
+ class_name 'MyConsumer'
187
+ topic 'TopicToConsume'
188
+ schema 'ConsumerSchema'
189
+ key_config plain: true
190
+ # include Phobos / RubyKafka configs
191
+ start_from_beginning true
192
+ heartbeat_interval 10
193
+ end
194
+
195
+ end
196
+ ```
+
+ Note that all blocks are evaluated in the context of the configuration object.
+ If you're calling this inside another class or method, you'll need to save
+ things you need to reference into local variables before calling `configure`.
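For example, since the blocks are evaluated against the config object, `self` inside them is no longer your class; capture anything you need first (`KafkaSettings` is a placeholder):

```ruby
class MyAppConfig
  def self.apply!
    brokers = KafkaSettings.brokers # capture before configure; self changes inside
    Deimos.configure do
      kafka.seed_brokers brokers    # local variables remain visible here
    end
  end
end
```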