deimos-ruby 1.24.2 → 2.0.0.pre.alpha1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (120)
  1. checksums.yaml +4 -4
  2. data/.rubocop_todo.yml +0 -17
  3. data/.tool-versions +1 -0
  4. data/CHANGELOG.md +5 -0
  5. data/README.md +287 -498
  6. data/deimos-ruby.gemspec +4 -4
  7. data/docs/CONFIGURATION.md +133 -226
  8. data/docs/UPGRADING.md +237 -0
  9. data/lib/deimos/active_record_consume/batch_consumption.rb +29 -28
  10. data/lib/deimos/active_record_consume/mass_updater.rb +59 -4
  11. data/lib/deimos/active_record_consume/message_consumption.rb +15 -21
  12. data/lib/deimos/active_record_consumer.rb +36 -21
  13. data/lib/deimos/active_record_producer.rb +28 -9
  14. data/lib/deimos/backends/base.rb +4 -35
  15. data/lib/deimos/backends/kafka.rb +6 -22
  16. data/lib/deimos/backends/kafka_async.rb +6 -22
  17. data/lib/deimos/backends/{db.rb → outbox.rb} +13 -9
  18. data/lib/deimos/config/configuration.rb +116 -379
  19. data/lib/deimos/consume/batch_consumption.rb +24 -124
  20. data/lib/deimos/consume/message_consumption.rb +36 -63
  21. data/lib/deimos/consumer.rb +16 -75
  22. data/lib/deimos/ext/consumer_route.rb +35 -0
  23. data/lib/deimos/ext/producer_middleware.rb +94 -0
  24. data/lib/deimos/ext/producer_route.rb +22 -0
  25. data/lib/deimos/ext/redraw.rb +29 -0
  26. data/lib/deimos/ext/routing_defaults.rb +72 -0
  27. data/lib/deimos/ext/schema_route.rb +70 -0
  28. data/lib/deimos/kafka_message.rb +2 -2
  29. data/lib/deimos/kafka_source.rb +2 -7
  30. data/lib/deimos/kafka_topic_info.rb +1 -1
  31. data/lib/deimos/logging.rb +71 -0
  32. data/lib/deimos/message.rb +2 -11
  33. data/lib/deimos/metrics/datadog.rb +40 -1
  34. data/lib/deimos/metrics/provider.rb +4 -4
  35. data/lib/deimos/producer.rb +39 -116
  36. data/lib/deimos/railtie.rb +6 -0
  37. data/lib/deimos/schema_backends/avro_base.rb +21 -21
  38. data/lib/deimos/schema_backends/avro_schema_registry.rb +1 -2
  39. data/lib/deimos/schema_backends/avro_validation.rb +2 -2
  40. data/lib/deimos/schema_backends/base.rb +19 -12
  41. data/lib/deimos/schema_backends/mock.rb +6 -1
  42. data/lib/deimos/schema_backends/plain.rb +47 -0
  43. data/lib/deimos/schema_class/base.rb +2 -2
  44. data/lib/deimos/schema_class/enum.rb +1 -1
  45. data/lib/deimos/schema_class/record.rb +2 -2
  46. data/lib/deimos/test_helpers.rb +95 -320
  47. data/lib/deimos/tracing/provider.rb +6 -6
  48. data/lib/deimos/transcoder.rb +88 -0
  49. data/lib/deimos/utils/db_poller/base.rb +16 -14
  50. data/lib/deimos/utils/db_poller/state_based.rb +3 -3
  51. data/lib/deimos/utils/db_poller/time_based.rb +4 -4
  52. data/lib/deimos/utils/db_poller.rb +1 -1
  53. data/lib/deimos/utils/deadlock_retry.rb +1 -1
  54. data/lib/deimos/utils/{db_producer.rb → outbox_producer.rb} +16 -47
  55. data/lib/deimos/utils/schema_class.rb +0 -7
  56. data/lib/deimos/version.rb +1 -1
  57. data/lib/deimos.rb +79 -26
  58. data/lib/generators/deimos/{db_backend_generator.rb → outbox_backend_generator.rb} +4 -4
  59. data/lib/generators/deimos/schema_class_generator.rb +0 -1
  60. data/lib/generators/deimos/v2/templates/karafka.rb.tt +149 -0
  61. data/lib/generators/deimos/v2_generator.rb +193 -0
  62. data/lib/tasks/deimos.rake +5 -7
  63. data/spec/active_record_batch_consumer_association_spec.rb +22 -13
  64. data/spec/active_record_batch_consumer_spec.rb +84 -65
  65. data/spec/active_record_consume/batch_consumption_spec.rb +10 -10
  66. data/spec/active_record_consume/batch_slicer_spec.rb +12 -12
  67. data/spec/active_record_consume/mass_updater_spec.rb +137 -0
  68. data/spec/active_record_consumer_spec.rb +29 -13
  69. data/spec/active_record_producer_spec.rb +36 -26
  70. data/spec/backends/base_spec.rb +0 -23
  71. data/spec/backends/kafka_async_spec.rb +1 -3
  72. data/spec/backends/kafka_spec.rb +1 -3
  73. data/spec/backends/{db_spec.rb → outbox_spec.rb} +14 -20
  74. data/spec/batch_consumer_spec.rb +66 -116
  75. data/spec/consumer_spec.rb +53 -147
  76. data/spec/deimos_spec.rb +10 -126
  77. data/spec/kafka_source_spec.rb +19 -52
  78. data/spec/karafka/karafka.rb +69 -0
  79. data/spec/karafka_config/karafka_spec.rb +97 -0
  80. data/spec/logging_spec.rb +25 -0
  81. data/spec/message_spec.rb +9 -9
  82. data/spec/producer_spec.rb +112 -254
  83. data/spec/rake_spec.rb +1 -3
  84. data/spec/schema_backends/avro_validation_spec.rb +1 -1
  85. data/spec/schemas/com/my-namespace/MySchemaWithTitle.avsc +22 -0
  86. data/spec/snapshots/consumers-no-nest.snap +49 -0
  87. data/spec/snapshots/consumers.snap +49 -0
  88. data/spec/snapshots/consumers_and_producers-no-nest.snap +49 -0
  89. data/spec/snapshots/consumers_and_producers.snap +49 -0
  90. data/spec/snapshots/consumers_circular-no-nest.snap +49 -0
  91. data/spec/snapshots/consumers_circular.snap +49 -0
  92. data/spec/snapshots/consumers_complex_types-no-nest.snap +49 -0
  93. data/spec/snapshots/consumers_complex_types.snap +49 -0
  94. data/spec/snapshots/consumers_nested-no-nest.snap +49 -0
  95. data/spec/snapshots/consumers_nested.snap +49 -0
  96. data/spec/snapshots/namespace_folders.snap +49 -0
  97. data/spec/snapshots/namespace_map.snap +49 -0
  98. data/spec/snapshots/producers_with_key-no-nest.snap +49 -0
  99. data/spec/snapshots/producers_with_key.snap +49 -0
  100. data/spec/spec_helper.rb +61 -29
  101. data/spec/utils/db_poller_spec.rb +49 -39
  102. data/spec/utils/{db_producer_spec.rb → outbox_producer_spec.rb} +17 -184
  103. metadata +58 -67
  104. data/lib/deimos/batch_consumer.rb +0 -7
  105. data/lib/deimos/config/phobos_config.rb +0 -163
  106. data/lib/deimos/instrumentation.rb +0 -95
  107. data/lib/deimos/monkey_patches/phobos_cli.rb +0 -35
  108. data/lib/deimos/utils/inline_consumer.rb +0 -158
  109. data/lib/deimos/utils/lag_reporter.rb +0 -186
  110. data/lib/deimos/utils/schema_controller_mixin.rb +0 -129
  111. data/spec/config/configuration_spec.rb +0 -321
  112. data/spec/kafka_listener_spec.rb +0 -55
  113. data/spec/phobos.bad_db.yml +0 -73
  114. data/spec/phobos.yml +0 -77
  115. data/spec/utils/inline_consumer_spec.rb +0 -31
  116. data/spec/utils/lag_reporter_spec.rb +0 -76
  117. data/spec/utils/platform_schema_validation_spec.rb +0 -0
  118. data/spec/utils/schema_controller_mixin_spec.rb +0 -84
  119. /data/lib/generators/deimos/{db_backend → outbox_backend}/templates/migration +0 -0
  120. /data/lib/generators/deimos/{db_backend → outbox_backend}/templates/rails3_migration +0 -0
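Taken together, the file list shows the shape of the 2.0 rewrite: the Phobos-based configuration layer (config/phobos_config.rb, instrumentation.rb, the phobos*.yml fixtures) is removed, the database backend is renamed to "outbox" (backends/db.rb → backends/outbox.rb, utils/db_producer.rb → outbox_producer.rb), and topic wiring moves into Karafka routing via the new ext/*_route.rb files and the lib/generators/deimos/v2/templates/karafka.rb.tt template. Below is a minimal sketch of that routing style; the option names mirror the v1 class-level DSL (schema, namespace, key_config) and the new ext/schema_route.rb, but the class names and exact options are illustrative assumptions — confirm them against data/docs/UPGRADING.md.

# Minimal sketch (not part of this diff): v2-style topic configuration in karafka.rb.
# MyConsumer is a hypothetical consumer class; the routing options are assumptions
# based on the v1 DSL and the new routing extensions added in this release.
class KarafkaApp < Karafka::App
  setup do |config|
    config.kafka = { 'bootstrap.servers': 'localhost:9092' }
    config.client_id = 'my-app'
  end

  routes.draw do
    topic 'my_consume_topic' do
      consumer MyConsumer
      schema 'MySchema'
      namespace 'com.my-namespace'
      key_config field: :test_id
    end
  end
end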
data/spec/config/configuration_spec.rb DELETED
@@ -1,321 +0,0 @@
- # frozen_string_literal: true
-
- # Mock consumer
- class MyConfigConsumer < Deimos::Consumer
-   # :no-doc:
-   def consume
-   end
- end
-
- # Mock consumer 2
- class MyConfigConsumer2 < Deimos::Consumer
-   # :no-doc:
-   def consume
-   end
- end
-
- describe Deimos, 'configuration' do
-   it 'should configure with deprecated fields' do
-     logger = Logger.new(nil)
-     described_class.configure do
-       kafka_logger logger
-       reraise_consumer_errors true
-       schema_registry_url 'http://schema.registry'
-       schema.use_schema_classes false
-       seed_broker 'whatever'
-       schema_path 'some_path'
-       producer_schema_namespace 'namespace'
-       producer_topic_prefix 'prefix'
-       disable_producers true
-       ssl_enabled true
-       ssl_ca_cert 'cert'
-       ssl_client_cert 'cert'
-       ssl_client_cert_key 'key'
-       publish_backend 'db'
-       report_lag true
-     end
-
-     expect(described_class.config.kafka.logger).to eq(logger)
-     expect(described_class.config.consumers.reraise_errors).to eq(true)
-     expect(described_class.config.schema.registry_url).to eq('http://schema.registry')
-     expect(described_class.config.schema.use_schema_classes).to eq(false)
-     expect(described_class.config.kafka.seed_brokers).to eq('whatever')
-     expect(described_class.config.producers.schema_namespace).to eq('namespace')
-     expect(described_class.config.producers.topic_prefix).to eq('prefix')
-     expect(described_class.config.producers.disabled).to eq(true)
-     expect(described_class.config.kafka.ssl.enabled).to eq(true)
-     expect(described_class.config.kafka.ssl.ca_cert).to eq('cert')
-     expect(described_class.config.kafka.ssl.client_cert).to eq('cert')
-     expect(described_class.config.kafka.ssl.client_cert_key).to eq('key')
-     expect(described_class.config.producers.backend).to eq('db')
-     expect(described_class.config.consumers.report_lag).to eq(true)
-   end
-
-   it 'reads existing Phobos config YML files' do
-     described_class.config.reset!
-     described_class.configure { |c| c.phobos_config_file = File.join(File.dirname(__FILE__), '..', 'phobos.yml') }
-     expect(described_class.config.phobos_config).to match(
-       logger: an_instance_of(Logger),
-       backoff: { min_ms: 1000, max_ms: 60_000 },
-       consumer: {
-         session_timeout: 300,
-         offset_commit_interval: 10,
-         offset_commit_threshold: 0,
-         heartbeat_interval: 10
-       },
-       custom_kafka_logger: an_instance_of(Logger),
-       custom_logger: an_instance_of(Logger),
-       kafka: {
-         client_id: 'phobos',
-         connect_timeout: 15,
-         socket_timeout: 15,
-         ssl_verify_hostname: true,
-         ssl_ca_certs_from_system: false,
-         seed_brokers: ['localhost:9092']
-       },
-       listeners: [
-         {
-           topic: 'my_consume_topic',
-           group_id: 'my_group_id',
-           max_concurrency: 1,
-           start_from_beginning: true,
-           max_bytes_per_partition: 524_288,
-           min_bytes: 1,
-           max_wait_time: 5,
-           force_encoding: nil,
-           delivery: 'batch',
-           session_timeout: 300,
-           offset_commit_interval: 10,
-           offset_commit_threshold: 0,
-           offset_retention_time: nil,
-           heartbeat_interval: 10,
-           handler: 'ConsumerTest::MyConsumer',
-           use_schema_classes: nil,
-           max_db_batch_size: nil,
-           bulk_import_id_generator: nil
-         }, {
-           topic: 'my_batch_consume_topic',
-           group_id: 'my_batch_group_id',
-           max_concurrency: 1,
-           start_from_beginning: true,
-           max_bytes_per_partition: 500.kilobytes,
-           min_bytes: 1,
-           max_wait_time: 5,
-           force_encoding: nil,
-           delivery: 'inline_batch',
-           session_timeout: 300,
-           offset_commit_interval: 10,
-           offset_commit_threshold: 0,
-           offset_retention_time: nil,
-           heartbeat_interval: 10,
-           handler: 'ConsumerTest::MyBatchConsumer',
-           use_schema_classes: nil,
-           max_db_batch_size: nil,
-           bulk_import_id_generator: nil
-         }
-       ],
-       producer: {
-         ack_timeout: 5,
-         required_acks: :all,
-         max_retries: 2,
-         retry_backoff: 1,
-         max_buffer_size: 10_000,
-         max_buffer_bytesize: 10_000_000,
-         compression_codec: nil,
-         compression_threshold: 1,
-         max_queue_size: 10_000,
-         delivery_threshold: 0,
-         delivery_interval: 0
-       }
-     )
-   end
-
-   specify '#phobos_config' do
-     logger1 = Logger.new(nil)
-     logger2 = Logger.new(nil)
-     described_class.config.reset!
-     described_class.configure do
-       phobos_logger logger1
-       kafka do
-         logger logger2
-         seed_brokers 'my-seed-brokers'
-         client_id 'phobos2'
-         connect_timeout 30
-         socket_timeout 30
-         ssl.enabled(true)
-         ssl.ca_certs_from_system(true)
-         ssl.ca_cert('cert')
-         ssl.client_cert('cert')
-         ssl.client_cert_key('key')
-         ssl.verify_hostname(false)
-         sasl.enabled true
-         sasl.gssapi_principal 'gssapi_principal'
-         sasl.gssapi_keytab 'gssapi_keytab'
-         sasl.plain_authzid 'plain_authzid'
-         sasl.plain_username 'plain_username'
-         sasl.plain_password 'plain_password'
-         sasl.scram_username 'scram_username'
-         sasl.scram_password 'scram_password'
-         sasl.scram_mechanism 'scram_mechanism'
-         sasl.enforce_ssl true
-         sasl.oauth_token_provider 'oauth_token_provider'
-       end
-       consumers do
-         session_timeout 30
-         offset_commit_interval 5
-         offset_commit_threshold 0
-         heartbeat_interval 5
-         backoff 5..10
-       end
-       producers do
-         ack_timeout 3
-         required_acks 1
-         max_retries 1
-         retry_backoff 2
-         max_buffer_size 5
-         max_buffer_bytesize 5
-         compression_codec :snappy
-         compression_threshold 2
-         max_queue_size 10
-         delivery_threshold 1
-         delivery_interval 1
-         persistent_connections true
-       end
-       consumer do
-         class_name 'MyConfigConsumer'
-         schema 'blah'
-         topic 'blah'
-         group_id 'myconsumerid'
-         max_concurrency 1
-         start_from_beginning true
-         max_bytes_per_partition 10
-         min_bytes 5
-         max_wait_time 5
-         force_encoding true
-         delivery :message
-         backoff 100..200
-         session_timeout 10
-         offset_commit_interval 13
-         offset_commit_threshold 13
-         offset_retention_time 13
-         heartbeat_interval 13
-         use_schema_classes false
-       end
-       consumer do
-         disabled true
-         class_name 'MyConfigConsumer2'
-         schema 'blah2'
-         topic 'blah2'
-         group_id 'myconsumerid2'
-         use_schema_classes false
-       end
-     end
-
-     expect(described_class.config.phobos_config).
-       to match(
-         logger: an_instance_of(Logger),
-         backoff: { min_ms: 5, max_ms: 10 },
-         consumer: {
-           session_timeout: 30,
-           offset_commit_interval: 5,
-           offset_commit_threshold: 0,
-           heartbeat_interval: 5
-         },
-         custom_kafka_logger: logger2,
-         custom_logger: logger1,
-         kafka: {
-           client_id: 'phobos2',
-           connect_timeout: 30,
-           socket_timeout: 30,
-           ssl_ca_certs_from_system: true,
-           ssl_ca_cert: 'cert',
-           ssl_client_cert: 'cert',
-           ssl_client_cert_key: 'key',
-           ssl_verify_hostname: false,
-           seed_brokers: ['my-seed-brokers'],
-           sasl_gssapi_principal: 'gssapi_principal',
-           sasl_gssapi_keytab: 'gssapi_keytab',
-           sasl_plain_authzid: 'plain_authzid',
-           sasl_plain_username: 'plain_username',
-           sasl_plain_password: 'plain_password',
-           sasl_scram_username: 'scram_username',
-           sasl_scram_password: 'scram_password',
-           sasl_scram_mechanism: 'scram_mechanism',
-           sasl_over_ssl: true,
-           sasl_oauth_token_provider: 'oauth_token_provider',
-         },
-         listeners: [
-           {
-             topic: 'blah',
-             group_id: 'myconsumerid',
-             max_concurrency: 1,
-             start_from_beginning: true,
-             max_bytes_per_partition: 10,
-             min_bytes: 5,
-             max_wait_time: 5,
-             force_encoding: true,
-             delivery: 'message',
-             backoff: { min_ms: 100, max_ms: 200 },
-             session_timeout: 10,
-             offset_commit_interval: 13,
-             offset_commit_threshold: 13,
-             offset_retention_time: 13,
-             heartbeat_interval: 13,
-             handler: 'MyConfigConsumer',
-             use_schema_classes: false,
-             max_db_batch_size: nil,
-             bulk_import_id_generator: nil
-           }
-         ],
-         producer: {
-           ack_timeout: 3,
-           required_acks: 1,
-           max_retries: 1,
-           retry_backoff: 2,
-           max_buffer_size: 5,
-           max_buffer_bytesize: 5,
-           compression_codec: :snappy,
-           compression_threshold: 2,
-           max_queue_size: 10,
-           delivery_threshold: 1,
-           delivery_interval: 1
-         }
-       )
-   end
-
-   it 'should override global configurations' do
-     described_class.configure do
-       consumers.bulk_import_id_generator(-> { 'global' })
-       consumers.replace_associations true
-
-       consumer do
-         class_name 'MyConfigConsumer'
-         schema 'blah'
-         topic 'blah'
-         group_id 'myconsumerid'
-         bulk_import_id_generator(-> { 'consumer' })
-         replace_associations false
-       end
-
-       consumer do
-         class_name 'MyConfigConsumer2'
-         schema 'blah'
-         topic 'blah'
-         group_id 'myconsumerid'
-       end
-     end
-
-     consumers = described_class.config.consumers
-     expect(consumers.replace_associations).to eq(true)
-     expect(consumers.bulk_import_id_generator.call).to eq('global')
-
-     custom = MyConfigConsumer.config
-     expect(custom[:replace_associations]).to eq(false)
-     expect(custom[:bulk_import_id_generator].call).to eq('consumer')
-
-     default = MyConfigConsumer2.config
-     expect(default[:replace_associations]).to eq(true)
-     expect(default[:bulk_import_id_generator].call).to eq('global')
-
-   end
- end
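The deleted spec above is the last place the deprecated 1.x settings (publish_backend 'db', report_lag, the kafka.ssl/sasl blocks) are exercised. For the backend rename specifically, here is a hedged sketch of the 2.x equivalent, assuming the renamed outbox backend keeps the same configure-style switch; verify the exact name in data/docs/UPGRADING.md.

# Assumption: with backends/db.rb renamed to backends/outbox.rb, the producer
# backend symbol follows suit. Not an API shown in this diff.
Deimos.configure do |config|
  config.producers.backend = :outbox # 1.x used `publish_backend 'db'` / `:db`
end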
data/spec/kafka_listener_spec.rb DELETED
@@ -1,55 +0,0 @@
- # frozen_string_literal: true
-
- describe Deimos::KafkaListener do
-   include_context 'with widgets'
-
-   prepend_before(:each) do
-     producer_class = Class.new(Deimos::Producer) do
-       schema 'MySchema'
-       namespace 'com.my-namespace'
-       topic 'my-topic'
-       key_config none: true
-     end
-     stub_const('MyProducer', producer_class)
-   end
-
-   before(:each) do
-     Deimos.configure do |c|
-       c.producers.backend = :kafka
-       c.schema.backend = :avro_local
-     end
-     allow_any_instance_of(Kafka::Cluster).to receive(:add_target_topics)
-     allow_any_instance_of(Kafka::Cluster).to receive(:partitions_for).
-       and_raise(Kafka::Error)
-   end
-
-   describe '.send_produce_error' do
-     let(:payloads) do
-       [{ 'test_id' => 'foo', 'some_int' => 123 },
-        { 'test_id' => 'bar', 'some_int' => 124 }]
-     end
-
-     it 'should listen to publishing errors and republish as Deimos events' do
-       allow(Deimos::Producer).to receive(:descendants).and_return([MyProducer])
-       Deimos.subscribe('produce_error') do |event|
-         expect(event.payload).to include(
-           producer: MyProducer,
-           topic: 'my-topic',
-           payloads: payloads
-         )
-       end
-       expect(Deimos.config.metrics).to receive(:increment).
-         with('publish_error', tags: %w(topic:my-topic), by: 2)
-       expect { MyProducer.publish_list(payloads) }.to raise_error(Kafka::DeliveryFailed)
-     end
-
-     it 'should not send any notifications when producer is not found' do
-       Deimos.subscribe('produce_error') do |_|
-         raise 'OH NOES'
-       end
-       allow(Deimos::Producer).to receive(:descendants).and_return([])
-       expect(Deimos.config.metrics).not_to receive(:increment).with('publish_error', anything)
-       expect { MyProducer.publish_list(payloads) }.to raise_error(Kafka::DeliveryFailed)
-     end
-   end
- end
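This spec covered Deimos' own produce_error instrumentation, which disappears along with lib/deimos/instrumentation.rb. With Phobos gone, the natural replacement is Karafka's monitor; the sketch below uses Karafka's standard error.occurred event and is an assumption about how an application would regain that visibility, not an API added by this release.

# Hedged sketch: subscribe to Karafka's error bus instead of Deimos.subscribe('produce_error').
Karafka.monitor.subscribe('error.occurred') do |event|
  # event[:type] distinguishes consumer, producer, and librdkafka errors
  Rails.logger.error("Kafka error (#{event[:type]}): #{event[:error].message}")
end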
data/spec/phobos.bad_db.yml DELETED
@@ -1,73 +0,0 @@
- logger:
-   # Optional log file, set to false or remove to disable it
-   file: log/phobos.log
-   # Optional output format for stdout, default is false (human readable).
-   # Set to true to enable json output.
-   stdout_json: false
-   level: debug
-   # Comment the block to disable ruby-kafka logs
-   ruby_kafka:
-     level: debug
-
- kafka:
-   # identifier for this application
-   client_id: phobos
-   # timeout setting for connecting to brokers
-   connect_timeout: 15
-   # timeout setting for socket connections
-   socket_timeout: 15
-
- producer:
-   # number of seconds a broker can wait for replicas to acknowledge
-   # a write before responding with a timeout
-   ack_timeout: 5
-   # number of replicas that must acknowledge a write, or `:all`
-   # if all in-sync replicas must acknowledge
-   required_acks: 1
-   # number of retries that should be attempted before giving up sending
-   # messages to the cluster. Does not include the original attempt
-   max_retries: 2
-   # number of seconds to wait between retries
-   retry_backoff: 1
-   # number of messages allowed in the buffer before new writes will
-   # raise {BufferOverflow} exceptions
-   max_buffer_size: 10000
-   # maximum size of the buffer in bytes. Attempting to produce messages
-   # when the buffer reaches this size will result in {BufferOverflow} being raised
-   max_buffer_bytesize: 10000000
-   # name of the compression codec to use, or nil if no compression should be performed.
-   # Valid codecs: `:snappy` and `:gzip`
-   compression_codec:
-   # number of messages that needs to be in a message set before it should be compressed.
-   # Note that message sets are per-partition rather than per-topic or per-producer
-   compression_threshold: 1
-   # maximum number of messages allowed in the queue. Only used for async_producer
-   max_queue_size: 10000
-   # if greater than zero, the number of buffered messages that will automatically
-   # trigger a delivery. Only used for async_producer
-   delivery_threshold: 0
-   # if greater than zero, the number of seconds between automatic message
-   # deliveries. Only used for async_producer
-   delivery_interval: 0
-
- consumer:
-   # number of seconds after which, if a client hasn't contacted the Kafka cluster,
-   # it will be kicked out of the group
-   session_timeout: 300
-   # interval between offset commits, in seconds
-   offset_commit_interval: 10
-   # number of messages that can be processed before their offsets are committed.
-   # If zero, offset commits are not triggered by message processing
-   offset_commit_threshold: 0
-   # interval between heartbeats; must be less than the session window
-   heartbeat_interval: 10
-
- backoff:
-   min_ms: 1000
-   max_ms: 60000
-
- listeners:
-   - handler: ConsumerTest::MyConsumer
-     topic: my_consume_topic
-     group_id: my_group_id
-     max_bytes_per_partition: 524288 # 512 KB
data/spec/phobos.yml DELETED
@@ -1,77 +0,0 @@
- logger:
-   # Optional log file, set to false or remove to disable it
-   file: log/phobos.log
-   # Optional output format for stdout, default is false (human readable).
-   # Set to true to enable json output.
-   stdout_json: false
-   level: debug
-   # Comment the block to disable ruby-kafka logs
-   ruby_kafka:
-     level: debug
-
- kafka:
-   # identifier for this application
-   client_id: phobos
-   # timeout setting for connecting to brokers
-   connect_timeout: 15
-   # timeout setting for socket connections
-   socket_timeout: 15
-
- producer:
-   # number of seconds a broker can wait for replicas to acknowledge
-   # a write before responding with a timeout
-   ack_timeout: 5
-   # number of replicas that must acknowledge a write, or `:all`
-   # if all in-sync replicas must acknowledge
-   required_acks: :all
-   # number of retries that should be attempted before giving up sending
-   # messages to the cluster. Does not include the original attempt
-   max_retries: 2
-   # number of seconds to wait between retries
-   retry_backoff: 1
-   # number of messages allowed in the buffer before new writes will
-   # raise {BufferOverflow} exceptions
-   max_buffer_size: 10000
-   # maximum size of the buffer in bytes. Attempting to produce messages
-   # when the buffer reaches this size will result in {BufferOverflow} being raised
-   max_buffer_bytesize: 10000000
-   # name of the compression codec to use, or nil if no compression should be performed.
-   # Valid codecs: `:snappy` and `:gzip`
-   compression_codec:
-   # number of messages that needs to be in a message set before it should be compressed.
-   # Note that message sets are per-partition rather than per-topic or per-producer
-   compression_threshold: 1
-   # maximum number of messages allowed in the queue. Only used for async_producer
-   max_queue_size: 10000
-   # if greater than zero, the number of buffered messages that will automatically
-   # trigger a delivery. Only used for async_producer
-   delivery_threshold: 0
-   # if greater than zero, the number of seconds between automatic message
-   # deliveries. Only used for async_producer
-   delivery_interval: 0
-
- consumer:
-   # number of seconds after which, if a client hasn't contacted the Kafka cluster,
-   # it will be kicked out of the group
-   session_timeout: 300
-   # interval between offset commits, in seconds
-   offset_commit_interval: 10
-   # number of messages that can be processed before their offsets are committed.
-   # If zero, offset commits are not triggered by message processing
-   offset_commit_threshold: 0
-   # interval between heartbeats; must be less than the session window
-   heartbeat_interval: 10
-
- backoff:
-   min_ms: 1000
-   max_ms: 60000
-
- listeners:
-   - handler: ConsumerTest::MyConsumer
-     topic: my_consume_topic
-     group_id: my_group_id
-     max_bytes_per_partition: 524288 # 512 KB
-   - handler: ConsumerTest::MyBatchConsumer
-     topic: my_batch_consume_topic
-     group_id: my_batch_group_id
-     delivery: inline_batch
data/spec/utils/inline_consumer_spec.rb DELETED
@@ -1,31 +0,0 @@
- # frozen_string_literal: true
-
- describe Deimos::Utils::SeekListener do
-
-   describe '#start_listener' do
-     let(:consumer) { instance_double(Kafka::Consumer) }
-     let(:handler) { class_double(Deimos::Utils::MessageBankHandler) }
-
-     before(:each) do
-       allow(handler).to receive(:start)
-       allow(consumer).to receive(:subscribe)
-       allow_any_instance_of(Phobos::Listener).to receive(:create_kafka_consumer).and_return(consumer)
-       allow_any_instance_of(Kafka::Client).to receive(:last_offset_for).and_return(100)
-       stub_const('Deimos::Utils::SeekListener::MAX_SEEK_RETRIES', 2)
-     end
-
-     it 'should seek offset' do
-       allow(consumer).to receive(:seek)
-       expect(consumer).to receive(:seek).once
-       seek_listener = described_class.new(handler: handler, group_id: 999, topic: 'test_topic')
-       seek_listener.start_listener
-     end
-
-     it 'should retry on errors when seeking offset' do
-       allow(consumer).to receive(:seek).and_raise(StandardError)
-       expect(consumer).to receive(:seek).twice
-       seek_listener = described_class.new(handler: handler, group_id: 999, topic: 'test_topic')
-       seek_listener.start_listener
-     end
-   end
- end
data/spec/utils/lag_reporter_spec.rb DELETED
@@ -1,76 +0,0 @@
- # frozen_string_literal: true
-
- describe Deimos::Utils::LagReporter do
-
-   let(:kafka_client) { instance_double(Kafka::Client) }
-   let(:partition1_tags) { %w(consumer_group:group1 partition:1 topic:my-topic) }
-   let(:partition2_tags) { %w(consumer_group:group1 partition:2 topic:my-topic) }
-
-   before(:each) do
-     allow(kafka_client).to receive(:last_offset_for).and_return(100)
-     allow(Phobos).to receive(:create_kafka_client).and_return(kafka_client)
-     Deimos.configure { |c| c.consumers.report_lag = true }
-   end
-
-   after(:each) do
-     described_class.reset
-     Deimos.configure { |c| c.consumers.report_lag = false }
-   end
-
-   it 'should not report lag before ready' do
-     expect(Deimos.config.metrics).not_to receive(:gauge)
-     ActiveSupport::Notifications.instrument(
-       'heartbeat.consumer.kafka',
-       group_id: 'group1', topic_partitions: { 'my-topic': [1] }
-     )
-   end
-
-   it 'should report lag' do
-     expect(Deimos.config.metrics).to receive(:gauge).ordered.twice.
-       with('consumer_lag', 95, tags: partition1_tags)
-     expect(Deimos.config.metrics).to receive(:gauge).ordered.once.
-       with('consumer_lag', 80, tags: partition2_tags)
-     expect(Deimos.config.metrics).to receive(:gauge).ordered.once.
-       with('consumer_lag', 0, tags: partition2_tags)
-     ActiveSupport::Notifications.instrument(
-       'seek.consumer.kafka',
-       offset: 5, topic: 'my-topic', group_id: 'group1', partition: 1
-     )
-     ActiveSupport::Notifications.instrument(
-       'start_process_message.consumer.kafka',
-       offset: 20, topic: 'my-topic', group_id: 'group1', partition: 2
-     )
-     ActiveSupport::Notifications.instrument(
-       'heartbeat.consumer.kafka',
-       group_id: 'group1', topic_partitions: { 'my-topic': [1, 2] }
-     )
-     ActiveSupport::Notifications.instrument(
-       'start_process_batch.consumer.kafka',
-       last_offset: 100, topic: 'my-topic', group_id: 'group1', partition: 2
-     )
-     ActiveSupport::Notifications.instrument(
-       'heartbeat.consumer.kafka',
-       group_id: 'group1', topic_partitions: { 'my-topic': [1, 2] }
-     )
-   end
-
-   it 'should update lag after heartbeat' do
-     expect(Deimos.config.metrics).to receive(:gauge).ordered.once.
-       with('consumer_lag', 94, tags: partition2_tags)
-     expect(Deimos.config.metrics).to receive(:gauge).ordered.once.
-       with('consumer_lag', 95, tags: partition2_tags)
-     ActiveSupport::Notifications.instrument(
-       'seek.consumer.kafka',
-       offset: 6, topic: 'my-topic', group_id: 'group1', partition: 2
-     )
-     ActiveSupport::Notifications.instrument(
-       'heartbeat.consumer.kafka',
-       group_id: 'group1', topic_partitions: { 'my-topic': [1, 2] }
-     )
-     allow(kafka_client).to receive(:last_offset_for).and_return(101)
-     ActiveSupport::Notifications.instrument(
-       'heartbeat.consumer.kafka',
-       group_id: 'group1', topic_partitions: { 'my-topic': [1, 2] }
-     )
-   end
- end
The remaining entries (data/spec/utils/platform_schema_validation_spec.rb and the renamed db_backend → outbox_backend generator migration templates) are files without content changes.