deimos-ruby 1.24.3 → 2.0.0.pre.alpha1

Files changed (118)
  1. checksums.yaml +4 -4
  2. data/.rubocop_todo.yml +0 -17
  3. data/.tool-versions +1 -0
  4. data/CHANGELOG.md +1 -1
  5. data/README.md +287 -498
  6. data/deimos-ruby.gemspec +4 -4
  7. data/docs/CONFIGURATION.md +133 -227
  8. data/docs/UPGRADING.md +237 -0
  9. data/lib/deimos/active_record_consume/batch_consumption.rb +28 -29
  10. data/lib/deimos/active_record_consume/message_consumption.rb +15 -21
  11. data/lib/deimos/active_record_consumer.rb +36 -26
  12. data/lib/deimos/active_record_producer.rb +28 -9
  13. data/lib/deimos/backends/base.rb +4 -35
  14. data/lib/deimos/backends/kafka.rb +6 -22
  15. data/lib/deimos/backends/kafka_async.rb +6 -22
  16. data/lib/deimos/backends/{db.rb → outbox.rb} +13 -9
  17. data/lib/deimos/config/configuration.rb +116 -385
  18. data/lib/deimos/consume/batch_consumption.rb +24 -124
  19. data/lib/deimos/consume/message_consumption.rb +36 -63
  20. data/lib/deimos/consumer.rb +16 -75
  21. data/lib/deimos/ext/consumer_route.rb +35 -0
  22. data/lib/deimos/ext/producer_middleware.rb +94 -0
  23. data/lib/deimos/ext/producer_route.rb +22 -0
  24. data/lib/deimos/ext/redraw.rb +29 -0
  25. data/lib/deimos/ext/routing_defaults.rb +72 -0
  26. data/lib/deimos/ext/schema_route.rb +70 -0
  27. data/lib/deimos/kafka_message.rb +2 -2
  28. data/lib/deimos/kafka_source.rb +2 -7
  29. data/lib/deimos/kafka_topic_info.rb +1 -1
  30. data/lib/deimos/logging.rb +71 -0
  31. data/lib/deimos/message.rb +2 -11
  32. data/lib/deimos/metrics/datadog.rb +40 -1
  33. data/lib/deimos/metrics/provider.rb +4 -4
  34. data/lib/deimos/producer.rb +39 -116
  35. data/lib/deimos/railtie.rb +6 -0
  36. data/lib/deimos/schema_backends/avro_base.rb +21 -21
  37. data/lib/deimos/schema_backends/avro_schema_registry.rb +1 -2
  38. data/lib/deimos/schema_backends/avro_validation.rb +2 -2
  39. data/lib/deimos/schema_backends/base.rb +19 -12
  40. data/lib/deimos/schema_backends/mock.rb +6 -1
  41. data/lib/deimos/schema_backends/plain.rb +47 -0
  42. data/lib/deimos/schema_class/base.rb +2 -2
  43. data/lib/deimos/schema_class/enum.rb +1 -1
  44. data/lib/deimos/schema_class/record.rb +2 -2
  45. data/lib/deimos/test_helpers.rb +95 -320
  46. data/lib/deimos/tracing/provider.rb +6 -6
  47. data/lib/deimos/transcoder.rb +88 -0
  48. data/lib/deimos/utils/db_poller/base.rb +16 -14
  49. data/lib/deimos/utils/db_poller/state_based.rb +3 -3
  50. data/lib/deimos/utils/db_poller/time_based.rb +4 -4
  51. data/lib/deimos/utils/db_poller.rb +1 -1
  52. data/lib/deimos/utils/deadlock_retry.rb +1 -1
  53. data/lib/deimos/utils/{db_producer.rb → outbox_producer.rb} +16 -47
  54. data/lib/deimos/utils/schema_class.rb +0 -7
  55. data/lib/deimos/version.rb +1 -1
  56. data/lib/deimos.rb +79 -26
  57. data/lib/generators/deimos/{db_backend_generator.rb → outbox_backend_generator.rb} +4 -4
  58. data/lib/generators/deimos/schema_class_generator.rb +0 -1
  59. data/lib/generators/deimos/v2/templates/karafka.rb.tt +149 -0
  60. data/lib/generators/deimos/v2_generator.rb +193 -0
  61. data/lib/tasks/deimos.rake +5 -7
  62. data/spec/active_record_batch_consumer_association_spec.rb +22 -13
  63. data/spec/active_record_batch_consumer_spec.rb +84 -65
  64. data/spec/active_record_consume/batch_consumption_spec.rb +10 -10
  65. data/spec/active_record_consume/batch_slicer_spec.rb +12 -12
  66. data/spec/active_record_consumer_spec.rb +29 -13
  67. data/spec/active_record_producer_spec.rb +36 -26
  68. data/spec/backends/base_spec.rb +0 -23
  69. data/spec/backends/kafka_async_spec.rb +1 -3
  70. data/spec/backends/kafka_spec.rb +1 -3
  71. data/spec/backends/{db_spec.rb → outbox_spec.rb} +14 -20
  72. data/spec/batch_consumer_spec.rb +66 -116
  73. data/spec/consumer_spec.rb +53 -147
  74. data/spec/deimos_spec.rb +10 -126
  75. data/spec/kafka_source_spec.rb +19 -52
  76. data/spec/karafka/karafka.rb +69 -0
  77. data/spec/karafka_config/karafka_spec.rb +97 -0
  78. data/spec/logging_spec.rb +25 -0
  79. data/spec/message_spec.rb +9 -9
  80. data/spec/producer_spec.rb +112 -254
  81. data/spec/rake_spec.rb +1 -3
  82. data/spec/schema_backends/avro_validation_spec.rb +1 -1
  83. data/spec/schemas/com/my-namespace/MySchemaWithTitle.avsc +22 -0
  84. data/spec/snapshots/consumers-no-nest.snap +49 -0
  85. data/spec/snapshots/consumers.snap +49 -0
  86. data/spec/snapshots/consumers_and_producers-no-nest.snap +49 -0
  87. data/spec/snapshots/consumers_and_producers.snap +49 -0
  88. data/spec/snapshots/consumers_circular-no-nest.snap +49 -0
  89. data/spec/snapshots/consumers_circular.snap +49 -0
  90. data/spec/snapshots/consumers_complex_types-no-nest.snap +49 -0
  91. data/spec/snapshots/consumers_complex_types.snap +49 -0
  92. data/spec/snapshots/consumers_nested-no-nest.snap +49 -0
  93. data/spec/snapshots/consumers_nested.snap +49 -0
  94. data/spec/snapshots/namespace_folders.snap +49 -0
  95. data/spec/snapshots/namespace_map.snap +49 -0
  96. data/spec/snapshots/producers_with_key-no-nest.snap +49 -0
  97. data/spec/snapshots/producers_with_key.snap +49 -0
  98. data/spec/spec_helper.rb +61 -29
  99. data/spec/utils/db_poller_spec.rb +49 -39
  100. data/spec/utils/{db_producer_spec.rb → outbox_producer_spec.rb} +17 -184
  101. metadata +58 -67
  102. data/lib/deimos/batch_consumer.rb +0 -7
  103. data/lib/deimos/config/phobos_config.rb +0 -164
  104. data/lib/deimos/instrumentation.rb +0 -95
  105. data/lib/deimos/monkey_patches/phobos_cli.rb +0 -35
  106. data/lib/deimos/utils/inline_consumer.rb +0 -158
  107. data/lib/deimos/utils/lag_reporter.rb +0 -186
  108. data/lib/deimos/utils/schema_controller_mixin.rb +0 -129
  109. data/spec/config/configuration_spec.rb +0 -329
  110. data/spec/kafka_listener_spec.rb +0 -55
  111. data/spec/phobos.bad_db.yml +0 -73
  112. data/spec/phobos.yml +0 -77
  113. data/spec/utils/inline_consumer_spec.rb +0 -31
  114. data/spec/utils/lag_reporter_spec.rb +0 -76
  115. data/spec/utils/platform_schema_validation_spec.rb +0 -0
  116. data/spec/utils/schema_controller_mixin_spec.rb +0 -84
  117. /data/lib/generators/deimos/{db_backend → outbox_backend}/templates/migration +0 -0
  118. /data/lib/generators/deimos/{db_backend → outbox_backend}/templates/rails3_migration +0 -0
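The headline change in 2.0.0.pre.alpha1 is the move off Phobos onto Karafka: the Phobos config layer (config/phobos_config.rb, the phobos.yml fixtures), the instrumentation, lag reporter, inline consumer, and schema controller mixin are all deleted; the db backend, generator, and producer are renamed to outbox; and new routing extensions under lib/deimos/ext/ plus a v2 generator with a karafka.rb template take over topic configuration. As orientation, here is a hedged sketch of the shape that generated karafka.rb takes — class, topic, and schema names are illustrative, and the authoritative DSL lives in data/lib/generators/deimos/v2/templates/karafka.rb.tt and data/docs/UPGRADING.md:

```ruby
# Hypothetical karafka.rb as the v2 generator might emit it. The topic-level
# schema/namespace/key_config options come from the new routing extensions
# (data/lib/deimos/ext/schema_route.rb and consumer_route.rb); names here are
# examples, not taken from this diff.
class KarafkaApp < Karafka::App
  setup do |config|
    config.kafka = { 'bootstrap.servers': 'localhost:9092' }
  end

  routes.draw do
    topic 'my_consume_topic' do
      consumer MyConsumer           # replaces the v1 `consumer do ... end` block
      schema 'MySchema'             # settings v1 kept in Deimos.configure
      namespace 'com.my-namespace'
      key_config field: :test_id
    end
  end
end
```

The deleted spec hunks below show exactly what this replaces.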
data/spec/config/configuration_spec.rb DELETED
@@ -1,329 +0,0 @@
- # frozen_string_literal: true
-
- # Mock consumer
- class MyConfigConsumer < Deimos::Consumer
-   # :no-doc:
-   def consume
-   end
- end
-
- # Mock consumer 2
- class MyConfigConsumer2 < Deimos::Consumer
-   # :no-doc:
-   def consume
-   end
- end
-
- describe Deimos, 'configuration' do
-   it 'should configure with deprecated fields' do
-     logger = Logger.new(nil)
-     described_class.configure do
-       kafka_logger logger
-       reraise_consumer_errors true
-       schema_registry_url 'http://schema.registry'
-       schema.use_schema_classes false
-       seed_broker 'whatever'
-       schema_path 'some_path'
-       producer_schema_namespace 'namespace'
-       producer_topic_prefix 'prefix'
-       disable_producers true
-       ssl_enabled true
-       ssl_ca_cert 'cert'
-       ssl_client_cert 'cert'
-       ssl_client_cert_key 'key'
-       publish_backend 'db'
-       report_lag true
-     end
-
-     expect(described_class.config.kafka.logger).to eq(logger)
-     expect(described_class.config.consumers.reraise_errors).to eq(true)
-     expect(described_class.config.schema.registry_url).to eq('http://schema.registry')
-     expect(described_class.config.schema.use_schema_classes).to eq(false)
-     expect(described_class.config.kafka.seed_brokers).to eq('whatever')
-     expect(described_class.config.producers.schema_namespace).to eq('namespace')
-     expect(described_class.config.producers.topic_prefix).to eq('prefix')
-     expect(described_class.config.producers.disabled).to eq(true)
-     expect(described_class.config.kafka.ssl.enabled).to eq(true)
-     expect(described_class.config.kafka.ssl.ca_cert).to eq('cert')
-     expect(described_class.config.kafka.ssl.client_cert).to eq('cert')
-     expect(described_class.config.kafka.ssl.client_cert_key).to eq('key')
-     expect(described_class.config.producers.backend).to eq('db')
-     expect(described_class.config.consumers.report_lag).to eq(true)
-   end
-
-   it 'reads existing Phobos config YML files' do
-     described_class.config.reset!
-     described_class.configure { |c| c.phobos_config_file = File.join(File.dirname(__FILE__), '..', 'phobos.yml') }
-     expect(described_class.config.phobos_config).to match(
-       logger: an_instance_of(Logger),
-       backoff: { min_ms: 1000, max_ms: 60_000 },
-       consumer: {
-         session_timeout: 300,
-         offset_commit_interval: 10,
-         offset_commit_threshold: 0,
-         heartbeat_interval: 10
-       },
-       custom_kafka_logger: an_instance_of(Logger),
-       custom_logger: an_instance_of(Logger),
-       kafka: {
-         client_id: 'phobos',
-         connect_timeout: 15,
-         socket_timeout: 15,
-         ssl_verify_hostname: true,
-         ssl_ca_certs_from_system: false,
-         seed_brokers: ['localhost:9092']
-       },
-       listeners: [
-         {
-           topic: 'my_consume_topic',
-           group_id: 'my_group_id',
-           max_concurrency: 1,
-           start_from_beginning: true,
-           max_bytes_per_partition: 524_288,
-           min_bytes: 1,
-           max_wait_time: 5,
-           force_encoding: nil,
-           delivery: 'batch',
-           session_timeout: 300,
-           offset_commit_interval: 10,
-           offset_commit_threshold: 0,
-           offset_retention_time: nil,
-           heartbeat_interval: 10,
-           handler: 'ConsumerTest::MyConsumer',
-           use_schema_classes: nil,
-           max_db_batch_size: nil,
-           bulk_import_id_generator: nil,
-           save_associations_first: false
-         }, {
-           topic: 'my_batch_consume_topic',
-           group_id: 'my_batch_group_id',
-           max_concurrency: 1,
-           start_from_beginning: true,
-           max_bytes_per_partition: 500.kilobytes,
-           min_bytes: 1,
-           max_wait_time: 5,
-           force_encoding: nil,
-           delivery: 'inline_batch',
-           session_timeout: 300,
-           offset_commit_interval: 10,
-           offset_commit_threshold: 0,
-           offset_retention_time: nil,
-           heartbeat_interval: 10,
-           handler: 'ConsumerTest::MyBatchConsumer',
-           use_schema_classes: nil,
-           max_db_batch_size: nil,
-           bulk_import_id_generator: nil,
-           save_associations_first: false
-         }
-       ],
-       producer: {
-         ack_timeout: 5,
-         required_acks: :all,
-         max_retries: 2,
-         retry_backoff: 1,
-         max_buffer_size: 10_000,
-         max_buffer_bytesize: 10_000_000,
-         compression_codec: nil,
-         compression_threshold: 1,
-         max_queue_size: 10_000,
-         delivery_threshold: 0,
-         delivery_interval: 0,
-         persistent_connections: false
-       }
-     )
-   end
-
-   specify '#phobos_config' do
-     logger1 = Logger.new(nil)
-     logger2 = Logger.new(nil)
-     described_class.config.reset!
-     described_class.configure do
-       phobos_logger logger1
-       kafka do
-         logger logger2
-         seed_brokers 'my-seed-brokers'
-         client_id 'phobos2'
-         connect_timeout 30
-         socket_timeout 30
-         ssl.enabled(true)
-         ssl.ca_certs_from_system(true)
-         ssl.ca_cert('cert')
-         ssl.client_cert('cert')
-         ssl.client_cert_key('key')
-         ssl.verify_hostname(false)
-         sasl.enabled true
-         sasl.gssapi_principal 'gssapi_principal'
-         sasl.gssapi_keytab 'gssapi_keytab'
-         sasl.plain_authzid 'plain_authzid'
-         sasl.plain_username 'plain_username'
-         sasl.plain_password 'plain_password'
-         sasl.scram_username 'scram_username'
-         sasl.scram_password 'scram_password'
-         sasl.scram_mechanism 'scram_mechanism'
-         sasl.enforce_ssl true
-         sasl.oauth_token_provider 'oauth_token_provider'
-       end
-       consumers do
-         session_timeout 30
-         offset_commit_interval 5
-         offset_commit_threshold 0
-         heartbeat_interval 5
-         backoff 5..10
-       end
-       producers do
-         ack_timeout 3
-         required_acks 1
-         max_retries 1
-         retry_backoff 2
-         max_buffer_size 5
-         max_buffer_bytesize 5
-         compression_codec :snappy
-         compression_threshold 2
-         max_queue_size 10
-         delivery_threshold 1
-         delivery_interval 1
-         persistent_connections true
-       end
-       consumer do
-         class_name 'MyConfigConsumer'
-         schema 'blah'
-         topic 'blah'
-         group_id 'myconsumerid'
-         max_concurrency 1
-         start_from_beginning true
-         max_bytes_per_partition 10
-         min_bytes 5
-         max_wait_time 5
-         force_encoding true
-         delivery :message
-         backoff 100..200
-         session_timeout 10
-         offset_commit_interval 13
-         offset_commit_threshold 13
-         offset_retention_time 13
-         heartbeat_interval 13
-         use_schema_classes false
-       end
-       consumer do
-         disabled true
-         class_name 'MyConfigConsumer2'
-         schema 'blah2'
-         topic 'blah2'
-         group_id 'myconsumerid2'
-         use_schema_classes false
-       end
-     end
-
-     expect(described_class.config.phobos_config).
-       to match(
-         logger: an_instance_of(Logger),
-         backoff: { min_ms: 5, max_ms: 10 },
-         consumer: {
-           session_timeout: 30,
-           offset_commit_interval: 5,
-           offset_commit_threshold: 0,
-           heartbeat_interval: 5
-         },
-         custom_kafka_logger: logger2,
-         custom_logger: logger1,
-         kafka: {
-           client_id: 'phobos2',
-           connect_timeout: 30,
-           socket_timeout: 30,
-           ssl_ca_certs_from_system: true,
-           ssl_ca_cert: 'cert',
-           ssl_client_cert: 'cert',
-           ssl_client_cert_key: 'key',
-           ssl_verify_hostname: false,
-           seed_brokers: ['my-seed-brokers'],
-           sasl_gssapi_principal: 'gssapi_principal',
-           sasl_gssapi_keytab: 'gssapi_keytab',
-           sasl_plain_authzid: 'plain_authzid',
-           sasl_plain_username: 'plain_username',
-           sasl_plain_password: 'plain_password',
-           sasl_scram_username: 'scram_username',
-           sasl_scram_password: 'scram_password',
-           sasl_scram_mechanism: 'scram_mechanism',
-           sasl_over_ssl: true,
-           sasl_oauth_token_provider: 'oauth_token_provider',
-         },
-         listeners: [
-           {
-             topic: 'blah',
-             group_id: 'myconsumerid',
-             max_concurrency: 1,
-             start_from_beginning: true,
-             max_bytes_per_partition: 10,
-             min_bytes: 5,
-             max_wait_time: 5,
-             force_encoding: true,
-             delivery: 'message',
-             backoff: { min_ms: 100, max_ms: 200 },
-             session_timeout: 10,
-             offset_commit_interval: 13,
-             offset_commit_threshold: 13,
-             offset_retention_time: 13,
-             heartbeat_interval: 13,
-             handler: 'MyConfigConsumer',
-             use_schema_classes: false,
-             max_db_batch_size: nil,
-             bulk_import_id_generator: nil,
-             save_associations_first: false
-           }
-         ],
-         producer: {
-           ack_timeout: 3,
-           required_acks: 1,
-           max_retries: 1,
-           retry_backoff: 2,
-           max_buffer_size: 5,
-           max_buffer_bytesize: 5,
-           compression_codec: :snappy,
-           compression_threshold: 2,
-           max_queue_size: 10,
-           delivery_threshold: 1,
-           delivery_interval: 1,
-           persistent_connections: true
-         }
-       )
-   end
-
-   it 'should override global configurations' do
-     described_class.configure do
-       consumers.bulk_import_id_generator(-> { 'global' })
-       consumers.replace_associations true
-
-       consumer do
-         class_name 'MyConfigConsumer'
-         schema 'blah'
-         topic 'blah'
-         group_id 'myconsumerid'
-         bulk_import_id_generator(-> { 'consumer' })
-         replace_associations false
-         save_associations_first true
-       end
-
-       consumer do
-         class_name 'MyConfigConsumer2'
-         schema 'blah'
-         topic 'blah'
-         group_id 'myconsumerid'
-       end
-     end
-
-     consumers = described_class.config.consumers
-     expect(consumers.replace_associations).to eq(true)
-     expect(consumers.bulk_import_id_generator.call).to eq('global')
-
-     custom = MyConfigConsumer.config
-     expect(custom[:replace_associations]).to eq(false)
-     expect(custom[:bulk_import_id_generator].call).to eq('consumer')
-     expect(custom[:save_associations_first]).to eq(true)
-
-     default = MyConfigConsumer2.config
-     expect(default[:replace_associations]).to eq(true)
-     expect(default[:bulk_import_id_generator].call).to eq('global')
-     expect(default[:save_associations_first]).to eq(false)
-
-   end
- end
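The per-consumer overrides exercised in this deleted spec (bulk_import_id_generator, replace_associations, save_associations_first) do not disappear in v2 — judging by data/lib/deimos/ext/consumer_route.rb in the file list, they become topic-level routing options. A hedged sketch, assuming the option names carry over unchanged:

```ruby
# Hypothetical v2 equivalent of the v1 `consumer do ... end` override block;
# verify the option names against data/lib/deimos/ext/consumer_route.rb.
topic 'blah' do
  consumer MyConfigConsumer
  bulk_import_id_generator(-> { 'consumer' })  # overrides the global default
  replace_associations false
  save_associations_first true
end
```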
data/spec/kafka_listener_spec.rb DELETED
@@ -1,55 +0,0 @@
- # frozen_string_literal: true
-
- describe Deimos::KafkaListener do
-   include_context 'with widgets'
-
-   prepend_before(:each) do
-     producer_class = Class.new(Deimos::Producer) do
-       schema 'MySchema'
-       namespace 'com.my-namespace'
-       topic 'my-topic'
-       key_config none: true
-     end
-     stub_const('MyProducer', producer_class)
-   end
-
-   before(:each) do
-     Deimos.configure do |c|
-       c.producers.backend = :kafka
-       c.schema.backend = :avro_local
-     end
-     allow_any_instance_of(Kafka::Cluster).to receive(:add_target_topics)
-     allow_any_instance_of(Kafka::Cluster).to receive(:partitions_for).
-       and_raise(Kafka::Error)
-   end
-
-   describe '.send_produce_error' do
-     let(:payloads) do
-       [{ 'test_id' => 'foo', 'some_int' => 123 },
-        { 'test_id' => 'bar', 'some_int' => 124 }]
-     end
-
-     it 'should listen to publishing errors and republish as Deimos events' do
-       allow(Deimos::Producer).to receive(:descendants).and_return([MyProducer])
-       Deimos.subscribe('produce_error') do |event|
-         expect(event.payload).to include(
-           producer: MyProducer,
-           topic: 'my-topic',
-           payloads: payloads
-         )
-       end
-       expect(Deimos.config.metrics).to receive(:increment).
-         with('publish_error', tags: %w(topic:my-topic), by: 2)
-       expect { MyProducer.publish_list(payloads) }.to raise_error(Kafka::DeliveryFailed)
-     end
-
-     it 'should not send any notifications when producer is not found' do
-       Deimos.subscribe('produce_error') do |_|
-         raise 'OH NOES'
-       end
-       allow(Deimos::Producer).to receive(:descendants).and_return([])
-       expect(Deimos.config.metrics).not_to receive(:increment).with('publish_error', anything)
-       expect { MyProducer.publish_list(payloads) }.to raise_error(Kafka::DeliveryFailed)
-     end
-   end
- end
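Deimos' produce_error notification (from the deleted lib/deimos/instrumentation.rb) has no one-for-one replacement; the closest hook is WaterDrop/Karafka's documented 'error.occurred' monitor event. A sketch using that event — the handling shown is illustrative, not something this diff adds:

```ruby
# Subscribe to producer errors on the WaterDrop monitor. The
# 'librdkafka.dispatch_error' type fires when an async message fails delivery.
Karafka.producer.monitor.subscribe('error.occurred') do |event|
  if event[:type] == 'librdkafka.dispatch_error'
    Karafka.logger.error("Delivery failed: #{event[:error].message}")
  end
end
```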
data/spec/phobos.bad_db.yml DELETED
@@ -1,73 +0,0 @@
- logger:
-   # Optional log file, set to false or remove to disable it
-   file: log/phobos.log
-   # Optional output format for stdout, default is false (human readable).
-   # Set to true to enable json output.
-   stdout_json: false
-   level: debug
-   # Comment the block to disable ruby-kafka logs
-   ruby_kafka:
-     level: debug
-
- kafka:
-   # identifier for this application
-   client_id: phobos
-   # timeout setting for connecting to brokers
-   connect_timeout: 15
-   # timeout setting for socket connections
-   socket_timeout: 15
-
- producer:
-   # number of seconds a broker can wait for replicas to acknowledge
-   # a write before responding with a timeout
-   ack_timeout: 5
-   # number of replicas that must acknowledge a write, or `:all`
-   # if all in-sync replicas must acknowledge
-   required_acks: 1
-   # number of retries that should be attempted before giving up sending
-   # messages to the cluster. Does not include the original attempt
-   max_retries: 2
-   # number of seconds to wait between retries
-   retry_backoff: 1
-   # number of messages allowed in the buffer before new writes will
-   # raise {BufferOverflow} exceptions
-   max_buffer_size: 10000
-   # maximum size of the buffer in bytes. Attempting to produce messages
-   # when the buffer reaches this size will result in {BufferOverflow} being raised
-   max_buffer_bytesize: 10000000
-   # name of the compression codec to use, or nil if no compression should be performed.
-   # Valid codecs: `:snappy` and `:gzip`
-   compression_codec:
-   # number of messages that needs to be in a message set before it should be compressed.
-   # Note that message sets are per-partition rather than per-topic or per-producer
-   compression_threshold: 1
-   # maximum number of messages allowed in the queue. Only used for async_producer
-   max_queue_size: 10000
-   # if greater than zero, the number of buffered messages that will automatically
-   # trigger a delivery. Only used for async_producer
-   delivery_threshold: 0
-   # if greater than zero, the number of seconds between automatic message
-   # deliveries. Only used for async_producer
-   delivery_interval: 0
-
- consumer:
-   # number of seconds after which, if a client hasn't contacted the Kafka cluster,
-   # it will be kicked out of the group
-   session_timeout: 300
-   # interval between offset commits, in seconds
-   offset_commit_interval: 10
-   # number of messages that can be processed before their offsets are committed.
-   # If zero, offset commits are not triggered by message processing
-   offset_commit_threshold: 0
-   # interval between heartbeats; must be less than the session window
-   heartbeat_interval: 10
-
- backoff:
-   min_ms: 1000
-   max_ms: 60000
-
- listeners:
-   - handler: ConsumerTest::MyConsumer
-     topic: my_consume_topic
-     group_id: my_group_id
-     max_bytes_per_partition: 524288 # 512 KB
data/spec/phobos.yml DELETED
@@ -1,77 +0,0 @@
- logger:
-   # Optional log file, set to false or remove to disable it
-   file: log/phobos.log
-   # Optional output format for stdout, default is false (human readable).
-   # Set to true to enable json output.
-   stdout_json: false
-   level: debug
-   # Comment the block to disable ruby-kafka logs
-   ruby_kafka:
-     level: debug
-
- kafka:
-   # identifier for this application
-   client_id: phobos
-   # timeout setting for connecting to brokers
-   connect_timeout: 15
-   # timeout setting for socket connections
-   socket_timeout: 15
-
- producer:
-   # number of seconds a broker can wait for replicas to acknowledge
-   # a write before responding with a timeout
-   ack_timeout: 5
-   # number of replicas that must acknowledge a write, or `:all`
-   # if all in-sync replicas must acknowledge
-   required_acks: :all
-   # number of retries that should be attempted before giving up sending
-   # messages to the cluster. Does not include the original attempt
-   max_retries: 2
-   # number of seconds to wait between retries
-   retry_backoff: 1
-   # number of messages allowed in the buffer before new writes will
-   # raise {BufferOverflow} exceptions
-   max_buffer_size: 10000
-   # maximum size of the buffer in bytes. Attempting to produce messages
-   # when the buffer reaches this size will result in {BufferOverflow} being raised
-   max_buffer_bytesize: 10000000
-   # name of the compression codec to use, or nil if no compression should be performed.
-   # Valid codecs: `:snappy` and `:gzip`
-   compression_codec:
-   # number of messages that needs to be in a message set before it should be compressed.
-   # Note that message sets are per-partition rather than per-topic or per-producer
-   compression_threshold: 1
-   # maximum number of messages allowed in the queue. Only used for async_producer
-   max_queue_size: 10000
-   # if greater than zero, the number of buffered messages that will automatically
-   # trigger a delivery. Only used for async_producer
-   delivery_threshold: 0
-   # if greater than zero, the number of seconds between automatic message
-   # deliveries. Only used for async_producer
-   delivery_interval: 0
-
- consumer:
-   # number of seconds after which, if a client hasn't contacted the Kafka cluster,
-   # it will be kicked out of the group
-   session_timeout: 300
-   # interval between offset commits, in seconds
-   offset_commit_interval: 10
-   # number of messages that can be processed before their offsets are committed.
-   # If zero, offset commits are not triggered by message processing
-   offset_commit_threshold: 0
-   # interval between heartbeats; must be less than the session window
-   heartbeat_interval: 10
-
- backoff:
-   min_ms: 1000
-   max_ms: 60000
-
- listeners:
-   - handler: ConsumerTest::MyConsumer
-     topic: my_consume_topic
-     group_id: my_group_id
-     max_bytes_per_partition: 524288 # 512 KB
-   - handler: ConsumerTest::MyBatchConsumer
-     topic: my_batch_consume_topic
-     group_id: my_batch_group_id
-     delivery: inline_batch
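Both Phobos YML fixtures go away because broker settings now travel as librdkafka properties inside Karafka's setup block rather than through a Deimos-owned YML file. A hedged mapping of a few of the keys above — the property names are standard librdkafka options, but the values any given app needs should be taken from docs/UPGRADING.md:

```ruby
# Rough librdkafka equivalents for some phobos.yml settings (illustrative).
class KarafkaApp < Karafka::App
  setup do |config|
    config.client_id = 'phobos'                  # kafka.client_id
    config.kafka = {
      'bootstrap.servers': 'localhost:9092',
      'socket.timeout.ms': 15_000,               # kafka.socket_timeout (s -> ms)
      'session.timeout.ms': 300_000,             # consumer.session_timeout
      'heartbeat.interval.ms': 10_000,           # consumer.heartbeat_interval
      'request.required.acks': 1                 # producer.required_acks
    }
  end
end
```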
data/spec/utils/inline_consumer_spec.rb DELETED
@@ -1,31 +0,0 @@
- # frozen_string_literal: true
-
- describe Deimos::Utils::SeekListener do
-
-   describe '#start_listener' do
-     let(:consumer) { instance_double(Kafka::Consumer) }
-     let(:handler) { class_double(Deimos::Utils::MessageBankHandler) }
-
-     before(:each) do
-       allow(handler).to receive(:start)
-       allow(consumer).to receive(:subscribe)
-       allow_any_instance_of(Phobos::Listener).to receive(:create_kafka_consumer).and_return(consumer)
-       allow_any_instance_of(Kafka::Client).to receive(:last_offset_for).and_return(100)
-       stub_const('Deimos::Utils::SeekListener::MAX_SEEK_RETRIES', 2)
-     end
-
-     it 'should seek offset' do
-       allow(consumer).to receive(:seek)
-       expect(consumer).to receive(:seek).once
-       seek_listener = described_class.new(handler: handler, group_id: 999, topic: 'test_topic')
-       seek_listener.start_listener
-     end
-
-     it 'should retry on errors when seeking offset' do
-       allow(consumer).to receive(:seek).and_raise(StandardError)
-       expect(consumer).to receive(:seek).twice
-       seek_listener = described_class.new(handler: handler, group_id: 999, topic: 'test_topic')
-       seek_listener.start_listener
-     end
-   end
- end
data/spec/utils/lag_reporter_spec.rb DELETED
@@ -1,76 +0,0 @@
- # frozen_string_literal: true
-
- describe Deimos::Utils::LagReporter do
-
-   let(:kafka_client) { instance_double(Kafka::Client) }
-   let(:partition1_tags) { %w(consumer_group:group1 partition:1 topic:my-topic) }
-   let(:partition2_tags) { %w(consumer_group:group1 partition:2 topic:my-topic) }
-
-   before(:each) do
-     allow(kafka_client).to receive(:last_offset_for).and_return(100)
-     allow(Phobos).to receive(:create_kafka_client).and_return(kafka_client)
-     Deimos.configure { |c| c.consumers.report_lag = true }
-   end
-
-   after(:each) do
-     described_class.reset
-     Deimos.configure { |c| c.consumers.report_lag = false }
-   end
-
-   it 'should not report lag before ready' do
-     expect(Deimos.config.metrics).not_to receive(:gauge)
-     ActiveSupport::Notifications.instrument(
-       'heartbeat.consumer.kafka',
-       group_id: 'group1', topic_partitions: { 'my-topic': [1] }
-     )
-   end
-
-   it 'should report lag' do
-     expect(Deimos.config.metrics).to receive(:gauge).ordered.twice.
-       with('consumer_lag', 95, tags: partition1_tags)
-     expect(Deimos.config.metrics).to receive(:gauge).ordered.once.
-       with('consumer_lag', 80, tags: partition2_tags)
-     expect(Deimos.config.metrics).to receive(:gauge).ordered.once.
-       with('consumer_lag', 0, tags: partition2_tags)
-     ActiveSupport::Notifications.instrument(
-       'seek.consumer.kafka',
-       offset: 5, topic: 'my-topic', group_id: 'group1', partition: 1
-     )
-     ActiveSupport::Notifications.instrument(
-       'start_process_message.consumer.kafka',
-       offset: 20, topic: 'my-topic', group_id: 'group1', partition: 2
-     )
-     ActiveSupport::Notifications.instrument(
-       'heartbeat.consumer.kafka',
-       group_id: 'group1', topic_partitions: { 'my-topic': [1, 2] }
-     )
-     ActiveSupport::Notifications.instrument(
-       'start_process_batch.consumer.kafka',
-       last_offset: 100, topic: 'my-topic', group_id: 'group1', partition: 2
-     )
-     ActiveSupport::Notifications.instrument(
-       'heartbeat.consumer.kafka',
-       group_id: 'group1', topic_partitions: { 'my-topic': [1, 2] }
-     )
-   end
-
-   it 'should update lag after heartbeat' do
-     expect(Deimos.config.metrics).to receive(:gauge).ordered.once.
-       with('consumer_lag', 94, tags: partition2_tags)
-     expect(Deimos.config.metrics).to receive(:gauge).ordered.once.
-       with('consumer_lag', 95, tags: partition2_tags)
-     ActiveSupport::Notifications.instrument(
-       'seek.consumer.kafka',
-       offset: 6, topic: 'my-topic', group_id: 'group1', partition: 2
-     )
-     ActiveSupport::Notifications.instrument(
-       'heartbeat.consumer.kafka',
-       group_id: 'group1', topic_partitions: { 'my-topic': [1, 2] }
-     )
-     allow(kafka_client).to receive(:last_offset_for).and_return(101)
-     ActiveSupport::Notifications.instrument(
-       'heartbeat.consumer.kafka',
-       group_id: 'group1', topic_partitions: { 'my-topic': [1, 2] }
-     )
-   end
- end
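With the lag reporter deleted above, lag metrics ride on Karafka's own instrumentation instead — the expanded data/lib/deimos/metrics/datadog.rb points the same way. A minimal sketch using Karafka's vendored Datadog listener; this is stock Karafka wiring, and the diff does not confirm Deimos sets it up for you:

```ruby
require 'datadog/statsd'
require 'karafka/instrumentation/vendors/datadog/metrics_listener'

# Reports consumer metrics, including per-partition lag, to Datadog via statsd.
listener = Karafka::Instrumentation::Vendors::Datadog::MetricsListener.new do |config|
  config.client = Datadog::Statsd.new('localhost', 8125)
  config.namespace = 'karafka'
end

Karafka.monitor.subscribe(listener)
```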