deimos-ruby 1.1.0.pre.beta2 → 1.2.0.pre.beta1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +7 -0
- data/Gemfile.lock +1 -1
- data/README.md +52 -8
- data/lib/deimos.rb +2 -1
- data/lib/deimos/base_consumer.rb +17 -5
- data/lib/deimos/configuration.rb +34 -0
- data/lib/deimos/kafka_message.rb +17 -13
- data/lib/deimos/utils/db_producer.rb +68 -23
- data/lib/deimos/version.rb +1 -1
- data/spec/consumer_spec.rb +18 -0
- data/spec/deimos_spec.rb +3 -3
- data/spec/utils/db_producer_spec.rb +114 -1
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 5d27374b4f6388c2eae44b4b9654b10461c8fd7d044f5a67a63d7b1866b8658f
+  data.tar.gz: 1b058d23b6e5bbab471b57f74d0f85c3cd3c78131c9b43a32494f1b95b3c9110
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: fb7e58230650beaec9d777a8ee8e33a9210b08ef05d59f64e6e3a9c5f8cfb095af72f15a443063998d3c6fd579263f1b25d0409b371200fbe9d95bee43ab0300
+  data.tar.gz: a6198b80dd040db5f2b633fc5e8c83da727f6d9e0d9915684df00f9f1e55f73fec99ed8ff482ac5f37378d1ebae18ebf7d9d24b6b89051625274b2f70b9cfbc4
data/CHANGELOG.md
CHANGED
@@ -7,6 +7,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## UNRELEASED
 
+# [1.2.0-beta1] - 2019-09-12
+- Added `fatal_error` to both global config and consumer classes.
+- Changed `pending_db_messages_max_wait` metric to send per topic.
+- Added config to compact messages in the DB producer.
+- Added config to log messages in the DB producer.
+- Added config to provide a separate logger to the DB producer.
+
 # [1.1.0-beta2] - 2019-09-11
 - Fixed bug where ActiveRecordConsumer was not using `unscoped` to update
   via primary key and causing duplicate record errors.
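
Taken together, the new surface area of this release can be exercised in one place. A minimal sketch of the 1.2.0-beta1 configuration, based on the README changes below (`BadError` is a placeholder for an application-defined exception; the log path is illustrative):

```ruby
require 'deimos'
require 'logger'

Deimos.configure do |config|
  # Swallow consumer errors unless the block deems them fatal.
  config.reraise_consumer_errors = false
  config.fatal_error do |exception, _payload, _metadata|
    exception.is_a?(BadError) # placeholder exception class
  end

  # New db_producer sub-configuration.
  config.db_producer.logger = Logger.new('log/db_producer.log')
  config.db_producer.log_topics = ['topic1']
  config.db_producer.compact_topics = :all
end
```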
data/Gemfile.lock
CHANGED
data/README.md
CHANGED
@@ -2,7 +2,7 @@
 <img src="support/deimos-with-name.png" title="Deimos logo"/>
 <br/>
 <img src="https://img.shields.io/circleci/build/github/flipp-oss/deimos.svg" alt="CircleCI"/>
-<a href="https://badge.fury.io/rb/deimos"><img src="https://badge.fury.io/rb/deimos.svg" alt="Gem Version" height="18"></a>
+<a href="https://badge.fury.io/rb/deimos-ruby"><img src="https://badge.fury.io/rb/deimos-ruby.svg" alt="Gem Version" height="18"></a>
 <img src="https://img.shields.io/codeclimate/maintainability/flipp-oss/deimos.svg"/>
 </p>
 
@@ -46,7 +46,7 @@ Or install it yourself as:
 
 # Versioning
 
-We use version of semver for this gem. Any change in previous behavior
+We use a version of semver for this gem. Any change in previous behavior
 (something works differently or something old no longer works)
 is denoted with a bump in the minor version (0.4 -> 0.5). Patch versions
 are for bugfixes or new functionality which does not affect existing code. You
@@ -98,17 +98,38 @@ Deimos.configure do |config|
   # be able to proceed past it and will be stuck forever until you fix
   # your code.
   config.reraise_consumer_errors = true
+
+  # Another way to handle errors is to set reraise_consumer_errors to false
+  # but to set a global "fatal error" block that determines when to reraise:
+  config.fatal_error do |exception, payload, metadata|
+    exception.is_a?(BadError)
+  end
+  # Another example would be to check the database connection and fail
+  # if the DB is down entirely.
 
   # Set to true to send consumer lag metrics
   config.report_lag = %w(production staging).include?(Rails.env)
 
   # Change the default backend. See Backends, below.
   config.backend = :db
-
-  #
-
-
-
+
+  # Database Backend producer configuration
+
+  # Logger for DB producer
+  config.db_producer.logger = Logger.new('/db_producer.log')
+
+  # List of topics to print full messages for, or :all to print all
+  # topics. This can introduce slowdown since it needs to decode
+  # each message using the schema registry.
+  config.db_producer.log_topics = ['topic1', 'topic2']
+
+  # List of topics to compact before sending, i.e. only send the
+  # last message with any given key in a batch. This is an optimization
+  # which mirrors what Kafka itself will do with compaction turned on
+  # but only within a single batch. You can also specify :all to
+  # compact all topics.
+  config.db_producer.compact_topics = ['topic1', 'topic2']
+
   # Configure the metrics provider (see below).
   config.metrics = Deimos::Metrics::Mock.new({ tags: %w(env:prod my_tag:another_1) })
 
@@ -333,6 +354,13 @@ class MyConsumer < Deimos::Consumer
   # `schema` and `namespace`, above, for this to work.
   key_config field: :my_id
 
+  # Optionally overload this to consider a particular exception
+  # "fatal" only for this consumer. This is considered in addition
+  # to the global `fatal_error` configuration block.
+  def fatal_error?(exception, payload, metadata)
+    exception.is_a?(MyBadError)
+  end
+
   def consume(payload, metadata)
     # Same method as Phobos consumers.
     # payload is an Avro-decoded hash.
@@ -344,6 +372,20 @@ class MyConsumer < Deimos::Consumer
 end
 ```
 
+### Fatal Errors
+
+The recommended configuration is for consumers *not* to raise errors
+they encounter while consuming messages. Errors can come from
+a variety of sources and it's possible that the message itself (or
+what downstream systems are doing with it) is causing them. If you do
+not continue on past this message, your consumer will essentially be
+stuck forever unless you take manual action to skip the offset.
+
+Use `config.reraise_consumer_errors = false` to swallow errors. You
+can use instrumentation to handle errors you receive. You can also
+specify "fatal errors" either via global configuration (`config.fatal_error`)
+or via overriding a method on an individual consumer (`def fatal_error?`).
+
 ### Batch Consumption
 
 Instead of consuming messages one at a time, consumers can receive a batch of
@@ -636,7 +678,9 @@ The following metrics are reported:
 to publish. Tagged with `topic:{topic_name}`
 * `pending_db_messages_max_wait` - the number of seconds which the
   oldest KafkaMessage in the database has been waiting for, for use
-  with the database backend.
+  with the database backend. Tagged with the topic that is waiting.
+  Will send a value of 0 with no topics tagged if there are no messages
+  waiting.
 
 ### Configuring Metrics Providers
 
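
Combining the README's global and per-consumer hooks: a sketch of a consumer that swallows most errors but reraises its own fatal class (`MySchema`, `MyBadError`, and the key field are illustrative, not part of the diff):

```ruby
class MyBadError < StandardError; end

class MyConsumer < Deimos::Consumer
  schema 'MySchema'
  namespace 'com.my-namespace'
  key_config field: :my_id

  # Checked in addition to the global config.fatal_error block.
  def fatal_error?(exception, _payload, _metadata)
    exception.is_a?(MyBadError)
  end

  def consume(payload, metadata)
    # Handle the decoded payload; non-fatal errors raised here are
    # swallowed when config.reraise_consumer_errors is false.
  end
end
```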
data/lib/deimos.rb
CHANGED
@@ -90,7 +90,8 @@ module Deimos
     end
 
     producers = (1..thread_count).map do
-      Deimos::Utils::DbProducer.
+      Deimos::Utils::DbProducer.
+        new(self.config.db_producer.logger || self.config.logger)
     end
     executor = Deimos::Utils::Executor.new(producers,
                                            sleep_seconds: 5,
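
The change above means each DB producer thread prefers the dedicated `db_producer` logger and falls back to the global logger otherwise. A sketch of wiring this up through the gem's DB backend entry point (the log path is illustrative):

```ruby
Deimos.configure do |config|
  config.publish_backend = :db
  config.logger = Logger.new(STDOUT)
  # Optional: without this, DbProducer threads use config.logger.
  config.db_producer.logger = Logger.new('log/db_producer.log')
end

# Spawns the DbProducer threads inside an executor (see deimos.rb above).
Deimos.start_db_backend!(thread_count: 2)
```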
data/lib/deimos/base_consumer.rb
CHANGED
@@ -74,13 +74,25 @@ module Deimos
       ))
     end
 
-    #
-    #
-    # @param
-
+    # Overrideable method to determine if a given error should be considered
+    # "fatal" and always be reraised.
+    # @param error [Exception]
+    # @param payload [Hash]
+    # @param metadata [Hash]
+    # @return [Boolean]
+    def fatal_error?(_error, _payload, _metadata)
+      false
+    end
+
+    # @param exception [Exception]
+    # @param payload [Hash]
+    # @param metadata [Hash]
+    def _handle_error(exception, payload, metadata)
       Deimos.config.tracer&.set_error(@span, exception)
 
-      raise if Deimos.config.reraise_consumer_errors
+      raise if Deimos.config.reraise_consumer_errors ||
+               Deimos.config.fatal_error_block.call(exception, payload, metadata) ||
+               fatal_error?(exception, payload, metadata)
     end
 
     # @param _time_taken [Float]
data/lib/deimos/configuration.rb
CHANGED
@@ -72,10 +72,26 @@ module Deimos
     # @return [Tracing::Provider]
     attr_accessor :tracer
 
+    # @return [Deimos::DbProducerConfiguration]
+    attr_accessor :db_producer
+
+    # For internal purposes only
+    # @return [Block]
+    attr_accessor :fatal_error_block
+
     # :nodoc:
     def initialize
       @phobos_config_file = 'config/phobos.yml'
       @publish_backend = :kafka_async
+      @db_producer = DbProducerConfiguration.new
+      fatal_error { false }
+    end
+
+    # Block taking an exception, payload and metadata and returning
+    # true if this should be considered a fatal error and false otherwise.
+    # Not needed if reraise_consumer_errors is set to true.
+    def fatal_error(&block)
+      @fatal_error_block = block
     end
 
     # @param other_config [Configuration]
@@ -87,4 +103,22 @@ module Deimos
       other_config.logger != self.logger
     end
   end
+
+  # Sub-class for DB producer configs.
+  class DbProducerConfiguration
+    # @return [Logger]
+    attr_accessor :logger
+    # @return [Symbol|Array<String>] A list of topics to log all messages, or
+    # :all to log all topics.
+    attr_accessor :log_topics
+    # @return [Symbol|Array<String>] A list of topics to compact messages for
+    # before sending, or :all to compact all keyed messages.
+    attr_accessor :compact_topics
+
+    # :nodoc:
+    def initialize
+      @log_topics = []
+      @compact_topics = []
+    end
+  end
 end
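
The nested object reads and writes like any other config attribute; the specs later in this diff set it the same way. A quick sketch:

```ruby
Deimos.configure do |c|
  c.db_producer.compact_topics = %w(my-topic my-topic2)
end

Deimos.config.db_producer.compact_topics # => ["my-topic", "my-topic2"]
Deimos.config.db_producer.log_topics     # => [] (the default)
```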
data/lib/deimos/kafka_message.rb
CHANGED
@@ -14,9 +14,11 @@ module Deimos
       write_attribute(:message, mess ? mess.to_s : nil)
     end
 
+    # Get a decoder to decode a set of messages on the given topic.
+    # @param topic [String]
     # @return [Deimos::Consumer]
-    def decoder
-      producer = Deimos::Producer.descendants.find { |c| c.topic ==
+    def self.decoder(topic)
+      producer = Deimos::Producer.descendants.find { |c| c.topic == topic }
       return nil unless producer
 
       consumer = Class.new(Deimos::Consumer)
@@ -24,17 +26,19 @@ module Deimos
       consumer
     end
 
-    #
-    #
-    # @
-
-
-
-
-
-
-
-
+    # Decoded payloads for a list of messages.
+    # @param messages [Array<Deimos::KafkaMessage>]
+    # @return [Array<Hash>]
+    def self.decoded(messages=[])
+      return [] if messages.empty?
+
+      decoder = self.decoder(messages.first.topic)&.new
+      messages.map do |m|
+        {
+          key: m.key.present? ? decoder&.decode_key(m.key) || m.key : nil,
+          payload: decoder&.decoder&.decode(self.message) || self.message
+        }
+      end
     end
 
     # @return [Hash]
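
`decoder` is now a class method keyed by topic, and `decoded` maps a whole batch to `{key:, payload:}` hashes from a single decoder lookup. A usage sketch (assumes a producer class is registered for 'my-topic'):

```ruby
messages = Deimos::KafkaMessage.where(topic: 'my-topic').order(:id).limit(10).to_a

Deimos::KafkaMessage.decoded(messages).each do |decoded|
  puts "#{decoded[:key]} => #{decoded[:payload]}"
end
```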
data/lib/deimos/utils/db_producer.rb
CHANGED
@@ -16,6 +16,11 @@ module Deimos
         @logger.push_tags("DbProducer #{@id}") if @logger.respond_to?(:push_tags)
       end
 
+      # @return [Deimos::DbProducerConfig]
+      def config
+        Deimos.config.db_producer
+      end
+
       # Start the poll.
       def start
         @logger.info('Starting...')
@@ -60,44 +65,72 @@ module Deimos
             return
           end
           @current_topic = topic
-          messages = retrieve_messages
 
-
-          @logger.debug do
-            decoder = messages.first.decoder
-            "DB producer: Topic #{topic} Producing messages: #{messages.map { |m| m.decoded_message(decoder) }}"
-          end
-          Deimos.instrument('db_producer.produce', topic: topic, messages: messages) do
-            begin
-              produce_messages(messages.map(&:phobos_message))
-            rescue Kafka::BufferOverflow, Kafka::MessageSizeTooLarge, Kafka::RecordListTooLarge
-              messages.each(&:delete)
-              raise
-            end
-          end
-          messages.first.class.where(id: messages.map(&:id)).delete_all
-          break if messages.size < BATCH_SIZE
+        loop { break unless process_topic_batch }
 
-          KafkaTopicInfo.heartbeat(@current_topic, @id) # keep alive
-          send_pending_metrics
-          messages = retrieve_messages
-        end
         KafkaTopicInfo.clear_lock(@current_topic, @id)
       rescue StandardError => e
         @logger.error("Error processing messages for topic #{@current_topic}: #{e.class.name}: #{e.message} #{e.backtrace.join("\n")}")
         KafkaTopicInfo.register_error(@current_topic, @id)
       end
 
+      # Process a single batch in a topic.
+      def process_topic_batch
+        messages = retrieve_messages
+        return false if messages.empty?
+
+        batch_size = messages.size
+        compacted_messages = compact_messages(messages)
+        log_messages(compacted_messages)
+        Deimos.instrument('db_producer.produce', topic: @current_topic, messages: compacted_messages) do
+          begin
+            produce_messages(compacted_messages.map(&:phobos_message))
+          rescue Kafka::BufferOverflow, Kafka::MessageSizeTooLarge, Kafka::RecordListTooLarge
+            Deimos::KafkaMessage.where(id: messages.map(&:id)).delete_all
+            @logger.error('Message batch too large, deleting...')
+            @logger.error(Deimos::KafkaMessage.decoded(messages))
+            raise
+          end
+        end
+        Deimos::KafkaMessage.where(id: messages.map(&:id)).delete_all
+        return false if batch_size < BATCH_SIZE
+
+        KafkaTopicInfo.heartbeat(@current_topic, @id) # keep alive
+        send_pending_metrics
+        true
+      end
+
       # @return [Array<Deimos::KafkaMessage>]
       def retrieve_messages
         KafkaMessage.where(topic: @current_topic).order(:id).limit(BATCH_SIZE)
       end
 
+      # @param messages [Array<Deimos::KafkaMessage>]
+      def log_messages(messages)
+        return if config.log_topics != :all && !config.log_topics.include?(@current_topic)
+
+        @logger.debug do
+          decoded_messages = Deimos::KafkaMessage.decoded(messages)
+          "DB producer: Topic #{@current_topic} Producing messages: #{decoded_messages}}"
+        end
+      end
+
       # Send metrics to Datadog.
       def send_pending_metrics
-
-
-
+        metrics = Deimos.config.metrics
+        return unless metrics
+
+        messages = Deimos::KafkaMessage.
+          select('count(*) as num_messages, min(created_at) as earliest, topic').
+          group(:topic)
+        if messages.none?
+          metrics.gauge('pending_db_messages_max_wait', 0)
+        end
+        messages.each do |record|
+          time_diff = Time.zone.now - record.earliest
+          metrics.gauge('pending_db_messages_max_wait', time_diff,
+                        tags: ["topic:#{record.topic}"])
+        end
       end
 
       # Shut down the sync producer if we have to. Phobos will automatically
@@ -140,6 +173,18 @@ module Deimos
           retry
         end
       end
+
+      # @param batch [Array<Deimos::KafkaMessage>]
+      # @return [Array<Deimos::KafkaMessage>]
+      def compact_messages(batch)
+        return batch unless batch.first&.key.present?
+
+        topic = batch.first.topic
+        return batch if config.compact_topics != :all &&
+                        !config.compact_topics.include?(topic)
+
+        batch.reverse.uniq!(&:key).reverse!
+      end
     end
   end
 end
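
Compaction keeps only the last message for each key within a batch, mirroring Kafka log compaction on a per-batch basis. The reverse/uniq/reverse idea in isolation, on plain hashes:

```ruby
batch = [
  { key: 1, message: 'AAA' },
  { key: 2, message: 'BBB' },
  { key: 1, message: 'CCC' }
]

# Walk the batch backwards so the last occurrence of each key wins,
# then restore the original relative order.
compacted = batch.reverse.uniq { |m| m[:key] }.reverse
p compacted # => [{:key=>2, :message=>"BBB"}, {:key=>1, :message=>"CCC"}]
```

(Note the non-destructive `uniq` here; it illustrates the same keep-the-last-per-key behavior that the `#compact_messages` specs later in this diff assert.)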
data/lib/deimos/version.rb
CHANGED
data/spec/consumer_spec.rb
CHANGED
@@ -11,6 +11,11 @@ module ConsumerTest
       namespace 'com.my-namespace'
       key_config field: 'test_id'
 
+      # :nodoc:
+      def fatal_error?(_exception, payload, _metadata)
+        payload == 'fatal'
+      end
+
       # :nodoc:
       def consume(_payload, _metadata)
         raise 'This should not be called unless call_original is set'
@@ -39,6 +44,19 @@ module ConsumerTest
       test_consume_invalid_message(MyConsumer, 'invalid' => 'key')
     end
 
+    it 'should fail if reraise is false but fatal_error is true' do
+      Deimos.configure { |config| config.reraise_consumer_errors = false }
+      test_consume_invalid_message(MyConsumer, 'fatal')
+    end
+
+    it 'should fail if fatal_error is true globally' do
+      Deimos.configure do |config|
+        config.fatal_error { true }
+        config.reraise_consumer_errors = false
+      end
+      test_consume_invalid_message(MyConsumer, 'invalid' => 'key')
+    end
+
     it 'should fail on message with extra fields' do
       test_consume_invalid_message(MyConsumer,
                                    'test_id' => 'foo',
data/spec/deimos_spec.rb
CHANGED
@@ -84,7 +84,7 @@ describe Deimos do
     allow(described_class).to receive(:run_db_backend)
   end
 
-  it 'should start if backend is db and
+  it 'should start if backend is db and thread_count is > 0' do
     signal_handler = instance_double(Deimos::Utils::SignalHandler)
     allow(signal_handler).to receive(:run!)
     expect(Deimos::Utils::Executor).to receive(:new).
@@ -108,7 +108,7 @@ describe Deimos do
       to raise_error('Publish backend is not set to :db, exiting')
   end
 
-  it 'should not start if
+  it 'should not start if thread_count is nil' do
     expect(Deimos::Utils::SignalHandler).not_to receive(:new)
     described_class.configure do |config|
       config.publish_backend = :db
@@ -117,7 +117,7 @@ describe Deimos do
       to raise_error('Thread count is not given or set to zero, exiting')
   end
 
-  it 'should not start if
+  it 'should not start if thread_count is 0' do
     expect(Deimos::Utils::SignalHandler).not_to receive(:new)
     described_class.configure do |config|
       config.publish_backend = :db

data/spec/utils/db_producer_spec.rb
CHANGED
@@ -93,6 +93,64 @@ each_db_config(Deimos::Utils::DbProducer) do
       expect(phobos_producer).to have_received(:publish_list).with(['A']).once
 
     end
+
+    describe '#compact_messages' do
+      let(:batch) do
+        [
+          {
+            key: 1,
+            topic: 'my-topic',
+            message: 'AAA'
+          },
+          {
+            key: 2,
+            topic: 'my-topic',
+            message: 'BBB'
+          },
+          {
+            key: 1,
+            topic: 'my-topic',
+            message: 'CCC'
+          }
+        ].map { |h| Deimos::KafkaMessage.create!(h) }
+      end
+
+      let(:deduped_batch) { batch[1..2] }
+
+      it 'should dedupe messages when :all is set' do
+        Deimos.configure { |c| c.db_producer.compact_topics = :all }
+        expect(producer.compact_messages(batch)).to eq(deduped_batch)
+      end
+
+      it 'should dedupe messages when topic is included' do
+        Deimos.configure { |c| c.db_producer.compact_topics = %w(my-topic my-topic2) }
+        expect(producer.compact_messages(batch)).to eq(deduped_batch)
+      end
+
+      it 'should not dedupe messages when topic is not included' do
+        Deimos.configure { |c| c.db_producer.compact_topics = %w(my-topic3 my-topic2) }
+        expect(producer.compact_messages(batch)).to eq(batch)
+      end
+
+      it 'should not dedupe messages without keys' do
+        unkeyed_batch = [
+          {
+            key: nil,
+            topic: 'my-topic',
+            message: 'AAA'
+          },
+          {
+            key: nil,
+            topic: 'my-topic',
+            message: 'BBB'
+          }
+        ].map { |h| Deimos::KafkaMessage.create!(h) }
+        Deimos.configure { |c| c.db_producer.compact_topics = :all }
+        expect(producer.compact_messages(unkeyed_batch)).to eq(unkeyed_batch)
+        Deimos.configure { |c| c.db_producer.compact_topics = [] }
+      end
+
+    end
   end
 
   describe '#process_topic' do
@@ -119,6 +177,7 @@ each_db_config(Deimos::Utils::DbProducer) do
         with('my-topic', 'abc').and_return(true)
       expect(producer).to receive(:retrieve_messages).ordered.
         and_return(messages[0..1])
+      expect(producer).to receive(:send_pending_metrics).twice
       expect(producer).to receive(:produce_messages).ordered.with([
         {
           payload: 'mess1',
@@ -193,16 +252,70 @@ each_db_config(Deimos::Utils::DbProducer) do
       expect(Deimos::KafkaTopicInfo).to receive(:register_error)
 
       expect(Deimos::KafkaMessage.count).to eq(4)
-      Deimos.subscribe('db_producer.produce') do |event|
+      subscriber = Deimos.subscribe('db_producer.produce') do |event|
         expect(event.payload[:exception_object].message).to eq('OH NOES')
         expect(event.payload[:messages]).to eq(messages)
       end
       producer.process_topic('my-topic')
+      # don't delete for regular errors
+      expect(Deimos::KafkaMessage.count).to eq(4)
+      Deimos.unsubscribe(subscriber)
+    end
+
+    it 'should delete messages on buffer overflow' do
+      messages = (1..4).map do |i|
+        Deimos::KafkaMessage.create!(
+          id: i,
+          topic: 'my-topic',
+          message: "mess#{i}",
+          partition_key: "key#{i}"
+        )
+      end
+
+      expect(Deimos::KafkaTopicInfo).to receive(:lock).
+        with('my-topic', 'abc').and_return(true)
+      expect(producer).to receive(:produce_messages).and_raise(Kafka::BufferOverflow)
+      expect(producer).to receive(:retrieve_messages).and_return(messages)
+      expect(Deimos::KafkaTopicInfo).to receive(:register_error)
+
+      expect(Deimos::KafkaMessage.count).to eq(4)
+      producer.process_topic('my-topic')
       expect(Deimos::KafkaMessage.count).to eq(0)
     end
 
   end
 
+  describe '#send_pending_metrics' do
+    it 'should use the first created_at for each topic' do |example|
+      # sqlite date-time strings don't work correctly
+      next if example.metadata[:db_config][:adapter] == 'sqlite3'
+
+      freeze_time do
+        (1..2).each do |i|
+          Deimos::KafkaMessage.create!(topic: "topic#{i}", message: nil,
+                                       created_at: (3 + i).minutes.ago)
+          Deimos::KafkaMessage.create!(topic: "topic#{i}", message: nil,
+                                       created_at: (2 + i).minutes.ago)
+          Deimos::KafkaMessage.create!(topic: "topic#{i}", message: nil,
+                                       created_at: (1 + i).minute.ago)
+        end
+        allow(Deimos.config.metrics).to receive(:gauge)
+        producer.send_pending_metrics
+        expect(Deimos.config.metrics).to have_received(:gauge).twice
+        expect(Deimos.config.metrics).to have_received(:gauge).
+          with('pending_db_messages_max_wait', 4.minutes.to_i, tags: ['topic:topic1'])
+        expect(Deimos.config.metrics).to have_received(:gauge).
+          with('pending_db_messages_max_wait', 5.minutes.to_i, tags: ['topic:topic2'])
+      end
+    end
+
+    it 'should send 0 if no messages' do
+      expect(Deimos.config.metrics).to receive(:gauge).
+        with('pending_db_messages_max_wait', 0)
+      producer.send_pending_metrics
+    end
+  end
+
   example 'Full integration test' do
     (1..4).each do |i|
       (1..2).each do |j|
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: deimos-ruby
 version: !ruby/object:Gem::Version
-  version: 1.
+  version: 1.2.0.pre.beta1
 platform: ruby
 authors:
 - Daniel Orner
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2019-09-
+date: 2019-09-12 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: avro-patches