deimos-ruby 1.7.0.pre.beta1 → 1.8.0.pre.beta1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +13 -0
- data/Gemfile.lock +8 -2
- data/README.md +69 -15
- data/deimos-ruby.gemspec +2 -0
- data/docs/ARCHITECTURE.md +144 -0
- data/docs/CONFIGURATION.md +4 -0
- data/lib/deimos.rb +6 -6
- data/lib/deimos/active_record_consume/batch_consumption.rb +159 -0
- data/lib/deimos/active_record_consume/batch_slicer.rb +27 -0
- data/lib/deimos/active_record_consume/message_consumption.rb +58 -0
- data/lib/deimos/active_record_consume/schema_model_converter.rb +52 -0
- data/lib/deimos/active_record_consumer.rb +33 -75
- data/lib/deimos/batch_consumer.rb +2 -142
- data/lib/deimos/config/configuration.rb +8 -10
- data/lib/deimos/consume/batch_consumption.rb +148 -0
- data/lib/deimos/consume/message_consumption.rb +93 -0
- data/lib/deimos/consumer.rb +79 -72
- data/lib/deimos/kafka_message.rb +1 -1
- data/lib/deimos/message.rb +6 -1
- data/lib/deimos/utils/db_poller.rb +6 -6
- data/lib/deimos/utils/db_producer.rb +6 -2
- data/lib/deimos/utils/deadlock_retry.rb +68 -0
- data/lib/deimos/utils/lag_reporter.rb +19 -26
- data/lib/deimos/version.rb +1 -1
- data/spec/active_record_batch_consumer_spec.rb +481 -0
- data/spec/active_record_consume/batch_slicer_spec.rb +42 -0
- data/spec/active_record_consume/schema_model_converter_spec.rb +105 -0
- data/spec/active_record_consumer_spec.rb +3 -11
- data/spec/batch_consumer_spec.rb +23 -7
- data/spec/config/configuration_spec.rb +4 -0
- data/spec/consumer_spec.rb +6 -6
- data/spec/deimos_spec.rb +57 -49
- data/spec/handlers/my_batch_consumer.rb +6 -1
- data/spec/handlers/my_consumer.rb +6 -1
- data/spec/message_spec.rb +19 -0
- data/spec/schemas/com/my-namespace/MySchemaCompound-key.avsc +18 -0
- data/spec/schemas/com/my-namespace/Wibble.avsc +43 -0
- data/spec/spec_helper.rb +17 -0
- data/spec/utils/db_poller_spec.rb +2 -2
- data/spec/utils/deadlock_retry_spec.rb +74 -0
- data/spec/utils/lag_reporter_spec.rb +29 -22
- metadata +57 -16
- data/lib/deimos/base_consumer.rb +0 -100
- data/lib/deimos/utils/executor.rb +0 -124
- data/lib/deimos/utils/platform_schema_validation.rb +0 -0
- data/lib/deimos/utils/signal_handler.rb +0 -68
- data/spec/utils/executor_spec.rb +0 -53
- data/spec/utils/signal_handler_spec.rb +0 -16
data/lib/deimos/utils/db_producer.rb
CHANGED
@@ -87,9 +87,9 @@ module Deimos
       begin
         produce_messages(compacted_messages.map(&:phobos_message))
       rescue Kafka::BufferOverflow, Kafka::MessageSizeTooLarge, Kafka::RecordListTooLarge
-        Deimos::KafkaMessage.where(id: messages.map(&:id)).delete_all
         @logger.error('Message batch too large, deleting...')
         @logger.error(Deimos::KafkaMessage.decoded(messages))
+        Deimos::KafkaMessage.where(id: messages.map(&:id)).delete_all
         raise
       end
     end
@@ -133,7 +133,11 @@ module Deimos
         metrics.gauge('pending_db_messages_max_wait', 0)
       end
       messages.each do |record|
-        time_diff = Time.zone.now - record.earliest
+        earliest = record.earliest
+        # SQLite gives a string here
+        earliest = Time.zone.parse(earliest) if earliest.is_a?(String)
+
+        time_diff = Time.zone.now - earliest
         metrics.gauge('pending_db_messages_max_wait', time_diff,
                       tags: ["topic:#{record.topic}"])
       end
data/lib/deimos/utils/deadlock_retry.rb
ADDED
@@ -0,0 +1,68 @@
+# frozen_string_literal: true
+
+module Deimos
+  module Utils
+    # Utility class to retry a given block if a deadlock is encountered.
+    # Supports Postgres and MySQL deadlocks and lock wait timeouts.
+    class DeadlockRetry
+      class << self
+        # Maximum number of times to retry the block after encountering a deadlock
+        RETRY_COUNT = 2
+
+        # Need to match on error messages to support older Rails versions
+        DEADLOCK_MESSAGES = [
+          # MySQL
+          'Deadlock found when trying to get lock',
+          'Lock wait timeout exceeded',
+
+          # Postgres
+          'deadlock detected'
+        ].freeze
+
+        # Retry the given block when encountering a deadlock. For any other
+        # exceptions, they are reraised. This is used to handle cases where
+        # the database may be busy but the transaction would succeed if
+        # retried later. Note that your block should be idempotent and it will
+        # be wrapped in a transaction.
+        # Sleeps for a random number of seconds to prevent multiple transactions
+        # from retrying at the same time.
+        # @param tags [Array] Tags to attach when logging and reporting metrics.
+        # @yield Yields to the block that may deadlock.
+        def wrap(tags=[])
+          count = RETRY_COUNT
+
+          begin
+            ActiveRecord::Base.transaction do
+              yield
+            end
+          rescue ActiveRecord::StatementInvalid => e
+            # Reraise if not a known deadlock
+            raise if DEADLOCK_MESSAGES.none? { |m| e.message.include?(m) }
+
+            # Reraise if all retries exhausted
+            raise if count <= 0
+
+            Deimos.config.logger.warn(
+              message: 'Deadlock encountered when trying to execute query. '\
+                       "Retrying. #{count} attempt(s) remaining",
+              tags: tags
+            )
+
+            Deimos.config.metrics&.increment(
+              'deadlock',
+              tags: tags
+            )
+
+            count -= 1
+
+            # Sleep for a random amount so that if there are multiple
+            # transactions deadlocking, they don't all retry at the same time
+            sleep(Random.rand(5.0) + 0.5)
+
+            retry
+          end
+        end
+      end
+    end
+  end
+end
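The doc comments above spell out the contract for `DeadlockRetry.wrap`. As a minimal usage sketch (not taken from this diff: the `Widget` model and the tag value are hypothetical), a consumer's database write could be wrapped like so:

```ruby
# Sketch only: wrap a DB write that may deadlock under concurrent consumers.
# wrap() opens its own ActiveRecord transaction and retries the block up to
# RETRY_COUNT times on MySQL/Postgres deadlock errors, so keep the block
# idempotent.
Deimos::Utils::DeadlockRetry.wrap(%w(topic:my-topic)) do
  Widget.find_or_create_by!(test_id: 'abc')
end
```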
data/lib/deimos/utils/lag_reporter.rb
CHANGED
@@ -29,30 +29,21 @@ module Deimos
           self.topics[topic.to_s].report_lag(partition)
         end
 
-        # @param topic [String]
-        # @param partition [Integer]
-        # @param lag [Integer]
-        def assign_lag(topic, partition, lag)
-          self.topics[topic.to_s] ||= Topic.new(topic, self)
-          self.topics[topic.to_s].assign_lag(partition, lag)
-        end
-
-        # Figure out the current lag by asking Kafka based on the current offset.
         # @param topic [String]
         # @param partition [Integer]
         # @param offset [Integer]
-        def compute_lag(topic, partition, offset)
+        def assign_current_offset(topic, partition, offset)
           self.topics[topic.to_s] ||= Topic.new(topic, self)
-          self.topics[topic.to_s].compute_lag(partition, offset)
+          self.topics[topic.to_s].assign_current_offset(partition, offset)
         end
       end
 
-      # Topic which has a hash of partition => last known offset lags
+      # Topic which has a hash of partition => last known current offsets
       class Topic
         # @return [String]
         attr_accessor :topic_name
         # @return [Hash<Integer, Integer>]
-        attr_accessor :partition_offset_lags
+        attr_accessor :partition_current_offsets
         # @return [ConsumerGroup]
         attr_accessor :consumer_group
 
@@ -61,35 +52,33 @@ module Deimos
         def initialize(topic_name, group)
           self.topic_name = topic_name
           self.consumer_group = group
-          self.partition_offset_lags = {}
+          self.partition_current_offsets = {}
         end
 
         # @param partition [Integer]
-        # @param lag [Integer]
-        def assign_lag(partition, lag)
-          self.partition_offset_lags[partition.to_i] = lag
+        def assign_current_offset(partition, offset)
+          self.partition_current_offsets[partition.to_i] = offset
         end
 
         # @param partition [Integer]
-        # @param offset [Integer]
         def compute_lag(partition, offset)
-          return if self.partition_offset_lags[partition.to_i]
-
           begin
             client = Phobos.create_kafka_client
             last_offset = client.last_offset_for(self.topic_name, partition)
-            assign_lag(partition, last_offset - offset)
+            lag = last_offset - offset
           rescue StandardError # don't do anything, just wait
             Deimos.config.logger.
               debug("Error computing lag for #{self.topic_name}, will retry")
           end
+          lag || 0
         end
 
         # @param partition [Integer]
         def report_lag(partition)
-          lag = self.partition_offset_lags[partition.to_i]
-          return unless lag
+          current_offset = self.partition_current_offsets[partition.to_i]
+          return unless current_offset
 
+          lag = compute_lag(partition, current_offset)
           group = self.consumer_group.id
           Deimos.config.logger.
             debug("Sending lag: #{group}/#{partition}: #{lag}")
@@ -109,16 +98,20 @@ module Deimos
           @groups = {}
         end
 
+        # offset_lag = event.payload.fetch(:offset_lag)
+        # group_id = event.payload.fetch(:group_id)
+        # topic = event.payload.fetch(:topic)
+        # partition = event.payload.fetch(:partition)
         # @param payload [Hash]
         def message_processed(payload)
-          lag = payload[:offset_lag]
+          offset = payload[:offset] || payload[:last_offset]
           topic = payload[:topic]
           group = payload[:group_id]
           partition = payload[:partition]
 
           synchronize do
             @groups[group.to_s] ||= ConsumerGroup.new(group)
-            @groups[group.to_s].assign_lag(topic, partition, lag)
+            @groups[group.to_s].assign_current_offset(topic, partition, offset)
           end
         end
 
@@ -131,7 +124,7 @@ module Deimos
 
           synchronize do
             @groups[group.to_s] ||= ConsumerGroup.new(group)
-            @groups[group.to_s].compute_lag(topic, partition, offset)
+            @groups[group.to_s].assign_current_offset(topic, partition, offset)
           end
         end
 
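Net effect of the lag_reporter.rb hunks: instead of caching precomputed lags, the reporter now records the last consumed offset per partition and derives lag from Kafka's latest offset at report time. A rough sketch of that flow, assuming the handlers shown are class-level (the `synchronize` usage suggests a `Mutex_m`-style module) and using a made-up payload:

```ruby
# Hypothetical illustration only: the payload fields mirror those read in
# message_processed; the class-level call style is an assumption.
payload = { offset: 42, topic: 'my-topic', group_id: 'my-group', partition: 0 }

# Stores offset 42 as the current offset for my-topic / partition 0.
Deimos::Utils::LagReporter.message_processed(payload)

# Later, report_lag looks up the stored offset, asks Kafka for the
# partition's last offset, and gauges (last_offset - 42), or 0 on error.
```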
data/spec/active_record_batch_consumer_spec.rb
ADDED
@@ -0,0 +1,481 @@
+# frozen_string_literal: true
+
+# Wrapped in a module to prevent class leakage
+module ActiveRecordBatchConsumerTest
+  describe Deimos::ActiveRecordConsumer do
+    # Create ActiveRecord table and model
+    before(:all) do
+      ActiveRecord::Base.connection.create_table(:widgets, force: true) do |t|
+        t.string(:test_id)
+        t.string(:part_one)
+        t.string(:part_two)
+        t.integer(:some_int)
+        t.boolean(:deleted, default: false)
+        t.timestamps
+
+        t.index(%i(part_one part_two), unique: true)
+      end
+
+      # Sample model
+      class Widget < ActiveRecord::Base
+        validates :test_id, presence: true
+
+        default_scope -> { where(deleted: false) }
+      end
+
+      Widget.reset_column_information
+    end
+
+    after(:all) do
+      ActiveRecord::Base.connection.drop_table(:widgets)
+    end
+
+    prepend_before(:each) do
+      stub_const('MyBatchConsumer', consumer_class)
+    end
+
+    around(:each) do |ex|
+      # Set and freeze example time
+      travel_to start do
+        ex.run
+      end
+    end
+
+    # Default starting time
+    let(:start) { Time.zone.local(2019, 1, 1, 10, 30, 0) }
+
+    # Basic uncompacted consumer
+    let(:consumer_class) do
+      Class.new(described_class) do
+        schema 'MySchema'
+        namespace 'com.my-namespace'
+        key_config plain: true
+        record_class Widget
+        compacted false
+      end
+    end
+
+    # Helper to get all instances, ignoring default scopes
+    def all_widgets
+      Widget.unscoped.all
+    end
+
+    # Helper to publish a list of messages and call the consumer
+    def publish_batch(messages)
+      keys = messages.map { |m| m[:key] }
+      payloads = messages.map { |m| m[:payload] }
+
+      test_consume_batch(MyBatchConsumer, payloads, keys: keys, call_original: true)
+    end
+
+    it 'should handle an empty batch' do
+      expect { publish_batch([]) }.not_to raise_error
+    end
+
+    it 'should create records from a batch' do
+      publish_batch(
+        [
+          { key: 1,
+            payload: { test_id: 'abc', some_int: 3 } },
+          { key: 2,
+            payload: { test_id: 'def', some_int: 4 } }
+        ]
+      )
+
+      expect(all_widgets).
+        to match_array(
+          [
+            have_attributes(id: 1, test_id: 'abc', some_int: 3, updated_at: start, created_at: start),
+            have_attributes(id: 2, test_id: 'def', some_int: 4, updated_at: start, created_at: start)
+          ]
+        )
+    end
+
+    it 'should handle deleting a record that doesn\'t exist' do
+      publish_batch(
+        [
+          { key: 1,
+            payload: nil }
+        ]
+      )
+
+      expect(all_widgets).to be_empty
+    end
+
+    it 'should handle an update, followed by a delete in the correct order' do
+      Widget.create!(id: 1, test_id: 'abc', some_int: 2)
+
+      publish_batch(
+        [
+          { key: 1,
+            payload: { test_id: 'abc', some_int: 3 } },
+          { key: 1,
+            payload: nil }
+        ]
+      )
+
+      expect(all_widgets).to be_empty
+    end
+
+    it 'should handle a delete, followed by an update in the correct order' do
+      Widget.create!(id: 1, test_id: 'abc', some_int: 2)
+
+      travel 1.day
+
+      publish_batch(
+        [
+          { key: 1,
+            payload: nil },
+          { key: 1,
+            payload: { test_id: 'abc', some_int: 3 } }
+        ]
+      )
+
+      expect(all_widgets).
+        to match_array(
+          [
+            have_attributes(id: 1, test_id: 'abc', some_int: 3, updated_at: Time.zone.now, created_at: Time.zone.now)
+          ]
+        )
+    end
+
+    it 'should handle a double update' do
+      Widget.create!(id: 1, test_id: 'abc', some_int: 2)
+
+      travel 1.day
+
+      publish_batch(
+        [
+          { key: 1,
+            payload: { test_id: 'def', some_int: 3 } },
+          { key: 1,
+            payload: { test_id: 'ghi', some_int: 4 } }
+        ]
+      )
+
+      expect(all_widgets).
+        to match_array(
+          [
+            have_attributes(id: 1, test_id: 'ghi', some_int: 4, updated_at: Time.zone.now, created_at: start)
+          ]
+        )
+    end
+
+    it 'should handle a double deletion' do
+      Widget.create!(id: 1, test_id: 'abc', some_int: 2)
+
+      publish_batch(
+        [
+          { key: 1,
+            payload: nil },
+          { key: 1,
+            payload: nil }
+        ]
+      )
+
+      expect(all_widgets).to be_empty
+    end
+
+    it 'should ignore default scopes' do
+      Widget.create!(id: 1, test_id: 'abc', some_int: 2, deleted: true)
+      Widget.create!(id: 2, test_id: 'def', some_int: 3, deleted: true)
+
+      publish_batch(
+        [
+          { key: 1,
+            payload: nil },
+          { key: 2,
+            payload: { test_id: 'def', some_int: 5 } }
+        ]
+      )
+
+      expect(all_widgets).
+        to match_array(
+          [
+            have_attributes(id: 2, test_id: 'def', some_int: 5)
+          ]
+        )
+    end
+
+    describe 'compacted mode' do
+      # Create a compacted consumer
+      let(:consumer_class) do
+        Class.new(described_class) do
+          schema 'MySchema'
+          namespace 'com.my-namespace'
+          key_config plain: true
+          record_class Widget
+
+          # :no-doc:
+          def deleted_query(_records)
+            raise 'Should not have anything to delete!'
+          end
+        end
+      end
+
+      it 'should allow for compacted batches' do
+        expect(Widget).to receive(:import!).once.and_call_original
+
+        publish_batch(
+          [
+            { key: 1,
+              payload: nil },
+            { key: 2,
+              payload: { test_id: 'xyz', some_int: 5 } },
+            { key: 1,
+              payload: { test_id: 'abc', some_int: 3 } },
+            { key: 2,
+              payload: { test_id: 'def', some_int: 4 } },
+            { key: 3,
+              payload: { test_id: 'hij', some_int: 9 } }
+          ]
+        )
+
+        expect(all_widgets).
+          to match_array(
+            [
+              have_attributes(id: 1, test_id: 'abc', some_int: 3),
+              have_attributes(id: 2, test_id: 'def', some_int: 4),
+              have_attributes(id: 3, test_id: 'hij', some_int: 9)
+            ]
+          )
+      end
+    end
+
+    describe 'batch atomicity' do
+      it 'should roll back if there was an exception while deleting' do
+        Widget.create!(id: 1, test_id: 'abc', some_int: 2)
+
+        travel 1.day
+
+        expect(Widget.connection).to receive(:delete).and_raise('Some error')
+
+        expect {
+          publish_batch(
+            [
+              { key: 1,
+                payload: { test_id: 'def', some_int: 3 } },
+              { key: 1,
+                payload: nil }
+            ]
+          )
+        }.to raise_error('Some error')
+
+        expect(all_widgets).
+          to match_array(
+            [
+              have_attributes(id: 1, test_id: 'abc', some_int: 2, updated_at: start, created_at: start)
+            ]
+          )
+      end
+
+      it 'should roll back if there was an invalid instance while upserting' do
+        Widget.create!(id: 1, test_id: 'abc', some_int: 2) # Updated but rolled back
+        Widget.create!(id: 3, test_id: 'ghi', some_int: 3) # Removed but rolled back
+
+        travel 1.day
+
+        expect {
+          publish_batch(
+            [
+              { key: 1,
+                payload: { test_id: 'def', some_int: 3 } },
+              { key: 2,
+                payload: nil },
+              { key: 2,
+                payload: { test_id: '', some_int: 4 } }, # Empty string is not valid for test_id
+              { key: 3,
+                payload: nil }
+            ]
+          )
+        }.to raise_error(ActiveRecord::RecordInvalid)
+
+        expect(all_widgets).
+          to match_array(
+            [
+              have_attributes(id: 1, test_id: 'abc', some_int: 2, updated_at: start, created_at: start),
+              have_attributes(id: 3, test_id: 'ghi', some_int: 3, updated_at: start, created_at: start)
+            ]
+          )
+      end
+    end
+
+    describe 'compound keys' do
+      let(:consumer_class) do
+        Class.new(described_class) do
+          schema 'MySchema'
+          namespace 'com.my-namespace'
+          key_config schema: 'MySchemaCompound-key'
+          record_class Widget
+          compacted false
+
+          # :no-doc:
+          def deleted_query(records)
+            keys = records.
+              map { |m| record_key(m.key) }.
+              reject(&:empty?)
+
+            # Only supported on Rails 5+
+            keys.reduce(@klass.none) do |query, key|
+              query.or(@klass.unscoped.where(key))
+            end
+          end
+        end
+      end
+
+      it 'should consume with compound keys' do
+        Widget.create!(test_id: 'xxx', some_int: 2, part_one: 'ghi', part_two: 'jkl')
+        Widget.create!(test_id: 'yyy', some_int: 7, part_one: 'mno', part_two: 'pqr')
+
+        publish_batch(
+          [
+            { key: { part_one: 'abc', part_two: 'def' }, # To be created
+              payload: { test_id: 'aaa', some_int: 3 } },
+            { key: { part_one: 'ghi', part_two: 'jkl' }, # To be updated
+              payload: { test_id: 'bbb', some_int: 4 } },
+            { key: { part_one: 'mno', part_two: 'pqr' }, # To be deleted
+              payload: nil }
+          ]
+        )
+
+        expect(all_widgets).
+          to match_array(
+            [
+              have_attributes(test_id: 'aaa', some_int: 3, part_one: 'abc', part_two: 'def'),
+              have_attributes(test_id: 'bbb', some_int: 4, part_one: 'ghi', part_two: 'jkl')
+            ]
+          )
+      end
+    end
+
+    describe 'no keys' do
+      let(:consumer_class) do
+        Class.new(described_class) do
+          schema 'MySchema'
+          namespace 'com.my-namespace'
+          key_config none: true
+          record_class Widget
+        end
+      end
+
+      it 'should handle unkeyed topics' do
+        Widget.create!(test_id: 'xxx', some_int: 2)
+
+        publish_batch(
+          [
+            { payload: { test_id: 'aaa', some_int: 3 } },
+            { payload: { test_id: 'bbb', some_int: 4 } },
+            { payload: nil } # Should be ignored. Can't delete with no key
+          ]
+        )
+
+        expect(all_widgets).
+          to match_array(
+            [
+              have_attributes(test_id: 'xxx', some_int: 2),
+              have_attributes(test_id: 'aaa', some_int: 3),
+              have_attributes(test_id: 'bbb', some_int: 4)
+            ]
+          )
+      end
+    end
+
+    describe 'soft deletion' do
+      let(:consumer_class) do
+        Class.new(described_class) do
+          schema 'MySchema'
+          namespace 'com.my-namespace'
+          key_config plain: true
+          record_class Widget
+          compacted false
+
+          # Sample customization: Soft delete
+          def remove_records(records)
+            deleted = deleted_query(records)
+
+            deleted.update_all(
+              deleted: true,
+              updated_at: Time.zone.now
+            )
+          end
+
+          # Sample customization: Undelete records
+          def record_attributes(payload, key)
+            super.merge(deleted: false)
+          end
+        end
+      end
+
+      it 'should mark records deleted' do
+        Widget.create!(id: 1, test_id: 'abc', some_int: 2)
+        Widget.create!(id: 3, test_id: 'xyz', some_int: 4)
+        Widget.create!(id: 4, test_id: 'uvw', some_int: 5, deleted: true)
+
+        travel 1.day
+
+        publish_batch(
+          [
+            { key: 1,
+              payload: nil },
+            { key: 1, # Double delete for key 1
+              payload: nil },
+            { key: 2, # Create 2
+              payload: { test_id: 'def', some_int: 3 } },
+            { key: 2, # Delete 2
+              payload: nil },
+            { key: 3, # Update non-deleted
+              payload: { test_id: 'ghi', some_int: 4 } },
+            { key: 4, # Revive
+              payload: { test_id: 'uvw', some_int: 5 } }
+          ]
+        )
+
+        expect(all_widgets).
+          to match_array(
+            [
+              have_attributes(id: 1, test_id: 'abc', some_int: 2, deleted: true,
+                              created_at: start, updated_at: Time.zone.now),
+              have_attributes(id: 2, test_id: 'def', some_int: 3, deleted: true,
+                              created_at: Time.zone.now, updated_at: Time.zone.now),
+              have_attributes(id: 3, test_id: 'ghi', some_int: 4, deleted: false,
+                              created_at: start, updated_at: Time.zone.now),
+              have_attributes(id: 4, test_id: 'uvw', some_int: 5, deleted: false,
+                              created_at: start, updated_at: Time.zone.now)
+            ]
+          )
+      end
+    end
+
+    describe 'skipping records' do
+      let(:consumer_class) do
+        Class.new(described_class) do
+          schema 'MySchema'
+          namespace 'com.my-namespace'
+          key_config plain: true
+          record_class Widget
+
+          # Sample customization: Skipping records
+          def record_attributes(payload, key)
+            return nil if payload[:test_id] == 'skipme'
+
+            super
+          end
+        end
+      end
+
+      it 'should allow overriding to skip any unwanted records' do
+        publish_batch(
+          [
+            { key: 1, # Record that consumer can decide to skip
+              payload: { test_id: 'skipme' } },
+            { key: 2,
+              payload: { test_id: 'abc123' } }
+          ]
+        )
+
+        expect(all_widgets).
+          to match_array([have_attributes(id: 2, test_id: 'abc123')])
+      end
+    end
+  end
+end