ruby_event_store-outbox 0.0.30 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/ruby_event_store/outbox/cleanup_strategies/none.rb +2 -1
- data/lib/ruby_event_store/outbox/cleanup_strategies.rb +1 -1
- data/lib/ruby_event_store/outbox/cli.rb +30 -26
- data/lib/ruby_event_store/outbox/configuration.rb +13 -9
- data/lib/ruby_event_store/outbox/consumer.rb +13 -135
- data/lib/ruby_event_store/outbox/metrics/influx.rb +5 -6
- data/lib/ruby_event_store/outbox/metrics/null.rb +4 -2
- data/lib/ruby_event_store/outbox/metrics/test.rb +1 -1
- data/lib/ruby_event_store/outbox/repository.rb +126 -15
- data/lib/ruby_event_store/outbox/runner.rb +3 -0
- data/lib/ruby_event_store/outbox/sidekiq_processor.rb +1 -1
- data/lib/ruby_event_store/outbox/sidekiq_producer.rb +1 -1
- data/lib/ruby_event_store/outbox/version.rb +1 -1
- metadata +3 -7
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 67529756ee35bbaf8447ed5ed8afbc6b6e80e2eaae62a1c396492fe4f6fcb7e0
+  data.tar.gz: 617091c19ad0d7bd1a688178740b43aa41165ed7cd9a5a371da33ff7734d3873
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 2ec5edfc468a99b05f38a9fc6f2db83bb30f2fd86a1c995f98476dee542a99b8f811a83e46e6915b20873eb1c291e5dc7f95a996542c3017a026fbad93eeb383
+  data.tar.gz: 63ee434baae965c530ac947820d0b1f5d2cf682268da3b7c377dde42eedd8eea91b3d56dbd6a96a0e10ec288d4ce6bedbb3cbac9bc874ea03642572f977f7b2a
data/lib/ruby_event_store/outbox/cli.rb
CHANGED
@@ -20,7 +20,8 @@ module RubyEventStore
       metrics_url: nil,
       cleanup_strategy: :none,
       cleanup_limit: :all,
-      sleep_on_empty: 0.5
+      sleep_on_empty: 0.5,
+      locking: true,
     }
     Options = Struct.new(*DEFAULTS.keys)

@@ -31,10 +32,9 @@ module RubyEventStore
         .new do |option_parser|
           option_parser.banner = "Usage: res_outbox [options]"

-          option_parser.on(
-
-
-          ) { |database_url| options.database_url = database_url }
+          option_parser.on("--database-url=DATABASE_URL", "Database where outbox table is stored") do |database_url|
+            options.database_url = database_url
+          end

           option_parser.on("--redis-url=REDIS_URL", "URL to redis database") do |redis_url|
             options.redis_url = redis_url
@@ -43,25 +43,25 @@ module RubyEventStore
           option_parser.on(
             "--log-level=LOG_LEVEL",
             %i[fatal error warn info debug],
-            "Logging level, one of: fatal, error, warn, info, debug. Default: warn"
+            "Logging level, one of: fatal, error, warn, info, debug. Default: warn",
           ) { |log_level| options.log_level = log_level.to_sym }

           option_parser.on(
             "--message-format=FORMAT",
             ["sidekiq5"],
-            "Message format, supported: sidekiq5. Default: sidekiq5"
+            "Message format, supported: sidekiq5. Default: sidekiq5",
           ) { |message_format| options.message_format = message_format }

           option_parser.on(
             "--split-keys=SPLIT_KEYS",
             Array,
-            "Split keys which should be handled, all if not specified"
+            "Split keys which should be handled, all if not specified",
           ) { |split_keys| options.split_keys = split_keys if !split_keys.empty? }

           option_parser.on(
             "--batch-size=BATCH_SIZE",
             Integer,
-            "Amount of records fetched in one fetch. Bigger value means more duplicated messages when network problems occur. Default: 100"
+            "Amount of records fetched in one fetch. Bigger value means more duplicated messages when network problems occur. Default: 100",
           ) { |batch_size| options.batch_size = batch_size }

           option_parser.on("--metrics-url=METRICS_URL", "URI to metrics collector, optional") do |metrics_url|
@@ -70,20 +70,24 @@ module RubyEventStore

           option_parser.on(
             "--cleanup=STRATEGY",
-            "A strategy for cleaning old records. One of: none or iso8601 duration format how old enqueued records should be removed. Default: none"
+            "A strategy for cleaning old records. One of: none or iso8601 duration format how old enqueued records should be removed. Default: none",
           ) { |cleanup_strategy| options.cleanup_strategy = cleanup_strategy }

           option_parser.on(
             "--cleanup-limit=LIMIT",
-            "Amount of records removed in single cleanup run. One of: all or number of records that should be removed. Default: all"
+            "Amount of records removed in single cleanup run. One of: all or number of records that should be removed. Default: all",
           ) { |cleanup_limit| options.cleanup_limit = cleanup_limit }

           option_parser.on(
             "--sleep-on-empty=SLEEP_TIME",
             Float,
-            "How long to sleep before next check when there was nothing to do. Default: 0.5"
+            "How long to sleep before next check when there was nothing to do. Default: 0.5",
           ) { |sleep_on_empty| options.sleep_on_empty = sleep_on_empty }

+          option_parser.on("-l", "--[no-]lock", "Lock split key in consumer") do |locking|
+            options.locking = locking
+          end
+
           option_parser.on_tail("--version", "Show version") do
             puts VERSION
             exit
@@ -96,26 +100,26 @@ module RubyEventStore

     def run(argv)
       options = Parser.parse(argv)
-      build_runner(options)
-        .run
+      build_runner(options).run
     end

     def build_runner(options)
       consumer_uuid = SecureRandom.uuid
       logger = Logger.new(STDOUT, level: options.log_level, progname: "RES-Outbox #{consumer_uuid}")
-      consumer_configuration =
-
-
-
-
-
-
-
-
-
+      consumer_configuration =
+        Configuration.new(
+          split_keys: options.split_keys,
+          message_format: options.message_format,
+          batch_size: options.batch_size,
+          database_url: options.database_url,
+          redis_url: options.redis_url,
+          cleanup: options.cleanup_strategy,
+          cleanup_limit: options.cleanup_limit,
+          sleep_on_empty: options.sleep_on_empty,
+          locking: options.locking,
+        )
       metrics = Metrics.from_url(options.metrics_url)
-      outbox_consumer =
-        Outbox::Consumer.new(consumer_uuid, consumer_configuration, logger: logger, metrics: metrics)
+      outbox_consumer = Outbox::Consumer.new(consumer_uuid, consumer_configuration, logger: logger, metrics: metrics)
       Runner.new(outbox_consumer, consumer_configuration, logger: logger)
     end
   end
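The new `-l` / `--[no-]lock` switch toggles the split-key locking the consumer previously always used, and its value flows into `Options#locking` (default `true`). A minimal, hedged sketch of parsing it; it assumes the gem is installed and that the `Parser` called via `Parser.parse(argv)` above is reachable as `RubyEventStore::Outbox::CLI::Parser`:

```ruby
require "ruby_event_store/outbox/cli"

# Illustrative invocation; flag names are taken from the option definitions above.
options = RubyEventStore::Outbox::CLI::Parser.parse(%w[--no-lock --split-keys=billing,default --batch-size=100])
options.locking    # => false
options.split_keys # => ["billing", "default"]
options.batch_size # => 100
```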
data/lib/ruby_event_store/outbox/configuration.rb
CHANGED
@@ -11,7 +11,8 @@ module RubyEventStore
        redis_url:,
        cleanup:,
        cleanup_limit:,
-        sleep_on_empty:
+        sleep_on_empty:,
+        locking:
      )
        @split_keys = split_keys
        @message_format = message_format
@@ -21,6 +22,7 @@ module RubyEventStore
        @cleanup = cleanup
        @cleanup_limit = cleanup_limit
        @sleep_on_empty = sleep_on_empty
+        @locking = locking
        freeze
      end

@@ -33,18 +35,20 @@ module RubyEventStore
          redis_url: overriden_options.fetch(:redis_url, redis_url),
          cleanup: overriden_options.fetch(:cleanup, cleanup),
          cleanup_limit: overriden_options.fetch(:cleanup_limit, cleanup_limit),
-          sleep_on_empty: overriden_options.fetch(:sleep_on_empty, sleep_on_empty)
+          sleep_on_empty: overriden_options.fetch(:sleep_on_empty, sleep_on_empty),
+          locking: overriden_options.fetch(:locking, locking),
        )
      end

      attr_reader :split_keys,
-
-
-
-
-
-
-
+                  :message_format,
+                  :batch_size,
+                  :database_url,
+                  :redis_url,
+                  :cleanup,
+                  :cleanup_limit,
+                  :sleep_on_empty,
+                  :locking
    end
  end
 end
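`Configuration` now carries the `locking` flag alongside the existing options. A hedged sketch of building it by hand, mirroring `CLI#build_runner` above; the values are illustrative and the require path is assumed from this gem's file layout:

```ruby
require "ruby_event_store/outbox/configuration"

config =
  RubyEventStore::Outbox::Configuration.new(
    split_keys: ["default"],
    message_format: "sidekiq5",
    batch_size: 100,
    database_url: ENV["DATABASE_URL"],
    redis_url: ENV["REDIS_URL"],
    cleanup: :none,
    cleanup_limit: :all,
    sleep_on_empty: 0.5,
    locking: true,
  )
config.locking # => true
```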
data/lib/ruby_event_store/outbox/consumer.rb
CHANGED
@@ -23,12 +23,13 @@ module RubyEventStore
       @metrics = metrics
       @tempo = Tempo.new(configuration.batch_size)
       @consumer_uuid = consumer_uuid
+      @locking = configuration.locking

       raise "Unknown format" if configuration.message_format != SIDEKIQ5_FORMAT
       redis_config = RedisClient.config(url: configuration.redis_url)
       @processor = SidekiqProcessor.new(redis_config.new_client)

-      @repository = Repository.new(configuration.database_url)
+      @repository = Repository.new(configuration.database_url, logger, metrics)
       @cleanup_strategy = CleanupStrategies.build(configuration, repository)
     end

@@ -43,56 +44,17 @@ module RubyEventStore
     end

     def handle_split(fetch_specification)
-
-
-
-
-
-      MAXIMUM_BATCH_FETCHES_IN_ONE_LOCK.times do
-        batch = retrieve_batch(fetch_specification)
-        break if batch.empty?
-
-        batch_result = BatchResult.empty
-        batch.each do |record|
-          handle_failure(batch_result) do
-            now = @clock.now.utc
-            processor.process(record, now)
-
-            repository.mark_as_enqueued(record, now)
-            something_processed |= true
-            batch_result.count_success!
-          end
+      repository
+        .with_next_batch(fetch_specification, tempo.batch_size, consumer_uuid, locking, @clock) do |record|
+          now = @clock.now.utc
+          processor.process(record, now)
+          repository.mark_as_enqueued(record, now)
         end
-
-
-
-
-
-          split_key: fetch_specification.split_key,
-          remaining: get_remaining_count(fetch_specification)
-        )
-
-        logger.info "Sent #{batch_result.success_count} messages from outbox table"
-
-        refresh_successful = refresh_lock_for_process(obtained_lock)
-        break unless refresh_successful
-      end
-
-      unless something_processed
-        metrics.write_point_queue(
-          format: fetch_specification.message_format,
-          split_key: fetch_specification.split_key,
-          remaining: get_remaining_count(fetch_specification)
-        )
-      end
-
-      release_lock_for_process(fetch_specification)
-
-      cleanup(fetch_specification)
-
-      processor.after_batch
-
-      something_processed
+        .tap do
+          cleanup(fetch_specification)
+          processor.after_batch
+        end
+        .success_count > 0
     end

     private
@@ -103,86 +65,10 @@ module RubyEventStore
       :processor,
       :consumer_uuid,
       :repository,
+      :locking,
       :cleanup_strategy,
       :tempo

-    def handle_failure(batch_result)
-      retried = false
-      yield
-    rescue RetriableRedisError => error
-      if retried
-        batch_result.count_failed!
-        log_error(error)
-      else
-        retried = true
-        retry
-      end
-    rescue => error
-      batch_result.count_failed!
-      log_error(error)
-    end
-
-    def log_error(e)
-      e.full_message.split($/).each { |line| logger.error(line) }
-    end
-
-    def obtain_lock_for_process(fetch_specification)
-      result = repository.obtain_lock_for_process(fetch_specification, consumer_uuid, clock: @clock)
-      case result
-      when :deadlocked
-        logger.warn "Obtaining lock for split_key '#{fetch_specification.split_key}' failed (deadlock)"
-        metrics.write_operation_result("obtain", "deadlocked")
-        false
-      when :lock_timeout
-        logger.warn "Obtaining lock for split_key '#{fetch_specification.split_key}' failed (lock timeout)"
-        metrics.write_operation_result("obtain", "lock_timeout")
-        false
-      when :taken
-        logger.debug "Obtaining lock for split_key '#{fetch_specification.split_key}' unsuccessful (taken)"
-        metrics.write_operation_result("obtain", "taken")
-        false
-      else
-        result
-      end
-    end
-
-    def release_lock_for_process(fetch_specification)
-      result = repository.release_lock_for_process(fetch_specification, consumer_uuid)
-      case result
-      when :deadlocked
-        logger.warn "Releasing lock for split_key '#{fetch_specification.split_key}' failed (deadlock)"
-        metrics.write_operation_result("release", "deadlocked")
-      when :lock_timeout
-        logger.warn "Releasing lock for split_key '#{fetch_specification.split_key}' failed (lock timeout)"
-        metrics.write_operation_result("release", "lock_timeout")
-      when :not_taken_by_this_process
-        logger.debug "Releasing lock for split_key '#{fetch_specification.split_key}' failed (not taken by this process)"
-        metrics.write_operation_result("release", "not_taken_by_this_process")
-      end
-    end
-
-    def refresh_lock_for_process(lock)
-      result = lock.refresh(clock: @clock)
-      case result
-      when :ok
-        return true
-      when :deadlocked
-        logger.warn "Refreshing lock for split_key '#{lock.split_key}' failed (deadlock)"
-        metrics.write_operation_result("refresh", "deadlocked")
-        return false
-      when :lock_timeout
-        logger.warn "Refreshing lock for split_key '#{lock.split_key}' failed (lock timeout)"
-        metrics.write_operation_result("refresh", "lock_timeout")
-        return false
-      when :stolen
-        logger.debug "Refreshing lock for split_key '#{lock.split_key}' unsuccessful (stolen)"
-        metrics.write_operation_result("refresh", "stolen")
-        return false
-      else
-        raise "Unexpected result #{result}"
-      end
-    end
-
     def cleanup(fetch_specification)
       result = cleanup_strategy.call(fetch_specification)
       case result
@@ -194,14 +80,6 @@ module RubyEventStore
         metrics.write_operation_result("cleanup", "lock_timeout")
       end
     end
-
-    def retrieve_batch(fetch_specification)
-      repository.retrieve_batch(fetch_specification, tempo.batch_size)
-    end
-
-    def get_remaining_count(fetch_specification)
-      repository.get_remaining_count(fetch_specification)
-    end
   end
 end
 end
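For readability, here is the new `handle_split` body from the hunk above written out as plain code with comments. This is a paraphrase of the diff, not additional gem code:

```ruby
def handle_split(fetch_specification)
  repository
    .with_next_batch(fetch_specification, tempo.batch_size, consumer_uuid, locking, @clock) do |record|
      # Batching, locking, retries and per-batch metrics now live in Repository;
      # the consumer only pushes each record to Sidekiq and marks it as enqueued.
      now = @clock.now.utc
      processor.process(record, now)
      repository.mark_as_enqueued(record, now)
    end
    .tap do
      cleanup(fetch_specification) # cleanup strategy still runs once per split key
      processor.after_batch
    end
    .success_count > 0             # the returned BatchResult decides whether anything was sent
end
```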
data/lib/ruby_event_store/outbox/metrics/influx.rb
CHANGED
@@ -7,7 +7,6 @@ module RubyEventStore
   module Metrics
     class Influx
       def initialize(url)
-        uri = URI.parse(url)
         options = { url: url, async: true, time_precision: "ns" }
         @influxdb_client = InfluxDB::Client.new(**options)
       end
@@ -15,7 +14,7 @@ module RubyEventStore
       def write_operation_result(operation, result)
         write_point(
           "ruby_event_store.outbox.lock",
-          { values: { value: 1 }, tags: { operation: operation, result: result } }
+          { values: { value: 1 }, tags: { operation: operation, result: result } },
         )
       end

@@ -26,13 +25,13 @@ module RubyEventStore
           values: {
             enqueued: enqueued,
             failed: failed,
-            remaining: remaining
+            remaining: remaining,
           },
           tags: {
             format: format,
-            split_key: split_key
-          }
-        }
+            split_key: split_key,
+          },
+        },
         )
       end

data/lib/ruby_event_store/outbox/metrics/null.rb
CHANGED
@@ -4,9 +4,11 @@ module RubyEventStore
   module Outbox
     module Metrics
       class Null
-        def write_operation_result(operation, result)
+        def write_operation_result(operation, result)
+        end

-        def write_point_queue(**kwargs)
+        def write_point_queue(**kwargs)
+        end
       end
     end
   end
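Both metrics backends respond to the same two calls used throughout this diff: `write_operation_result` and `write_point_queue`. A small sketch against the no-op `Null` backend shown above; the require path is assumed from the gem's file layout and the argument names follow the repository's `instrument_batch_result` further down:

```ruby
require "ruby_event_store/outbox/metrics/null"

metrics = RubyEventStore::Outbox::Metrics::Null.new
metrics.write_operation_result("obtain", "deadlocked")
metrics.write_point_queue(enqueued: 10, failed: 0, format: "sidekiq5", split_key: "default", remaining: 3)
```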
data/lib/ruby_event_store/outbox/repository.rb
CHANGED
@@ -113,27 +113,21 @@ module RubyEventStore
        end
      end

-      def initialize(database_url)
+      def initialize(database_url, logger, metrics)
+        @logger = logger
+        @metrics = metrics
        ::ActiveRecord::Base.establish_connection(database_url) unless ::ActiveRecord::Base.connected?
        if ::ActiveRecord::Base.connection.adapter_name == "Mysql2"
          ::ActiveRecord::Base.connection.execute("SET SESSION innodb_lock_wait_timeout = 1;")
        end
      end

-      def
-
-
-
-
-
-      end
-
-      def obtain_lock_for_process(fetch_specification, process_uuid, clock:)
-        Lock.obtain(fetch_specification, process_uuid, clock: clock)
-      end
-
-      def release_lock_for_process(fetch_specification, process_uuid)
-        Lock.release(fetch_specification, process_uuid)
+      def with_next_batch(fetch_specification, batch_size, consumer_uuid, locking, clock, &block)
+        if locking
+          with_next_locking_batch(fetch_specification, batch_size, consumer_uuid, clock, &block)
+        else
+          with_next_non_locking_batch(fetch_specification, batch_size, &block)
+        end
      end

      def mark_as_enqueued(record, now)
@@ -150,6 +144,123 @@ module RubyEventStore
      rescue ::ActiveRecord::LockWaitTimeout
        :lock_timeout
      end
+
+      private
+
+      def with_next_locking_batch(fetch_specification, batch_size, consumer_uuid, clock, &block)
+        BatchResult.empty.tap do |result|
+          obtained_lock = obtain_lock_for_process(fetch_specification, consumer_uuid, clock: clock)
+          case obtained_lock
+          when :deadlocked
+            logger.warn "Obtaining lock for split_key '#{fetch_specification.split_key}' failed (deadlock)"
+            metrics.write_operation_result("obtain", "deadlocked")
+            return BatchResult.empty
+          when :lock_timeout
+            logger.warn "Obtaining lock for split_key '#{fetch_specification.split_key}' failed (lock timeout)"
+            metrics.write_operation_result("obtain", "lock_timeout")
+            return BatchResult.empty
+          when :taken
+            logger.debug "Obtaining lock for split_key '#{fetch_specification.split_key}' unsuccessful (taken)"
+            metrics.write_operation_result("obtain", "taken")
+            return BatchResult.empty
+          end
+
+          Consumer::MAXIMUM_BATCH_FETCHES_IN_ONE_LOCK.times do
+            batch = retrieve_batch(fetch_specification, batch_size).to_a
+            break if batch.empty?
+            batch.each { |record| handle_execution(result) { block.call(record) } }
+            case (refresh_result = obtained_lock.refresh(clock: clock))
+            when :ok
+            when :deadlocked
+              logger.warn "Refreshing lock for split_key '#{lock.split_key}' failed (deadlock)"
+              metrics.write_operation_result("refresh", "deadlocked")
+              break
+            when :lock_timeout
+              logger.warn "Refreshing lock for split_key '#{lock.split_key}' failed (lock timeout)"
+              metrics.write_operation_result("refresh", "lock_timeout")
+              break
+            when :stolen
+              logger.debug "Refreshing lock for split_key '#{lock.split_key}' unsuccessful (stolen)"
+              metrics.write_operation_result("refresh", "stolen")
+              break
+            else
+              raise "Unexpected result #{refresh_result}"
+            end
+          end
+
+          case release_lock_for_process(fetch_specification, consumer_uuid)
+          when :deadlocked
+            logger.warn "Releasing lock for split_key '#{fetch_specification.split_key}' failed (deadlock)"
+            metrics.write_operation_result("release", "deadlocked")
+          when :lock_timeout
+            logger.warn "Releasing lock for split_key '#{fetch_specification.split_key}' failed (lock timeout)"
+            metrics.write_operation_result("release", "lock_timeout")
+          when :not_taken_by_this_process
+            logger.debug "Releasing lock for split_key '#{fetch_specification.split_key}' failed (not taken by this process)"
+            metrics.write_operation_result("release", "not_taken_by_this_process")
+          end
+          instrument_batch_result(fetch_specification, result)
+        end
+      end
+
+      def with_next_non_locking_batch(fetch_specification, batch_size, &block)
+        BatchResult.empty.tap do |result|
+          Record.transaction do
+            batch = retrieve_batch(fetch_specification, batch_size).lock("FOR UPDATE SKIP LOCKED")
+            break if batch.empty?
+            batch.each { |record| handle_execution(result) { block.call(record) } }
+          end
+
+          instrument_batch_result(fetch_specification, result)
+        end
+      end
+
+      def instrument_batch_result(fetch_specification, result)
+        metrics.write_point_queue(
+          enqueued: result.success_count,
+          failed: result.failed_count,
+          format: fetch_specification.message_format,
+          split_key: fetch_specification.split_key,
+          remaining: Record.remaining_for(fetch_specification).count,
+        )
+
+        logger.info "Sent #{result.success_count} messages from outbox table"
+      end
+
+      def handle_execution(batch_result)
+        retried = false
+        yield
+        batch_result.count_success!
+      rescue RetriableRedisError => error
+        if retried
+          batch_result.count_failed!
+          log_error(error)
+        else
+          retried = true
+          retry
+        end
+      rescue => error
+        batch_result.count_failed!
+        log_error(error)
+      end
+
+      def log_error(e)
+        e.full_message.split($/).each { |line| logger.error(line) }
+      end
+
+      def retrieve_batch(fetch_specification, batch_size)
+        Record.remaining_for(fetch_specification).order("id ASC").limit(batch_size)
+      end
+
+      def obtain_lock_for_process(fetch_specification, process_uuid, clock:)
+        Lock.obtain(fetch_specification, process_uuid, clock: clock)
+      end
+
+      def release_lock_for_process(fetch_specification, process_uuid)
+        Lock.release(fetch_specification, process_uuid)
+      end
+
+      attr_reader :logger, :metrics
    end
  end
 end
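The non-locking strategy introduced above leans on the database instead of the gem's `Lock` row: each consumer opens a transaction and selects its batch with `FOR UPDATE SKIP LOCKED`, so rows already claimed by a concurrent consumer are simply skipped. A self-contained sketch of that pattern in plain ActiveRecord; the table, column and model names here are illustrative, not the gem's:

```ruby
require "active_record"

# Assumes an established connection and an outbox-like table with these columns.
class OutboxRow < ActiveRecord::Base
  self.table_name = "event_store_outbox"
end

def each_pending_row(batch_size)
  OutboxRow.transaction do
    OutboxRow
      .where(enqueued_at: nil)          # not yet handed over to Sidekiq
      .order(:id)
      .limit(batch_size)
      .lock("FOR UPDATE SKIP LOCKED")   # rows locked by another consumer are skipped
      .each { |row| yield row }
  end
end

# each_pending_row(100) { |row| publish(row) }  # `publish` is a placeholder for real processing
```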
data/lib/ruby_event_store/outbox/runner.rb
CHANGED
@@ -8,12 +8,14 @@ module RubyEventStore
       @logger = logger
       @sleep_on_empty = configuration.sleep_on_empty
       @split_keys = configuration.split_keys
+      @locking = configuration.locking
       @gracefully_shutting_down = false
       prepare_traps
     end

     def run
       logger.info("Initiated RubyEventStore::Outbox v#{VERSION}")
+      logger.info("Using #{@locking ? "locking" : "non-locking"} mode")
       logger.info("Handling split keys: #{split_keys ? split_keys.join(", ") : "(all of them)"}")

       while !@gracefully_shutting_down
@@ -28,6 +30,7 @@ module RubyEventStore
     end

     private
+
     attr_reader :consumer, :logger, :sleep_on_empty, :split_keys

     def prepare_traps
data/lib/ruby_event_store/outbox/sidekiq_processor.rb
CHANGED
@@ -20,7 +20,7 @@ module RubyEventStore

       queue = parsed_record["queue"]
       raise InvalidPayload.new("Missing queue") if queue.nil? || queue.empty?
-      payload = JSON.generate(parsed_record.merge({ "enqueued_at" =>
+      payload = JSON.generate(parsed_record.merge({ "enqueued_at" => record.created_at.to_f }))

       redis.call("LPUSH", "queue:#{queue}", payload)

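The new payload line above sets "enqueued_at" to the outbox record's created_at as a float, which is how Sidekiq represents that timestamp in its job hash. A stdlib-only illustration with assumed values:

```ruby
require "json"
require "time"

parsed_record = { "class" => "SomeAsyncHandler", "queue" => "default", "args" => [] } # hypothetical job hash
created_at = Time.utc(2024, 1, 1, 12, 0, 0)                                           # stands in for record.created_at

payload = JSON.generate(parsed_record.merge({ "enqueued_at" => created_at.to_f }))
payload # => "{\"class\":\"SomeAsyncHandler\",\"queue\":\"default\",\"args\":[],\"enqueued_at\":1704110400.0}"
```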
metadata
CHANGED
@@ -1,14 +1,13 @@
 --- !ruby/object:Gem::Specification
 name: ruby_event_store-outbox
 version: !ruby/object:Gem::Version
-  version: 0.0.30
+  version: 0.2.0
 platform: ruby
 authors:
 - Arkency
-autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 1980-01-02 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: ruby_event_store
@@ -38,7 +37,6 @@ dependencies:
   - - ">="
     - !ruby/object:Gem::Version
       version: '6.0'
-description:
 email: dev@arkency.com
 executables:
 - res_outbox
@@ -79,7 +77,6 @@ metadata:
   source_code_uri: https://github.com/RailsEventStore/rails_event_store
   bug_tracker_uri: https://github.com/RailsEventStore/rails_event_store/issues
   rubygems_mfa_required: 'true'
-post_install_message:
 rdoc_options: []
 require_paths:
 - lib
@@ -94,8 +91,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.
-signing_key:
+rubygems_version: 3.6.8
 specification_version: 4
 summary: Active Record based outbox for Ruby Event Store
 test_files: []