sourced 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.rspec +3 -0
- data/CHANGELOG.md +5 -0
- data/README.md +34 -0
- data/Rakefile +8 -0
- data/config.ru +0 -0
- data/examples/cart.rb +229 -0
- data/examples/workers.rb +5 -0
- data/lib/sourced/backends/active_record_backend.rb +184 -0
- data/lib/sourced/backends/sequel_backend.rb +387 -0
- data/lib/sourced/backends/test_backend.rb +273 -0
- data/lib/sourced/command_context.rb +46 -0
- data/lib/sourced/configuration.rb +39 -0
- data/lib/sourced/consumer.rb +42 -0
- data/lib/sourced/decide.rb +50 -0
- data/lib/sourced/decider.rb +251 -0
- data/lib/sourced/evolve.rb +102 -0
- data/lib/sourced/message.rb +202 -0
- data/lib/sourced/projector.rb +131 -0
- data/lib/sourced/rails/install_generator.rb +57 -0
- data/lib/sourced/rails/railtie.rb +16 -0
- data/lib/sourced/rails/templates/bin_sors +8 -0
- data/lib/sourced/rails/templates/create_sors_tables.rb.erb +55 -0
- data/lib/sourced/react.rb +57 -0
- data/lib/sourced/router.rb +148 -0
- data/lib/sourced/supervisor.rb +49 -0
- data/lib/sourced/sync.rb +80 -0
- data/lib/sourced/types.rb +24 -0
- data/lib/sourced/version.rb +5 -0
- data/lib/sourced/worker.rb +93 -0
- data/lib/sourced.rb +42 -0
- data/sig/sors.rbs +4 -0
- metadata +103 -0
@@ -0,0 +1,387 @@ data/lib/sourced/backends/sequel_backend.rb
# frozen_string_literal: true

require 'sequel'
require 'json'
require 'sourced/message'

Sequel.extension :fiber_concurrency
Sequel.extension :pg_json if defined?(PG)

module Sourced
  module Backends
    class SequelBackend
      def initialize(db, logger: Sourced.config.logger, prefix: 'sourced')
        @db = connect(db)
        @logger = logger
        @prefix = prefix
        @commands_table = table_name(:commands)
        @streams_table = table_name(:streams)
        @offsets_table = table_name(:offsets)
        @events_table = table_name(:events)
        logger.info("Connected to #{@db}")
      end

      def installed?
        db.table_exists?(events_table) && db.table_exists?(streams_table) && db.table_exists?(offsets_table) && db.table_exists?(commands_table)
      end

      def schedule_commands(commands)
        return false if commands.empty?

        rows = commands.map { |c| serialize_command(c) }

        db.transaction do
          db[commands_table].multi_insert(rows)
        end
        true
      end

      # TODO: if the application raises an exception
      # the command row is not deleted, so that it can be retried.
      # However, if a command fails _permanently_ there's no point in keeping it in the queue,
      # this ties with unresolved error handling in event handling, too.
      def next_command(&reserve)
        if block_given?
          db.transaction do
            row = db.fetch(sql_for_next_command, Time.now.utc).first
            return unless row

            cmd = deserialize_event(row)
            yield cmd
            db[commands_table].where(id: cmd.id).delete
            cmd
            # TODO: on failure, do we want to mark command as failed
            # and put it in a dead-letter queue?
          end
        else
          row = db[commands_table].order(:created_at).first
          row ? deserialize_event(row) : nil
        end
      end

      Stats = Data.define(:stream_count, :max_global_seq, :groups)

      def stats
        stream_count = db[streams_table].count
        max_global_seq = db[events_table].max(:global_seq)
        groups = db.fetch(sql_for_consumer_stats).all
        Stats.new(stream_count, max_global_seq, groups)
      end

      def transaction(&)
        db.transaction(&)
      end

      def append_to_stream(stream_id, events)
        return if events.empty?

        if events.map(&:stream_id).uniq.size > 1
          raise ArgumentError, 'Events must all belong to the same stream'
        end

        db.transaction do
          seq = events.last.seq
          id = db[streams_table].insert_conflict(target: :stream_id, update: { seq:, updated_at: Time.now }).insert(stream_id:, seq:)
          rows = events.map { |e| serialize_event(e, id) }
          db[events_table].multi_insert(rows)
        end
        true
      rescue Sequel::UniqueConstraintViolation => e
        raise Sourced::ConcurrentAppendError, e.message
      end

      # @param reactor [Sourced::ReactorInterface]
      def reserve_next_for_reactor(reactor, &)
        group_id = reactor.consumer_info.group_id
        handled_events = reactor.handled_events.map(&:type)

        db.transaction do
          start_from = reactor.consumer_info.start_from.call
          row = if start_from.is_a?(Time)
            db.fetch(sql_for_reserve_next_with_events(handled_events, true), group_id, group_id, start_from).first
          else
            db.fetch(sql_for_reserve_next_with_events(handled_events), group_id, group_id).first
          end
          return unless row

          event = deserialize_event(row)

          if block_given?
            yield(event)
            # ACK
            ack_event(group_id, row[:stream_id_fk], row[:global_seq])
          end

          event
        end
      end

      def ack_on(group_id, event_id, &)
        db.transaction do
          row = db.fetch(sql_for_ack_on, group_id, event_id).first
          raise Sourced::ConcurrentAckError, "Stream for event #{event_id} is being concurrently processed by #{group_id}" unless row

          yield if block_given?

          ack_event(group_id, row[:stream_id_fk], row[:global_seq])
        end
      end

      private def ack_event(group_id, stream_id, global_seq)
        db[offsets_table]
          .insert_conflict(
            target: [:group_id, :stream_id],
            update: { global_seq: Sequel[:excluded][:global_seq] }
          )
          .insert(stream_id:, group_id:, global_seq:)
      end

      private def base_events_query
        db[events_table]
          .select(
            Sequel[events_table][:id],
            Sequel[streams_table][:stream_id],
            Sequel[events_table][:seq],
            Sequel[events_table][:global_seq],
            Sequel[events_table][:type],
            Sequel[events_table][:created_at],
            Sequel[events_table][:causation_id],
            Sequel[events_table][:correlation_id],
            Sequel[events_table][:metadata],
            Sequel[events_table][:payload],
          )
          .join(streams_table, id: :stream_id)
      end

      def read_correlation_batch(event_id)
        correlation_subquery = db[events_table]
          .select(:correlation_id)
          .where(id: event_id)

        query = base_events_query
          .where(Sequel[events_table][:correlation_id] => correlation_subquery)
          .order(Sequel[events_table][:global_seq])

        query.map do |row|
          deserialize_event(row)
        end
      end

      def read_event_stream(stream_id, after: nil, upto: nil)
        _events_table = events_table # need local variable for Sequel block

        query = base_events_query.where(Sequel[streams_table][:stream_id] => stream_id)

        query = query.where { Sequel[_events_table][:seq] > after } if after
        query = query.where { Sequel[_events_table][:seq] <= upto } if upto
        query.order(Sequel[_events_table][:global_seq]).map do |row|
          deserialize_event(row)
        end
      end

      # For tests only
      def clear!
        raise 'Not in test environment' unless ENV['ENVIRONMENT'] == 'test'
        # Truncate and restart global_seq increment first
        db[events_table].truncate(cascade: true, only: true, restart: true)
        db[events_table].delete
        db[commands_table].delete
        db[offsets_table].delete
        db[streams_table].delete
      end

      def install
        if @db.class.name == 'Sequel::SQLite::Database'
          raise 'no SQLite support yet'
        end

        _streams_table = streams_table

        db.create_table?(streams_table) do
          primary_key :id
          String :stream_id, null: false, unique: true
          Time :updated_at, null: false, default: Sequel.function(:now)
          Bignum :seq, null: false
        end

        logger.info("Created table #{streams_table}")

        db.create_table?(offsets_table) do
          primary_key :id
          foreign_key :stream_id, _streams_table
          String :group_id, null: false, index: true
          Bignum :global_seq, null: false
          Time :created_at, null: false, default: Sequel.function(:now)
          index %i[group_id stream_id], unique: true
        end

        logger.info("Created table #{offsets_table}")

        db.create_table?(events_table) do
          primary_key :global_seq, type: :Bignum
          column :id, :uuid, unique: true
          foreign_key :stream_id, _streams_table
          Bignum :seq, null: false
          String :type, null: false
          Time :created_at, null: false
          column :causation_id, :uuid, index: true
          column :correlation_id, :uuid
          column :metadata, :jsonb
          column :payload, :jsonb
          index %i[stream_id seq], unique: true
        end

        logger.info("Created table #{events_table}")

        db.create_table?(commands_table) do
          column :id, :uuid, unique: true
          String :stream_id, null: false, index: true
          String :type, null: false
          Time :created_at, null: false, index: true
          column :causation_id, :uuid
          column :correlation_id, :uuid
          column :metadata, :jsonb
          column :payload, :jsonb
        end

        logger.info("Created table #{commands_table}")

        self
      end

      private

      attr_reader :db, :logger, :prefix, :events_table, :streams_table, :offsets_table, :commands_table

      def sql_for_next_command
        <<~SQL
          WITH next_command AS (
            SELECT
              id,
              stream_id,
              type,
              causation_id,
              correlation_id,
              metadata,
              payload,
              created_at,
              pg_try_advisory_xact_lock(hashtext(stream_id::text)) AS lock_obtained
            FROM #{commands_table}
            WHERE created_at <= ?
            ORDER BY created_at
          )
          SELECT *
          FROM next_command
          WHERE lock_obtained = true
          LIMIT 1;
        SQL
      end

      def sql_for_reserve_next_with_events(handled_events, with_time_window = false)
        event_types = handled_events.map { |e| "'#{e}'" }
        event_types_sql = event_types.any? ? " AND e.type IN(#{event_types.join(',')})" : ''
        time_window_sql = with_time_window ? ' AND e.created_at > ?' : ''

        <<~SQL
          WITH candidate_events AS (
            SELECT
              e.global_seq,
              e.id,
              e.stream_id AS stream_id_fk,
              s.stream_id,
              e.seq,
              e.type,
              e.causation_id,
              e.correlation_id,
              e.metadata,
              e.payload,
              e.created_at,
              pg_try_advisory_xact_lock(hashtext(?::text), hashtext(s.id::text)) as lock_obtained
            FROM #{events_table} e
            JOIN #{streams_table} s ON e.stream_id = s.id
            LEFT JOIN #{offsets_table} o ON o.stream_id = e.stream_id
              AND o.group_id = ?
            WHERE e.global_seq > COALESCE(o.global_seq, 0)#{event_types_sql}#{time_window_sql}
            ORDER BY e.global_seq
          )
          SELECT *
          FROM candidate_events
          WHERE lock_obtained = true
          LIMIT 1;
        SQL
      end

      def sql_for_ack_on
        <<~SQL
          WITH candidate_rows AS (
            SELECT
              e.global_seq,
              e.stream_id AS stream_id_fk,
              pg_try_advisory_xact_lock(hashtext(?::text), hashtext(s.id::text)) as lock_obtained
            FROM #{events_table} e
            JOIN #{streams_table} s ON e.stream_id = s.id
            WHERE e.id = ?
          )
          SELECT *
          FROM candidate_rows
          WHERE lock_obtained = true
          LIMIT 1;
        SQL
      end

      def sql_for_consumer_stats
        @sql_for_consumer_stats ||= <<~SQL
          SELECT
            group_id,
            min(global_seq) as oldest_processed,
            max(global_seq) as newest_processed,
            count(*) as stream_count
          FROM #{offsets_table}
          GROUP BY group_id;
        SQL
      end

      def table_name(name)
        [prefix, name].join('_').to_sym
      end

      def parse_json(json)
        return json unless json.is_a?(String)

        JSON.parse(json, symbolize_names: true)
      end

      def serialize_command(cmd)
        row = cmd.to_h.except(:seq)
        row[:metadata] = JSON.dump(row[:metadata]) if row[:metadata]
        row[:payload] = JSON.dump(row[:payload]) if row[:payload]
        row
      end

      def serialize_event(event, stream_id)
        row = event.to_h
        row[:stream_id] = stream_id
        row[:metadata] = JSON.dump(row[:metadata]) if row[:metadata]
        row[:payload] = JSON.dump(row[:payload]) if row[:payload]
        row
      end

      def deserialize_event(row)
        row[:payload] = parse_json(row[:payload]) if row[:payload]
        row[:metadata] = parse_json(row[:metadata]) if row[:metadata]
        Message.from(row)
      end

      def connect(db)
        case db
        when Sequel::Database
          db
        when String, Hash
          Sequel.connect(db)
        else
          raise ArgumentError, "Invalid database connection: #{db.inspect}"
        end
      end
    end
  end
end
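A minimal wiring sketch for the backend above (not part of the gem's files): it assumes a Postgres DATABASE_URL and the default 'sourced' table prefix, and the require paths simply follow the file layout listed at the top of this diff.

require 'sequel'
require 'sourced'
require 'sourced/backends/sequel_backend'

db = Sequel.connect(ENV.fetch('DATABASE_URL')) # Postgres; #install rejects SQLite
backend = Sourced::Backends::SequelBackend.new(db, prefix: 'sourced')
backend.install unless backend.installed? # creates the sourced_streams, _offsets, _events and _commands tables

# Events are Sourced::Message instances built elsewhere; the stream id is a placeholder.
# backend.append_to_stream('cart-123', events)
# backend.read_event_stream('cart-123', after: 0)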
@@ -0,0 +1,273 @@ data/lib/sourced/backends/test_backend.rb
# frozen_string_literal: true

require 'thread'

module Sourced
  module Backends
    class TestBackend
      class Group
        attr_reader :group_id

        Offset = Struct.new(:stream_id, :index, :locked)

        def initialize(group_id, backend)
          @group_id = group_id
          @backend = backend
          @offsets = {}
          reindex
        end

        def to_h
          active_offsets = @offsets.values.select { |o| o.index >= 0 }
          oldest_processed = (active_offsets.min_by(&:index)&.index || -1) + 1
          newest_processed = (active_offsets.max_by(&:index)&.index || -1) + 1
          stream_count = active_offsets.size

          { group_id:, oldest_processed:, newest_processed:, stream_count: }
        end

        def reindex
          backend.events.each.with_index do |e, idx|
            @offsets[e.stream_id] ||= Offset.new(e.stream_id, -1, false)
          end
        end

        def ack_on(event_id, &)
          global_seq = backend.events.find_index { |e| e.id == event_id }
          return unless global_seq

          evt = backend.events[global_seq]
          offset = @offsets[evt.stream_id]
          if offset.locked
            raise Sourced::ConcurrentAckError, "Stream for event #{event_id} is being concurrently processed by #{group_id}"
          else
            offset.locked = true
            yield
            offset.index = global_seq
            offset.locked = false
          end
        end

        NOOP_FILTER = ->(_) { true }

        def reserve_next(handled_events, time_window, &)
          time_filter = time_window.is_a?(Time) ? ->(e) { e.created_at > time_window } : NOOP_FILTER
          evt = nil
          offset = nil
          index = -1

          backend.events.each.with_index do |e, idx|
            offset = @offsets[e.stream_id]
            if offset.locked # stream locked by another consumer in the group
              next
            elsif idx > offset.index && handled_events.include?(e.class) && time_filter.call(e) # new event for the stream
              evt = e
              offset.locked = true
              index = idx
              break
            else # event already consumed
            end
          end

          if evt
            if block_given?
              yield(evt)
              offset.index = index
            end
            offset.locked = false
          end
          evt
        end

        private

        attr_reader :backend
      end

      def initialize
        clear!
        @mutex = Mutex.new
        @in_tx = false
        @tx_id = nil
      end

      def events = @state.events

      def inspect
        %(<#{self.class} events:#{events.size} streams:#{@state.events_by_stream_id.size}>)
      end

      class State
        attr_reader :events, :commands, :groups, :events_by_correlation_id, :events_by_stream_id, :stream_id_seq_index

        def initialize(
          events: [],
          commands: [],
          groups: Hash.new { |h, k| h[k] = Group.new(k, self) },
          events_by_correlation_id: Hash.new { |h, k| h[k] = [] },
          events_by_stream_id: Hash.new { |h, k| h[k] = [] },
          stream_id_seq_index: {}
        )
          @events = events
          @groups = groups
          @events_by_correlation_id = events_by_correlation_id
          @commands = commands
          @command_locks = {}
          @events_by_stream_id = events_by_stream_id
          @stream_id_seq_index = stream_id_seq_index
        end

        def schedule_commands(commands)
          @commands = (@commands + commands).sort_by(&:created_at)
        end

        def next_command(&reserve)
          now = Time.now.utc

          if block_given?
            return nil if @commands.empty?
            idx = @commands.index do |c|
              !@command_locks[c.stream_id] && c.created_at <= now
            end

            return nil unless idx
            cmd = @commands[idx]
            @command_locks[cmd.stream_id] = true
            begin
              yield cmd
              @commands.delete_at(idx)
            ensure
              @command_locks.delete(cmd.stream_id)
            end
            cmd
          else
            @commands.first
          end
        end

        def copy
          self.class.new(
            events: events.dup,
            commands: commands.dup,
            groups: deep_dup(groups),
            events_by_correlation_id: deep_dup(events_by_correlation_id),
            events_by_stream_id: deep_dup(events_by_stream_id),
            stream_id_seq_index: deep_dup(stream_id_seq_index)
          )
        end

        private def deep_dup(hash)
          hash.each.with_object(hash.dup.clear) do |(k, v), new_hash|
            new_hash[k] = v.dup
          end
        end
      end

      def clear!
        @state = State.new
      end

      def installed? = true

      def reserve_next_for_reactor(reactor, &)
        group_id = reactor.consumer_info.group_id
        start_from = reactor.consumer_info.start_from.call
        transaction do
          group = @state.groups[group_id]
          group.reserve_next(reactor.handled_events, start_from, &)
        end
      end

      def ack_on(group_id, event_id, &)
        transaction do
          group = @state.groups[group_id]
          group.ack_on(event_id, &)
        end
      end

      def schedule_commands(commands)
        transaction do
          @state.schedule_commands(commands)
        end
      end

      def next_command(&)
        transaction do
          @state.next_command(&)
        end
      end

      Stats = Data.define(:stream_count, :max_global_seq, :groups)

      def stats
        stream_count = @state.events_by_stream_id.size
        max_global_seq = events.size
        groups = @state.groups.values.map(&:to_h).filter { |g| g[:stream_count] > 0 }
        Stats.new(stream_count, max_global_seq, groups)
      end

      def transaction(&)
        if @in_tx
          yield
        else
          @mutex.synchronize do
            @in_tx = true
            @state_snapshot = @state.copy
            result = yield
            @in_tx = false
            @state_snapshot = nil
            result
          end
        end
      rescue StandardError => e
        @in_tx = false
        @state = @state_snapshot if @state_snapshot
        raise
      end

      def append_to_stream(stream_id, events)
        transaction do
          check_unique_seq!(events)

          events.each do |event|
            @state.events_by_correlation_id[event.correlation_id] << event
            @state.events_by_stream_id[stream_id] << event
            @state.events << event
            @state.stream_id_seq_index[seq_key(stream_id, event)] = true
          end
        end
        @state.groups.each_value(&:reindex)
        true
      end

      def read_correlation_batch(event_id)
        event = @state.events.find { |e| e.id == event_id }
        return [] unless event
        @state.events_by_correlation_id[event.correlation_id]
      end

      def read_event_stream(stream_id, after: nil, upto: nil)
        events = @state.events_by_stream_id[stream_id]
        events = events.select { |e| e.seq > after } if after
        events = events.select { |e| e.seq <= upto } if upto
        events
      end

      private

      def check_unique_seq!(events)
        duplicate = events.find do |event|
          @state.stream_id_seq_index[seq_key(event.stream_id, event)]
        end
        if duplicate
          raise Sourced::ConcurrentAppendError, "Duplicate stream_id/seq: #{duplicate.stream_id}/#{duplicate.seq}"
        end
      end

      def seq_key(stream_id, event)
        [stream_id, event.seq]
      end
    end
  end
end
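The in-memory TestBackend mirrors the SequelBackend API for specs: #transaction snapshots @state and restores it if the block raises. A rough usage sketch under those assumptions (not part of the gem's files); the event and command objects are placeholders built elsewhere that respond to id, stream_id, seq, correlation_id and created_at as the code above expects.

backend = Sourced::Backends::TestBackend.new
backend.append_to_stream('cart-123', events)                 # events built elsewhere
backend.read_event_stream('cart-123')                        # => events for that stream, in order
backend.transaction { backend.schedule_commands(commands) }  # rolled back if the block raises
backend.clear!                                               # reset the in-memory state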
@@ -0,0 +1,46 @@ data/lib/sourced/command_context.rb
# frozen_string_literal: true

require 'sourced/types'

module Sourced
  # A command factory to instantiate commands from Hash attributes
  # including extra metadata.
  # @example
  #
  #   ctx = Sourced::CommandContext.new(
  #     stream_id: params[:stream_id],
  #     metadata: {
  #       user_id: session[:user_id]
  #     }
  #   )
  #
  #   # params[:command] should be a Hash with { type: String, payload: Hash | nil }
  #
  #   cmd = ctx.build(params[:command])
  #   cmd.stream_id # String
  #   cmd.metadata[:user_id] # == session[:user_id]
  #
  class CommandContext
    # @option stream_id [String]
    # @option metadata [Hash] metadata to add to commands built by this context
    # @option scope [Sourced::Message] Message class to use as command registry
    def initialize(stream_id: nil, metadata: Plumb::BLANK_HASH, scope: Sourced::Command)
      @defaults = {
        stream_id:,
        metadata:
      }.freeze
      @scope = scope
    end

    # @param attrs [Hash] attributes to look up and build a command from.
    # @return [Sourced::Message]
    def build(attrs)
      attrs = defaults.merge(Types::SymbolizedHash.parse(attrs))
      scope.from(attrs)
    end

    private

    attr_reader :defaults, :scope
  end
end
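Following the @example in the class comment above, a short hedged sketch of building a command (not part of the gem's files); 'carts.add_item' and the payload keys are made-up values whose command class would need to be registered under the default Sourced::Command scope for .from to resolve it.

ctx = Sourced::CommandContext.new(
  stream_id: 'cart-123',
  metadata: { user_id: 42 }
)

cmd = ctx.build({ type: 'carts.add_item', payload: { sku: 'abc' } })
cmd.stream_id          # => 'cart-123'
cmd.metadata[:user_id] # => 42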