sequent 0.1.1 → 0.1.2
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/lib/sequent/core/aggregate_repository.rb +94 -0
- data/lib/sequent/core/aggregate_root.rb +87 -0
- data/lib/sequent/core/base_command_handler.rb +39 -0
- data/lib/sequent/core/base_event_handler.rb +51 -0
- data/lib/sequent/core/command.rb +79 -0
- data/lib/sequent/core/command_record.rb +26 -0
- data/lib/sequent/core/command_service.rb +118 -0
- data/lib/sequent/core/core.rb +15 -0
- data/lib/sequent/core/event.rb +62 -0
- data/lib/sequent/core/event_record.rb +34 -0
- data/lib/sequent/core/event_store.rb +110 -0
- data/lib/sequent/core/helpers/association_validator.rb +39 -0
- data/lib/sequent/core/helpers/attribute_support.rb +207 -0
- data/lib/sequent/core/helpers/boolean_support.rb +36 -0
- data/lib/sequent/core/helpers/copyable.rb +25 -0
- data/lib/sequent/core/helpers/equal_support.rb +41 -0
- data/lib/sequent/core/helpers/helpers.rb +9 -0
- data/lib/sequent/core/helpers/mergable.rb +21 -0
- data/lib/sequent/core/helpers/param_support.rb +80 -0
- data/lib/sequent/core/helpers/self_applier.rb +45 -0
- data/lib/sequent/core/helpers/string_support.rb +22 -0
- data/lib/sequent/core/helpers/uuid_helper.rb +17 -0
- data/lib/sequent/core/record_sessions/active_record_session.rb +92 -0
- data/lib/sequent/core/record_sessions/record_sessions.rb +2 -0
- data/lib/sequent/core/record_sessions/replay_events_session.rb +306 -0
- data/lib/sequent/core/tenant_event_store.rb +18 -0
- data/lib/sequent/core/transactions/active_record_transaction_provider.rb +16 -0
- data/lib/sequent/core/transactions/no_transactions.rb +13 -0
- data/lib/sequent/core/transactions/transactions.rb +2 -0
- data/lib/sequent/core/value_object.rb +48 -0
- data/lib/sequent/migrations/migrate_events.rb +53 -0
- data/lib/sequent/migrations/migrations.rb +7 -0
- data/lib/sequent/sequent.rb +3 -0
- data/lib/sequent/test/command_handler_helpers.rb +101 -0
- data/lib/sequent/test/test.rb +1 -0
- data/lib/version.rb +3 -0
- metadata +38 -3
@@ -0,0 +1,92 @@
|
|
1
|
+
require 'active_record'
|
2
|
+
|
3
|
+
module Sequent
  module Core
    module RecordSessions
      #
      # Session objects are used to update view state.
      #
      # The ActiveRecordSession can be used when ActiveRecord is the view state
      # store; every operation is applied straight to the underlying records.
      #
      class ActiveRecordSession

        # Looks up a single record and applies an event to it.
        #
        # +where_clause+ defaults to the event's aggregate_id.
        # +options+ supports :update_sequence_number (default true).
        # An optional block receives the record before it is saved.
        # Raises when no matching record exists.
        def update_record(record_class, event, where_clause = {aggregate_id: event.aggregate_id}, options = {}, &block)
          args = {update_sequence_number: true}.merge(options)
          record = get_record(record_class, where_clause)
          raise("Record of class #{record_class} with where clause #{where_clause} not found while handling event #{event}") unless record
          yield record if block_given?
          record.sequence_number = event.sequence_number if args[:update_sequence_number]
          record.updated_at = event.created_at if record.respond_to?(:updated_at)
          record.save!
        end

        # Builds a new record, yields it to an optional block, then persists it.
        def create_record(record_class, values)
          new_record(record_class, values).tap do |record|
            yield record if block_given?
            record.save!
          end
        end

        # Fetches the record matching +values+, building a fresh one (stamped
        # with +created_at+) when none exists; the mandatory block can then
        # mutate it before it is saved.
        def create_or_update_record(record_class, values, created_at = Time.now)
          record = get_record(record_class, values) ||
                   new_record(record_class, values.merge(created_at: created_at))
          yield record
          record.save!
          record
        end

        # Like #get_record but uses +first!+, which raises when nothing matches.
        def get_record!(record_class, where_clause)
          find_records(record_class, where_clause).first!
        end

        # First matching record, or nil.
        def get_record(record_class, where_clause)
          find_records(record_class, where_clause).first
        end

        # Deletes every matching record in one statement (+delete_all+,
        # i.e. without instantiating the records).
        def delete_all_records(record_class, where_clause)
          find_records(record_class, where_clause).delete_all
        end

        # Destroys a single record; the class argument is ignored here but kept
        # for interface compatibility with other session implementations.
        def delete_record(_, record)
          record.destroy
        end

        # Mass-updates all matching records with a single SQL statement.
        def update_all_records(record_class, where_clause, updates)
          find_records(record_class, where_clause).update_all(updates)
        end

        # Yields each matching record and saves it afterwards.
        def do_with_records(record_class, where_clause)
          find_records(record_class, where_clause).each do |record|
            yield record
            record.save!
          end
        end

        # Yields the single matching record (raising when absent) and saves it.
        def do_with_record(record_class, where_clause)
          record = get_record!(record_class, where_clause)
          yield record
          record.save!
        end

        # Unscoped relation for +where_clause+ (default scopes are bypassed).
        def find_records(record_class, where_clause)
          record_class.unscoped.where(where_clause)
        end

        # Last matching record, or nil.
        def last_record(record_class, where_clause)
          find_records(record_class, where_clause).last
        end

        private

        # Builds (without saving) a record outside any default scope.
        def new_record(record_class, values)
          record_class.unscoped.new(values)
        end

      end
    end
  end
end
|
@@ -0,0 +1,306 @@
|
|
1
|
+
require 'set'
|
2
|
+
require 'active_record'
|
3
|
+
|
4
|
+
module Sequent
  module Core
    module RecordSessions
      #
      # Session objects are used to update view state.
      #
      # The ReplayEventsSession is optimized for bulk loading records in a Postgres
      # database using CSV import. After a lot of experimenting this turned out to be
      # the fastest way to do bulk inserts in the database. You can tweak the amount
      # of records in the CSV via +insert_with_csv_size+ before it flushes to the
      # database to gain (or lose) speed.
      #
      # It is highly recommended to create +indices+ on the in-memory +record_store+
      # to speed up the processing. By default all records are indexed by
      # +aggregate_id+ if they have such a property.
      #
      # Example:
      #
      #   class InvoiceEventHandler < Sequent::Core::BaseEventHandler
      #     on RecipientMovedEvent do |event|
      #       update_all_records InvoiceRecord, recipient_id: event.recipient.aggregate_id do |record|
      #         record.recipient_street = record.recipient.street
      #       end
      #     end
      #   end
      #
      # In this case it is wise to create an index on InvoiceRecord on the
      # recipient_id like you would in the database:
      #
      #   ReplayEventsSession.new(
      #     50,
      #     {InvoiceRecord => [[:recipient_id]]}
      #   )
      class ReplayEventsSession

        attr_reader :record_store
        attr_accessor :insert_with_csv_size

        # Cache of dynamically generated Struct classes, keyed by class name.
        # Class-level so structs are generated only once per process.
        def self.struct_cache
          @struct_cache ||= {}
        end

        # Mixed into the generated structs to allow mass-assignment of values.
        module InitStruct
          # Assigns every key/value pair onto the struct; returns self.
          def set_values(values)
            values.each do |k, v|
              self[k] = v
            end
            self
          end
        end

        # +insert_with_csv_size+ number of records above which a batch is flushed
        #   via the Postgres COPY protocol instead of individual inserts.
        #
        # +indices+ Hash of indices to create in memory. Greatly speeds up replaying.
        #   Key corresponds to the 'Record' class; value is a list of lists of the
        #   columns to index, e.g. [[:first_index_column], [:another_index, :with_two_columns]]
        def initialize(insert_with_csv_size = 50, indices = {})
          @insert_with_csv_size = insert_with_csv_size
          @record_store = Hash.new { |h, k| h[k] = Set.new }
          @record_index = {}
          @indices = indices
        end

        # Applies +event+ to the single in-memory record matching +where_clause+.
        # Raises (via #get_record!) when no record matches. Nothing is persisted
        # until #commit.
        def update_record(record_class, event, where_clause = {aggregate_id: event.aggregate_id}, options = {}, &block)
          defaults = {update_sequence_number: true}
          args = defaults.merge(options)
          record = get_record!(record_class, where_clause)
          record.updated_at = event.created_at if record.respond_to?(:updated_at)
          yield record if block_given?
          record.sequence_number = event.sequence_number if args[:update_sequence_number]
        end

        # Creates an in-memory record (a generated Struct mirroring the model's
        # columns), registers it in the store and indexes, and returns it.
        def create_record(record_class, values)
          column_names = record_class.column_names
          values.merge!(updated_at: values[:created_at]) if column_names.include?("updated_at")
          struct_class_name = "#{record_class.to_s}Struct"
          if self.class.struct_cache.has_key?(struct_class_name)
            struct_class = self.class.struct_cache[struct_class_name]
          else
            # We create a struct on the fly.
            # Since the replay happens in memory we implement the ==, eql? and hash
            # methods to use object identity. A record is the same if and only if it
            # is the same object. These methods are necessary since we use Set
            # instead of []. (eval is required here: constants cannot be assigned
            # dynamically outside eval'd code.)
            class_def = <<-EOD
              #{struct_class_name} = Struct.new(*#{column_names.map(&:to_sym)})
              class #{struct_class_name}
                include InitStruct
                def ==(other)
                  return true if self.equal?(other)
                  super
                end
                def eql?(other)
                  self == other
                end
                def hash
                  self.object_id.hash
                end
              end
            EOD
            eval(class_def)
            struct_class = ReplayEventsSession.const_get(struct_class_name)
            self.class.struct_cache[struct_class_name] = struct_class
          end
          record = struct_class.new.set_values(values)

          yield record if block_given?
          @record_store[record_class] << record
          if record.respond_to?(:aggregate_id)
            @record_index[[record_class, record.aggregate_id]] = record
          end

          if indexed?(record_class)
            do_with_cache_keys(record_class, record) do |key|
              @record_index[key] = [] unless @record_index.has_key?(key)
              @record_index[key] << record
            end
          end
          record
        end

        # Returns the record matching +values+, creating it (stamped with
        # +created_at+) when absent; yields it to an optional block.
        def create_or_update_record(record_class, values, created_at = Time.now)
          record = get_record(record_class, values)
          unless record
            record = create_record(record_class, values.merge(created_at: created_at))
          end
          yield record if block_given?
          record
        end

        # Like #get_record but raises when no record matches.
        def get_record!(record_class, where_clause)
          record = get_record(record_class, where_clause)
          raise("record #{record_class} not found for #{where_clause}, store: #{@record_store[record_class]}") unless record
          record
        end

        # First matching record, or nil.
        def get_record(record_class, where_clause)
          results = find_records(record_class, where_clause)
          results.empty? ? nil : results.first
        end

        # Deletes every record matching +where_clause+ from the session.
        def delete_all_records(record_class, where_clause)
          find_records(record_class, where_clause).each do |record|
            delete_record(record_class, record)
          end
        end

        # Removes +record+ from the store and all in-memory indexes.
        def delete_record(record_class, record)
          @record_store[record_class].delete(record)
          # Fix: also drop the implicit aggregate_id index entry added in
          # #create_record; previously deleted records stayed reachable through
          # find_records/get_record on :aggregate_id. The equal? guard avoids
          # dropping a newer record indexed under the same aggregate_id.
          if record.respond_to?(:aggregate_id)
            key = [record_class, record.aggregate_id]
            @record_index.delete(key) if @record_index[key].equal?(record)
          end
          if indexed?(record_class)
            do_with_cache_keys(record_class, record) do |key|
              @record_index[key].delete(record) if @record_index.has_key?(key)
            end
          end
        end

        # Applies +updates+ to every matching record in memory.
        def update_all_records(record_class, where_clause, updates)
          find_records(record_class, where_clause).each do |record|
            updates.each_pair do |k, v|
              record[k.to_sym] = v
            end
          end
        end

        # Yields each matching record.
        def do_with_records(record_class, where_clause)
          records = find_records(record_class, where_clause)
          records.each do |record|
            yield record
          end
        end

        # Yields the single matching record, raising when absent.
        def do_with_record(record_class, where_clause)
          record = get_record!(record_class, where_clause)
          yield record
        end

        # Finds all records matching +where_clause+, using the in-memory indexes
        # when possible and falling back to a full scan of the record store.
        # The result is always dup'ed so callers may mutate it safely.
        def find_records(record_class, where_clause)
          if where_clause.has_key?(:aggregate_id) && where_clause.size == 1
            [@record_index[[record_class, where_clause[:aggregate_id]]]].compact
          elsif use_index?(record_class, where_clause)
            values = get_index(record_class, where_clause).map { |field| where_clause[field] }
            @record_index[[record_class, *values]] || []
          else
            @record_store[record_class].select do |record|
              where_clause.all? do |k, v|
                # Symbols are compared as strings on both sides.
                expected_value = v.kind_of?(Symbol) ? v.to_s : v
                actual_value = record[k.to_sym]
                actual_value = actual_value.to_s if actual_value.kind_of?(Symbol)
                if expected_value.kind_of?(Array)
                  expected_value.include?(actual_value)
                else
                  actual_value == expected_value
                end
              end
            end
          end.dup
        end

        # Last matching record, or nil.
        def last_record(record_class, where_clause)
          results = find_records(record_class, where_clause)
          results.empty? ? nil : results.last
        end

        # Flushes all in-memory records to the database and clears the session.
        # Batches larger than +insert_with_csv_size+ are streamed via the Postgres
        # COPY protocol; smaller batches use per-row prepared inserts.
        def commit
          begin
            @record_store.each do |clazz, records|
              if records.size > @insert_with_csv_size
                require 'csv' # fix: CSV was used without ever being required
                csv = CSV.new("")
                column_names = clazz.column_names.reject { |name| name == "id" }
                records.each do |obj|
                  csv << column_names.map do |column_name|
                    obj[column_name]
                  end
                end

                buf = ''
                conn = ActiveRecord::Base.connection.raw_connection
                copy_data = StringIO.new csv.string
                conn.transaction do
                  conn.exec("COPY #{clazz.table_name} (#{column_names.join(",")}) FROM STDIN WITH csv")
                  begin
                    while copy_data.read(1024, buf)
                      until conn.put_copy_data(buf)
                        sleep 0.1
                      end
                    end
                  rescue SystemCallError => err
                    # Fix: was `rescue Errno`, which can never match — Errno::*
                    # exceptions subclass SystemCallError; they do not include
                    # the Errno module itself.
                    errmsg = "%s while reading copy data: %s" % [err.class.name, err.message]
                    conn.put_copy_end(errmsg)
                  ensure
                    conn.put_copy_end
                    copy_data.close
                    while res = conn.get_result
                      status = res.res_status(res.result_status)
                      if status != "PGRES_COMMAND_OK"
                        raise "Postgres copy command failed: #{status}, #{res.error_message}"
                      end
                    end
                  end
                end
              else
                clazz.unscoped do
                  inserts = []
                  column_names = clazz.column_names.reject { |name| name == "id" }
                  prepared_values = (1..column_names.size).map { |i| "$#{i}" }.join(",")
                  records.each do |r|
                    values = column_names.map { |name| r[name.to_sym] }
                    inserts << values
                  end
                  sql = %Q{insert into #{clazz.table_name} (#{column_names.join(",")}) values (#{prepared_values})}
                  inserts.each do |insert|
                    clazz.connection.raw_connection.async_exec(sql, insert)
                  end
                end
              end
            end
          ensure
            # The session is always emptied, even when flushing fails.
            clear
          end
        end

        # Drops all in-memory state.
        def clear
          @record_store.clear
          @record_index.clear
        end

        private

        # True when explicit indices were configured for +record_class+.
        def indexed?(record_class)
          @indices.has_key?(record_class)
        end

        # Yields every configured cache key for +record+.
        def do_with_cache_keys(record_class, record)
          @indices[record_class].each do |index|
            cache_key = [record_class]
            index.each do |key|
              cache_key << record[key]
            end
            yield cache_key
          end
        end

        # True when +where_clause+ exactly matches one of the configured indices.
        def use_index?(record_class, where_clause)
          @indices.has_key?(record_class) && @indices[record_class].any? { |indexed_where| where_clause.keys.size == indexed_where.size && (where_clause.keys - indexed_where).empty? }
        end

        # The configured index matching +where_clause+, or nil.
        def get_index(record_class, where_clause)
          @indices[record_class].find { |indexed_where| where_clause.keys.size == indexed_where.size && (where_clause.keys - indexed_where).empty? }
        end

      end

    end
  end
end
|
@@ -0,0 +1,18 @@
|
|
1
|
+
##
# Multi-tenant event store that replays events grouped by a specific tenant.
#
module Sequent
  module Core
    class TenantEventStore < EventStore

      # Replays all events of every aggregate belonging to +organization_id+.
      #
      # Security fix: +organization_id+ and the aggregate ids are escaped with
      # connection.quote instead of raw string interpolation, closing a SQL
      # injection hole.
      #
      # NOTE(review): when a tenant has no aggregates the IN-list is empty and
      # the second query is invalid SQL — pre-existing behavior, confirm whether
      # replay_events is ever invoked for empty tenants.
      def replay_events_for(organization_id)
        replay_events do
          connection = @record_class.connection
          table = @record_class.table_name
          quoted_org = connection.quote(organization_id)
          aggregate_ids = connection.select_all("select distinct aggregate_id from #{table} where organization_id = #{quoted_org}").map { |hash| hash["aggregate_id"] }
          in_clause = aggregate_ids.map { |id| connection.quote(id) }.join(",")
          connection.select_all("select id, event_type, event_json from #{table} where aggregate_id in (#{in_clause}) order by id")
        end
      end

    end
  end
end
|
@@ -0,0 +1,48 @@
|
|
1
|
+
require 'active_model'
|
2
|
+
require_relative 'helpers/string_support'
|
3
|
+
require_relative 'helpers/equal_support'
|
4
|
+
require_relative 'helpers/copyable'
|
5
|
+
require_relative 'helpers/attribute_support'
|
6
|
+
require_relative 'helpers/param_support'
|
7
|
+
|
8
|
+
module Sequent

  module Core
    #
    # ValueObject is a container for data that belongs together but requires no
    # identity. Whether something requires identity is up to you to decide; an
    # example from an invoicing domain could be a person's Address:
    #
    #   class Address < Sequent::Core::ValueObject
    #     attrs street: String, city: String, country: Country
    #   end
    #
    # A ValueObject is equal to another ValueObject if and only if all +attrs+
    # are equal.
    #
    # You can copy a value object as follows:
    #
    #   new_address = address.copy(street: "New Street")
    #
    # This is a deep clone of the address with the street attribute containing
    # "New Street".
    class ValueObject
      # NOTE: the include order determines the ancestor chain and must not change.
      include Sequent::Core::Helpers::StringSupport,
              Sequent::Core::Helpers::EqualSupport,
              Sequent::Core::Helpers::Copyable,
              Sequent::Core::Helpers::AttributeSupport,
              Sequent::Core::Helpers::ParamSupport,
              ActiveModel::Serializers::JSON,
              ActiveModel::Validations

      self.include_root_in_json = false

      # Builds the object and mass-assigns the given attributes
      # (via AttributeSupport#update_all_attributes).
      def initialize(args = {})
        @errors = ActiveModel::Errors.new(self)
        update_all_attributes(args)
      end

    end

  end
end
|
48
|
+
|
@@ -0,0 +1,53 @@
|
|
1
|
+
##
# When you need to upgrade the event store based on information of the previous
# schema version this is the place you need to implement a migration.
# Examples are: corrupt events (due to insufficient testing for instance...)
# or adding extra events to the event stream if a new concept is introduced.
#
# To implement a migration you should create a class according to the following contract:
#   module Database
#     class MigrateToVersionXXX
#       def initialize(env)
#         @env = env
#       end
#
#       def migrate
#         # your migration code here...
#       end
#     end
#   end
#
# NOTE(review): the example above nests migrations under +Database+, but the
# lookup below uses Kernel.const_defined?, which resolves top-level constants
# only — confirm where migration classes are actually expected to live.
module Sequent
  module Migrations
    class MigrateEvents

      ##
      # @param env The string representing the current environment. E.g. "development", "production"
      def initialize(env)
        @env = env
      end

      ##
      # Runs each MigrateToVersionN class from current_version + 1 up to and
      # including new_version, skipping versions with no migration class.
      #
      # @param current_version The current version of the application. E.g. 10
      # @param new_version The version to migrate to. E.g. 11
      # @param &after_migration_block an optional block run after each migration. E.g. close resources
      #
      def execute_migrations(current_version, new_version, &after_migration_block)
        # Guard clause; `&&`/`||` replace the original low-precedence `and`.
        # A current_version of 0 (fresh install) never triggers migrations.
        return if current_version == new_version || current_version <= 0
        ((current_version + 1)..new_version).each do |upgrade_to_version|
          migration_class = "MigrateToVersion#{upgrade_to_version}".to_sym
          next unless Kernel.const_defined?(migration_class)
          begin
            Kernel.const_get(migration_class).new(@env).migrate
          ensure
            # Runs even when the migration raises.
            after_migration_block.call if after_migration_block
          end
        end
      end

    end
  end
end
|
@@ -0,0 +1,101 @@
|
|
1
|
+
module Sequent
  module Test
    ##
    # Use in tests.
    #
    # This provides a nice DSL for event based testing of your CommandHandler like
    #
    #   given_events InvoiceCreatedEvent.new(args)
    #   when_command PayInvoiceCommand(args)
    #   then_events InvoicePaidEvent(args)
    #
    # Example for Rspec config:
    #
    #   RSpec.configure do |config|
    #     config.include Sequent::Test::CommandHandlerHelpers
    #   end
    #
    # Then in a spec:
    #
    #   describe InvoiceCommandHandler do
    #
    #     before :each do
    #       @event_store = Sequent::Test::CommandHandlerHelpers::FakeEventStore.new
    #       @repository = Sequent::Core::AggregateRepository.new(@event_store)
    #       @command_handler = InvoiceCommandHandler.new(@repository)
    #     end
    #
    #     it "marks an invoice as paid" do
    #       given_events InvoiceCreatedEvent.new(args)
    #       when_command PayInvoiceCommand(args)
    #       then_events InvoicePaidEvent(args)
    #     end
    #
    #   end
    module CommandHandlerHelpers

      # In-memory stand-in for the real event store. Events are held as
      # [type_symbol, json_string] tuples and deserialized on the way out.
      class FakeEventStore
        def initialize
          @all_events = []
          @stored_events = []
        end

        # All events (given plus committed) for one aggregate, deserialized.
        def load_events(aggregate_id)
          deserialize_events(@all_events).select { |event| event.aggregate_id == aggregate_id }
        end

        # Only the events committed since the last #given_events call.
        def stored_events
          deserialize_events(@stored_events)
        end

        # Records committed events; the first (stream) argument is ignored.
        def commit_events(_, events)
          serialized = serialize_events(events)
          @all_events.concat(serialized)
          @stored_events.concat(serialized)
        end

        # Seeds history and resets the committed-events bookkeeping.
        def given_events(events)
          @all_events.concat(serialize_events(events))
          @stored_events = []
        end

        private

        def serialize_events(events)
          events.map { |event| [event.class.name.to_sym, event.to_json] }
        end

        def deserialize_events(events)
          events.map { |type, json| Class.const_get(type).deserialize_from_json(JSON.parse(json)) }
        end

      end

      # DSL: seed the event store with history.
      def given_events(*events)
        raise ArgumentError.new("events can not be nil") if events.compact.empty?
        @event_store.given_events(events)
      end

      # DSL: dispatch a command through @command_handler and commit via @repository.
      def when_command(command)
        raise "@command_handler is mandatory when using the #{self.class}" unless @command_handler
        unless @command_handler.handles_message?(command)
          raise "Command handler #{@command_handler} cannot handle command #{command}, please configure the command type (forgot an include in the command class?)"
        end
        @command_handler.handle_message(command)
        @repository.commit(command)
      end

      # DSL: assert (RSpec `should` syntax) that exactly +events+ were stored,
      # comparing classes first and then JSON payloads.
      def then_events(*events)
        @event_store.stored_events.map(&:class).should == events.map(&:class)
        @event_store.stored_events.zip(events).each do |actual, expected|
          JSON.parse(actual.payload.to_json).should == JSON.parse(expected.payload.to_json) if expected
        end
      end

      # DSL: assert that no events were stored.
      def then_no_events
        then_events
      end

    end

  end
end
|
@@ -0,0 +1 @@
|
|
1
|
+
require_relative 'command_handler_helpers'
|
data/lib/version.rb
ADDED