sequent 8.0.2 → 8.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/db/sequent_pgsql.sql +24 -0
- data/db/sequent_schema_indexes.sql +5 -0
- data/db/sequent_schema_tables.sql +6 -0
- data/lib/sequent/core/aggregate_root.rb +3 -0
- data/lib/sequent/core/event_store.rb +32 -2
- data/lib/sequent/core/helpers/unique_keys.rb +81 -0
- data/lib/sequent/core/stream_record.rb +20 -2
- data/lib/sequent/test/command_handler_helpers.rb +93 -61
- data/lib/version.rb +1 -1
- metadata +4 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 5e7ff410ea483cffeb038904103ecad45b89c7f9907775362a18daf2a9a0a7e5
|
4
|
+
data.tar.gz: bcb33c8793653e31677e5883195dc0babc57c7259c8e4e76e7491f91272ee65f
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: f5acb72e36e29fcf811734b8db7ee49b7a2e2cc915fafcc675636bfcd399e8961cf514c40d8312d59ea6c289379160ac3fd8f04f45c1cba7f529e6806ca3b415
|
7
|
+
data.tar.gz: 93fa973ab3fc2454bc49b2359e254733b9ca7a850f4c30b31a08a7d14b9cd7d72889d3386e1946bc85d18c16c4f5a326fa2198f7e5b6005e0eb55f320bdf993b
|
data/db/sequent_pgsql.sql
CHANGED
@@ -135,6 +135,7 @@ DECLARE
|
|
135
135
|
_provided_events_partition_key aggregates.events_partition_key%TYPE;
|
136
136
|
_events_partition_key aggregates.events_partition_key%TYPE;
|
137
137
|
_snapshot_outdated_at aggregates_that_need_snapshots.snapshot_outdated_at%TYPE;
|
138
|
+
_unique_keys jsonb;
|
138
139
|
BEGIN
|
139
140
|
_command_id = store_command(_command);
|
140
141
|
|
@@ -159,12 +160,22 @@ BEGIN
|
|
159
160
|
ORDER BY 1
|
160
161
|
ON CONFLICT DO NOTHING;
|
161
162
|
|
163
|
+
FOR _aggregate IN SELECT row->0 FROM jsonb_array_elements(_aggregates_with_events) AS row LOOP
|
164
|
+
_aggregate_id = _aggregate->>'aggregate_id';
|
165
|
+
_unique_keys = COALESCE(_aggregate->'unique_keys', '{}'::jsonb);
|
166
|
+
|
167
|
+
DELETE FROM aggregate_unique_keys AS target
|
168
|
+
WHERE target.aggregate_id = _aggregate_id
|
169
|
+
AND NOT (_unique_keys ? target.scope);
|
170
|
+
END LOOP;
|
171
|
+
|
162
172
|
FOR _aggregate, _events IN SELECT row->0, row->1 FROM jsonb_array_elements(_aggregates_with_events) AS row
|
163
173
|
ORDER BY row->0->'aggregate_id', row->1->0->'event_json'->'sequence_number'
|
164
174
|
LOOP
|
165
175
|
_aggregate_id = _aggregate->>'aggregate_id';
|
166
176
|
_provided_events_partition_key = _aggregate->>'events_partition_key';
|
167
177
|
_snapshot_outdated_at = _aggregate->>'snapshot_outdated_at';
|
178
|
+
_unique_keys = COALESCE(_aggregate->'unique_keys', '{}'::jsonb);
|
168
179
|
|
169
180
|
SELECT * INTO _aggregate_row FROM aggregates WHERE aggregate_id = _aggregate_id;
|
170
181
|
_events_partition_key = COALESCE(_provided_events_partition_key, _aggregate_row.events_partition_key, '');
|
@@ -179,6 +190,19 @@ BEGIN
|
|
179
190
|
DO UPDATE SET events_partition_key = EXCLUDED.events_partition_key
|
180
191
|
WHERE aggregates.events_partition_key IS DISTINCT FROM EXCLUDED.events_partition_key;
|
181
192
|
|
193
|
+
BEGIN
|
194
|
+
INSERT INTO aggregate_unique_keys AS target (aggregate_id, scope, key)
|
195
|
+
SELECT _aggregate_id, key, value
|
196
|
+
FROM jsonb_each(_unique_keys) AS x
|
197
|
+
ON CONFLICT (aggregate_id, scope) DO UPDATE
|
198
|
+
SET key = EXCLUDED.key
|
199
|
+
WHERE target.key <> EXCLUDED.key;
|
200
|
+
EXCEPTION
|
201
|
+
WHEN unique_violation THEN
|
202
|
+
RAISE unique_violation
|
203
|
+
USING MESSAGE = 'duplicate unique key value for aggregate ' || (_aggregate->>'aggregate_type') || ' ' || _aggregate_id || ' (' || SQLERRM || ')';
|
204
|
+
END;
|
205
|
+
|
182
206
|
INSERT INTO events (partition_key, aggregate_id, sequence_number, created_at, command_id, event_type_id, event_json)
|
183
207
|
SELECT _events_partition_key,
|
184
208
|
_aggregate_id,
|
@@ -14,6 +14,11 @@ CREATE INDEX events_event_type_id_idx ON events (event_type_id);
|
|
14
14
|
ALTER TABLE aggregates
|
15
15
|
ADD FOREIGN KEY (aggregate_type_id) REFERENCES aggregate_types (id) ON UPDATE CASCADE;
|
16
16
|
|
17
|
+
ALTER TABLE aggregate_unique_keys
|
18
|
+
ADD PRIMARY KEY (aggregate_id, scope),
|
19
|
+
ADD UNIQUE (scope, key),
|
20
|
+
ADD FOREIGN KEY (aggregate_id) REFERENCES aggregates (aggregate_id) ON UPDATE CASCADE ON DELETE CASCADE;
|
21
|
+
|
17
22
|
ALTER TABLE events
|
18
23
|
ADD FOREIGN KEY (partition_key, aggregate_id) REFERENCES aggregates (events_partition_key, aggregate_id)
|
19
24
|
ON UPDATE CASCADE ON DELETE RESTRICT;
|
@@ -25,6 +25,12 @@ CREATE TABLE aggregates (
|
|
25
25
|
created_at timestamp with time zone NOT NULL DEFAULT NOW()
|
26
26
|
) PARTITION BY RANGE (aggregate_id);
|
27
27
|
|
28
|
+
CREATE TABLE aggregate_unique_keys (
|
29
|
+
aggregate_id uuid NOT NULL,
|
30
|
+
scope text NOT NULL,
|
31
|
+
key jsonb NOT NULL
|
32
|
+
);
|
33
|
+
|
28
34
|
CREATE TABLE events (
|
29
35
|
aggregate_id uuid NOT NULL,
|
30
36
|
partition_key text NOT NULL DEFAULT '',
|
@@ -3,6 +3,7 @@
|
|
3
3
|
require 'base64'
|
4
4
|
require_relative 'helpers/message_handler'
|
5
5
|
require_relative 'helpers/autoset_attributes'
|
6
|
+
require_relative 'helpers/unique_keys'
|
6
7
|
require_relative 'stream_record'
|
7
8
|
require_relative 'aggregate_roots'
|
8
9
|
|
@@ -37,6 +38,7 @@ module Sequent
|
|
37
38
|
class AggregateRoot
|
38
39
|
include Helpers::MessageHandler
|
39
40
|
include Helpers::AutosetAttributes
|
41
|
+
include Helpers::UniqueKeys
|
40
42
|
include SnapshotConfiguration
|
41
43
|
extend ActiveSupport::DescendantsTracker
|
42
44
|
|
@@ -104,6 +106,7 @@ module Sequent
|
|
104
106
|
aggregate_id: id,
|
105
107
|
events_partition_key: events_partition_key,
|
106
108
|
snapshot_outdated_at: snapshot_outdated? ? Time.now : nil,
|
109
|
+
unique_keys:,
|
107
110
|
)
|
108
111
|
end
|
109
112
|
|
@@ -9,6 +9,9 @@ require_relative 'snapshot_store'
|
|
9
9
|
|
10
10
|
module Sequent
|
11
11
|
module Core
|
12
|
+
class AggregateKeyNotUniqueError < RuntimeError
|
13
|
+
end
|
14
|
+
|
12
15
|
class EventStore
|
13
16
|
include Helpers::PgsqlHelpers
|
14
17
|
include SnapshotStore
|
@@ -175,6 +178,29 @@ module Sequent
|
|
175
178
|
record&.event_stream
|
176
179
|
end
|
177
180
|
|
181
|
+
def position_mark
|
182
|
+
connection.exec_query('SELECT pg_current_snapshot()::text AS mark')[0]['mark']
|
183
|
+
end
|
184
|
+
|
185
|
+
def load_events_since_marked_position(mark)
|
186
|
+
events = connection.execute(
|
187
|
+
Sequent.configuration.event_record_class
|
188
|
+
.where(<<~SQL, {mark:})
|
189
|
+
xact_id >= pg_snapshot_xmin(CAST(:mark AS pg_snapshot))::text::bigint
|
190
|
+
AND NOT pg_visible_in_snapshot(xact_id::text::xid8, CAST(:mark AS pg_snapshot))
|
191
|
+
SQL
|
192
|
+
.select('*, pg_current_snapshot()::text AS mark')
|
193
|
+
.to_sql,
|
194
|
+
).to_a
|
195
|
+
|
196
|
+
return [[], mark] if events.empty?
|
197
|
+
|
198
|
+
[
|
199
|
+
events.map { |hash| deserialize_event(hash) },
|
200
|
+
events[0]['mark'],
|
201
|
+
]
|
202
|
+
end
|
203
|
+
|
178
204
|
def permanently_delete_event_stream(aggregate_id)
|
179
205
|
permanently_delete_event_streams([aggregate_id])
|
180
206
|
end
|
@@ -250,8 +276,12 @@ module Sequent
|
|
250
276
|
Sequent::Core::Oj.dump(events),
|
251
277
|
],
|
252
278
|
)
|
253
|
-
rescue ActiveRecord::RecordNotUnique
|
254
|
-
|
279
|
+
rescue ActiveRecord::RecordNotUnique => e
|
280
|
+
if e.message =~ /duplicate unique key value for aggregate/
|
281
|
+
raise AggregateKeyNotUniqueError, e.message
|
282
|
+
else
|
283
|
+
raise OptimisticLockingError
|
284
|
+
end
|
255
285
|
end
|
256
286
|
|
257
287
|
def convert_timestamp(timestamp)
|
@@ -0,0 +1,81 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
module Sequent
|
4
|
+
module Core
|
5
|
+
module Helpers
|
6
|
+
# Some aggregates represent a unique external entity (e.g. a
|
7
|
+
# user's email address or login name) and this uniqueness needs
|
8
|
+
# to be enforced. For each unique key the returned object should
|
9
|
+
# have an entry where the key of the entry describes the scope
|
10
|
+
# of the constraint (e.g. `user_email` or `login_name`) and the
|
11
|
+
# value represents the unique value. Values can be any JSON
|
12
|
+
# value (string, object, array, etc). Note that uniqueness is
|
13
|
+
# enforced across all aggregate types if the same scope is used.
|
14
|
+
#
|
15
|
+
# An `AggregateKeyNotUniqueError` is raised if a unique
|
16
|
+
# constraint is violated when committing the events to the
|
17
|
+
# database.
|
18
|
+
module UniqueKeys
|
19
|
+
module ClassMethods
|
20
|
+
attr_reader :unique_key_definitions
|
21
|
+
|
22
|
+
# Defines a unique key for your aggregate. The first
|
23
|
+
# parameter is the scope of the unique constraints, followed
|
24
|
+
# by a list of attributes or keywords with blocks to produce
|
25
|
+
# the value that needs to be unique.
|
26
|
+
#
|
27
|
+
# `nil` valued keys are ignored when enforcing uniqueness.
|
28
|
+
#
|
29
|
+
# Example usage:
|
30
|
+
#
|
31
|
+
# ```
|
32
|
+
# unique_key :user_email, email: ->{ self.email&.downcase }
|
33
|
+
# ```
|
34
|
+
def unique_key(scope, *attributes, **kwargs)
|
35
|
+
fail ArgumentError, "'#{scope}' is not a symbol" unless scope.is_a?(Symbol)
|
36
|
+
fail ArgumentError, 'attributes must be symbols' unless attributes.all? { |attr| attr.is_a?(Symbol) }
|
37
|
+
|
38
|
+
@unique_key_definitions ||= {}
|
39
|
+
|
40
|
+
fail ArgumentError, "duplicate scope '#{scope}'" if @unique_key_definitions.include?(scope)
|
41
|
+
|
42
|
+
@unique_key_definitions[scope] = attributes.to_h do |attr|
|
43
|
+
[attr, -> { send(attr) }]
|
44
|
+
end.merge(
|
45
|
+
kwargs.transform_values do |attr|
|
46
|
+
attr.is_a?(Symbol) ? -> { send(attr) } : attr
|
47
|
+
end,
|
48
|
+
) do |key|
|
49
|
+
fail ArgumentError, "duplicate attribute '#{key}'"
|
50
|
+
end
|
51
|
+
end
|
52
|
+
end
|
53
|
+
|
54
|
+
# Returns the unique keys for the current instance based on
|
55
|
+
# the `unique_key` definitions. You can also override it if you
|
56
|
+
# need more complicated logic.
|
57
|
+
#
|
58
|
+
# Example return value:
|
59
|
+
#
|
60
|
+
# ```
|
61
|
+
# {
|
62
|
+
# user_email: { email: 'bob@example.com' }
|
63
|
+
# }
|
64
|
+
# ```
|
65
|
+
def unique_keys
|
66
|
+
return {} if self.class.unique_key_definitions.nil?
|
67
|
+
|
68
|
+
self.class.unique_key_definitions
|
69
|
+
&.transform_values do |attributes|
|
70
|
+
attributes.transform_values { |block| instance_exec(&block) }.compact
|
71
|
+
end
|
72
|
+
&.delete_if { |_, value| value.empty? }
|
73
|
+
end
|
74
|
+
|
75
|
+
def self.included(host_class)
|
76
|
+
host_class.extend(ClassMethods)
|
77
|
+
end
|
78
|
+
end
|
79
|
+
end
|
80
|
+
end
|
81
|
+
end
|
@@ -4,12 +4,28 @@ require 'active_record'
|
|
4
4
|
|
5
5
|
module Sequent
|
6
6
|
module Core
|
7
|
-
EventStream = Data.define(
|
8
|
-
|
7
|
+
EventStream = Data.define(
|
8
|
+
:aggregate_type,
|
9
|
+
:aggregate_id,
|
10
|
+
:events_partition_key,
|
11
|
+
:snapshot_outdated_at,
|
12
|
+
:unique_keys,
|
13
|
+
) do
|
14
|
+
def initialize(aggregate_type:, aggregate_id:, events_partition_key: '', snapshot_outdated_at: nil,
|
15
|
+
unique_keys: {})
|
9
16
|
super
|
10
17
|
end
|
11
18
|
end
|
12
19
|
|
20
|
+
class AggregateUniqueKey < Sequent::ApplicationRecord
|
21
|
+
self.primary_key = %i[aggregate_id scope]
|
22
|
+
self.table_name = 'aggregate_unique_keys'
|
23
|
+
|
24
|
+
validates_presence_of :aggregate_id, :scope, :key
|
25
|
+
|
26
|
+
belongs_to :stream_record, foreign_key: :aggregate_id, primary_key: :aggregate_id
|
27
|
+
end
|
28
|
+
|
13
29
|
class StreamRecord < Sequent::ApplicationRecord
|
14
30
|
self.primary_key = %i[aggregate_id]
|
15
31
|
self.table_name = 'stream_records'
|
@@ -18,12 +34,14 @@ module Sequent
|
|
18
34
|
validates_presence_of :aggregate_type, :aggregate_id
|
19
35
|
|
20
36
|
has_many :event_records, foreign_key: :aggregate_id, primary_key: :aggregate_id
|
37
|
+
has_many :aggregate_unique_keys, foreign_key: :aggregate_id, primary_key: :aggregate_id
|
21
38
|
|
22
39
|
def event_stream
|
23
40
|
EventStream.new(
|
24
41
|
aggregate_type:,
|
25
42
|
aggregate_id:,
|
26
43
|
events_partition_key:,
|
44
|
+
unique_keys: aggregate_unique_keys.to_h { |key| [key.scope.to_sym, key.key] },
|
27
45
|
)
|
28
46
|
end
|
29
47
|
|
@@ -2,6 +2,7 @@
|
|
2
2
|
|
3
3
|
require 'thread_safe'
|
4
4
|
require 'sequent/core/event_store'
|
5
|
+
require 'rspec'
|
5
6
|
|
6
7
|
module Sequent
|
7
8
|
module Test
|
@@ -14,7 +15,22 @@ module Sequent
|
|
14
15
|
# when_command PayInvoiceCommand.new(args)
|
15
16
|
# then_events InvoicePaidEvent.new(args)
|
16
17
|
#
|
17
|
-
#
|
18
|
+
# Given events are applied against the Aggregate so need to represent a correct
|
19
|
+
# sequence of events.
|
20
|
+
#
|
21
|
+
# When a command is executed all generated events are captured and can be
|
22
|
+
# retrieved using `stored_events` or tested using `then_events`.
|
23
|
+
#
|
24
|
+
# The `then_events` expects one class, expected event, or RSpec
|
25
|
+
# matcher for each generated event, in the same order. Example
|
26
|
+
# for RSpec config. When a class is passed, only the type of the
|
27
|
+
# generated event is tested. When an expected event is passed only
|
28
|
+
# the *payload* is compared using the `have_same_payload_as`
|
29
|
+
# matcher defined by this module (`aggregate_id`,
|
30
|
+
# `sequence_number`, and `created_at` are *not* compared). When an
|
31
|
+
# RSpec matcher is passed the actual event is matched against this
|
32
|
+
# matcher, so you can use `eq` or `have_attributes` to do more
|
33
|
+
# specific matching.
|
18
34
|
#
|
19
35
|
# RSpec.configure do |config|
|
20
36
|
# config.include Sequent::Test::CommandHandlerHelpers
|
@@ -39,12 +55,11 @@ module Sequent
|
|
39
55
|
# end
|
40
56
|
module CommandHandlerHelpers
|
41
57
|
class FakeEventStore
|
42
|
-
extend Forwardable
|
43
|
-
|
44
58
|
def initialize
|
45
59
|
@event_streams = {}
|
46
60
|
@all_events = {}
|
47
61
|
@stored_events = []
|
62
|
+
@unique_keys = {}
|
48
63
|
end
|
49
64
|
|
50
65
|
def load_events(aggregate_id)
|
@@ -65,11 +80,18 @@ module Sequent
|
|
65
80
|
@event_streams[aggregate_id]
|
66
81
|
end
|
67
82
|
|
68
|
-
def stored_events
|
69
|
-
deserialize_events(@stored_events)
|
70
|
-
end
|
71
|
-
|
72
83
|
def commit_events(_, streams_with_events)
|
84
|
+
keys = @unique_keys.dup.delete_if do |_key, aggregate_id|
|
85
|
+
streams_with_events.any? { |stream, _| aggregate_id == stream.aggregate_id }
|
86
|
+
end
|
87
|
+
@unique_keys = keys.merge(
|
88
|
+
*streams_with_events.map do |stream, _|
|
89
|
+
stream.unique_keys.to_h { |scope, key| [[scope, key], stream.aggregate_id] }
|
90
|
+
end,
|
91
|
+
) do |_key, id_1, id_2|
|
92
|
+
fail Sequent::Core::AggregateKeyNotUniqueError if id_1 != id_2
|
93
|
+
end
|
94
|
+
|
73
95
|
streams_with_events.each do |event_stream, events|
|
74
96
|
serialized = serialize_events(events)
|
75
97
|
@event_streams[event_stream.aggregate_id] = event_stream
|
@@ -84,11 +106,6 @@ module Sequent
|
|
84
106
|
Sequent.configuration.event_publisher.publish_events(events)
|
85
107
|
end
|
86
108
|
|
87
|
-
def given_events(events)
|
88
|
-
commit_events(nil, to_event_streams(events))
|
89
|
-
@stored_events = []
|
90
|
-
end
|
91
|
-
|
92
109
|
def stream_exists?(aggregate_id)
|
93
110
|
@event_streams.key?(aggregate_id)
|
94
111
|
end
|
@@ -97,38 +114,16 @@ module Sequent
|
|
97
114
|
@event_streams[aggregate_id].present?
|
98
115
|
end
|
99
116
|
|
100
|
-
|
101
|
-
|
102
|
-
def to_event_streams(events)
|
103
|
-
# Specs use a simple list of given events.
|
104
|
-
# We need a mapping from StreamRecord to the associated events for the event store.
|
105
|
-
streams_by_aggregate_id = {}
|
106
|
-
events.map do |event|
|
107
|
-
event_stream = streams_by_aggregate_id.fetch(event.aggregate_id) do |aggregate_id|
|
108
|
-
streams_by_aggregate_id[aggregate_id] =
|
109
|
-
find_event_stream(aggregate_id) ||
|
110
|
-
begin
|
111
|
-
aggregate_type = aggregate_type_for_event(event)
|
112
|
-
unless aggregate_type
|
113
|
-
fail <<~EOS
|
114
|
-
Cannot find aggregate type associated with creation event #{event}, did you include an event handler in your aggregate for this event?
|
115
|
-
EOS
|
116
|
-
end
|
117
|
-
|
118
|
-
Sequent::Core::EventStream.new(aggregate_type: aggregate_type.name, aggregate_id: aggregate_id)
|
119
|
-
end
|
120
|
-
end
|
121
|
-
[event_stream, [event]]
|
122
|
-
end
|
117
|
+
def position_mark
|
118
|
+
@stored_events.length
|
123
119
|
end
|
124
120
|
|
125
|
-
def
|
126
|
-
@
|
127
|
-
@event_to_aggregate_type.fetch_or_store(event.class) do |klass|
|
128
|
-
Sequent::Core::AggregateRoot.descendants.find { |x| x.message_mapping.key?(klass) }
|
129
|
-
end
|
121
|
+
def load_events_since_marked_position(mark)
|
122
|
+
[deserialize_events(@stored_events[mark..]), position_mark]
|
130
123
|
end
|
131
124
|
|
125
|
+
private
|
126
|
+
|
132
127
|
def serialize_events(events)
|
133
128
|
events.map { |event| [event.class.name, Sequent::Core::Oj.dump(event)] }
|
134
129
|
end
|
@@ -140,41 +135,78 @@ module Sequent
|
|
140
135
|
end
|
141
136
|
end
|
142
137
|
|
138
|
+
RSpec::Matchers.define :have_same_payload_as do |expected|
|
139
|
+
match do |actual|
|
140
|
+
actual_hash = Sequent::Core::Oj.strict_load(Sequent::Core::Oj.dump(actual.payload))
|
141
|
+
expected_hash = Sequent::Core::Oj.strict_load(Sequent::Core::Oj.dump(expected.payload))
|
142
|
+
values_match? expected_hash, actual_hash
|
143
|
+
end
|
144
|
+
|
145
|
+
description do
|
146
|
+
expected.to_s
|
147
|
+
end
|
148
|
+
|
149
|
+
diffable
|
150
|
+
end
|
151
|
+
|
143
152
|
def given_events(*events)
|
144
|
-
Sequent.configuration.event_store.
|
153
|
+
Sequent.configuration.event_store.commit_events(
|
154
|
+
Sequent::Core::BaseCommand.new,
|
155
|
+
to_event_streams(events.flatten(1)),
|
156
|
+
)
|
145
157
|
end
|
146
158
|
|
147
159
|
def when_command(command)
|
160
|
+
@helpers_events_position_mark = Sequent.configuration.event_store.position_mark
|
148
161
|
Sequent.configuration.command_service.execute_commands command
|
149
162
|
end
|
150
163
|
|
151
164
|
def then_events(*expected_events)
|
152
|
-
|
153
|
-
|
154
|
-
|
155
|
-
|
156
|
-
|
157
|
-
.event_store
|
158
|
-
.stored_events
|
159
|
-
.zip(expected_events.flatten(1))
|
160
|
-
.each_with_index do |(actual, expected), index|
|
161
|
-
next if expected.instance_of?(Class)
|
162
|
-
|
163
|
-
actual_hash = Sequent::Core::Oj.strict_load(Sequent::Core::Oj.dump(actual.payload))
|
164
|
-
expected_hash = Sequent::Core::Oj.strict_load(Sequent::Core::Oj.dump(expected.payload))
|
165
|
-
next unless expected
|
166
|
-
|
167
|
-
# rubocop:disable Layout/LineLength
|
168
|
-
expect(actual_hash)
|
169
|
-
.to eq(expected_hash),
|
170
|
-
"#{index + 1}th Event of type #{actual.class} not equal\nexpected: #{expected_hash.inspect}\n got: #{actual_hash.inspect}"
|
171
|
-
# rubocop:enable Layout/LineLength
|
165
|
+
matchers = expected_events.flatten(1).map do |expected|
|
166
|
+
if expected.is_a?(Sequent::Core::Event)
|
167
|
+
have_same_payload_as(expected)
|
168
|
+
else
|
169
|
+
expected
|
172
170
|
end
|
171
|
+
end
|
172
|
+
|
173
|
+
expect(stored_events).to match(matchers)
|
173
174
|
end
|
174
175
|
|
175
176
|
def then_no_events
|
176
177
|
then_events
|
177
178
|
end
|
179
|
+
|
180
|
+
def stored_events
|
181
|
+
Sequent.configuration.event_store.load_events_since_marked_position(@helpers_events_position_mark)[0]
|
182
|
+
end
|
183
|
+
|
184
|
+
private
|
185
|
+
|
186
|
+
def to_event_streams(uncommitted_events)
|
187
|
+
# Specs use a simple list of given events.
|
188
|
+
# We need a mapping from StreamRecord to the associated events for the event store.
|
189
|
+
uncommitted_events.group_by(&:aggregate_id).map do |aggregate_id, new_events|
|
190
|
+
_, existing_events = Sequent.configuration.event_store.load_events(aggregate_id) || [nil, []]
|
191
|
+
all_events = existing_events + new_events
|
192
|
+
aggregate_type = aggregate_type_for_event(all_events[0])
|
193
|
+
unless aggregate_type
|
194
|
+
fail <<~EOS
|
195
|
+
Cannot find aggregate type associated with creation event #{all_events[0]}, did you include an event handler in your aggregate for this event?
|
196
|
+
EOS
|
197
|
+
end
|
198
|
+
|
199
|
+
aggregate = aggregate_type.load_from_history(nil, all_events)
|
200
|
+
[aggregate.event_stream, new_events]
|
201
|
+
end
|
202
|
+
end
|
203
|
+
|
204
|
+
def aggregate_type_for_event(event)
|
205
|
+
@helpers_event_to_aggregate_type ||= ThreadSafe::Cache.new
|
206
|
+
@helpers_event_to_aggregate_type.fetch_or_store(event.class) do |klass|
|
207
|
+
Sequent::Core::AggregateRoot.descendants.find { |x| x.message_mapping.key?(klass) }
|
208
|
+
end
|
209
|
+
end
|
178
210
|
end
|
179
211
|
end
|
180
212
|
end
|
data/lib/version.rb
CHANGED
metadata
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: sequent
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 8.0
|
4
|
+
version: 8.1.0
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Lars Vonk
|
@@ -12,7 +12,7 @@ authors:
|
|
12
12
|
autorequire:
|
13
13
|
bindir: bin
|
14
14
|
cert_chain: []
|
15
|
-
date:
|
15
|
+
date: 1980-01-01 00:00:00.000000000 Z
|
16
16
|
dependencies:
|
17
17
|
- !ruby/object:Gem::Dependency
|
18
18
|
name: activemodel
|
@@ -437,6 +437,7 @@ files:
|
|
437
437
|
- lib/sequent/core/helpers/string_validator.rb
|
438
438
|
- lib/sequent/core/helpers/time_validator.rb
|
439
439
|
- lib/sequent/core/helpers/type_conversion_support.rb
|
440
|
+
- lib/sequent/core/helpers/unique_keys.rb
|
440
441
|
- lib/sequent/core/helpers/uuid_helper.rb
|
441
442
|
- lib/sequent/core/helpers/value_validators.rb
|
442
443
|
- lib/sequent/core/middleware/chain.rb
|
@@ -545,7 +546,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
|
|
545
546
|
- !ruby/object:Gem::Version
|
546
547
|
version: '0'
|
547
548
|
requirements: []
|
548
|
-
rubygems_version: 3.5.
|
549
|
+
rubygems_version: 3.5.22
|
549
550
|
signing_key:
|
550
551
|
specification_version: 4
|
551
552
|
summary: Event sourcing framework for Ruby
|