ruby_event_store 2.3.0 → 2.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/ruby_event_store/batch_enumerator.rb +3 -3
- data/lib/ruby_event_store/broker.rb +5 -4
- data/lib/ruby_event_store/client.rb +47 -45
- data/lib/ruby_event_store/composed_dispatcher.rb +1 -3
- data/lib/ruby_event_store/correlated_commands.rb +4 -15
- data/lib/ruby_event_store/errors.rb +11 -11
- data/lib/ruby_event_store/event.rb +6 -14
- data/lib/ruby_event_store/expected_version.rb +3 -7
- data/lib/ruby_event_store/in_memory_repository.rb +42 -31
- data/lib/ruby_event_store/instrumented_dispatcher.rb +3 -4
- data/lib/ruby_event_store/instrumented_repository.rb +3 -4
- data/lib/ruby_event_store/link_by_metadata.rb +4 -21
- data/lib/ruby_event_store/mappers/default.rb +6 -4
- data/lib/ruby_event_store/mappers/encryption_key.rb +7 -16
- data/lib/ruby_event_store/mappers/encryption_mapper.rb +6 -6
- data/lib/ruby_event_store/mappers/null_mapper.rb +0 -1
- data/lib/ruby_event_store/mappers/pipeline.rb +3 -10
- data/lib/ruby_event_store/mappers/pipeline_mapper.rb +1 -0
- data/lib/ruby_event_store/mappers/transformation/domain_event.rb +21 -21
- data/lib/ruby_event_store/mappers/transformation/encryption.rb +21 -25
- data/lib/ruby_event_store/mappers/transformation/event_class_remapper.rb +6 -5
- data/lib/ruby_event_store/mappers/transformation/stringify_metadata_keys.rb +6 -5
- data/lib/ruby_event_store/mappers/transformation/symbolize_metadata_keys.rb +6 -5
- data/lib/ruby_event_store/mappers/transformation/upcast.rb +2 -6
- data/lib/ruby_event_store/metadata.rb +43 -14
- data/lib/ruby_event_store/projection.rb +10 -18
- data/lib/ruby_event_store/record.rb +14 -26
- data/lib/ruby_event_store/serialized_record.rb +14 -26
- data/lib/ruby_event_store/serializers/yaml.rb +17 -0
- data/lib/ruby_event_store/spec/broker_lint.rb +27 -17
- data/lib/ruby_event_store/spec/event_lint.rb +1 -1
- data/lib/ruby_event_store/spec/event_repository_lint.rb +513 -556
- data/lib/ruby_event_store/spec/mapper_lint.rb +2 -2
- data/lib/ruby_event_store/spec/subscriptions_lint.rb +23 -22
- data/lib/ruby_event_store/specification.rb +20 -16
- data/lib/ruby_event_store/specification_reader.rb +2 -3
- data/lib/ruby_event_store/specification_result.rb +52 -46
- data/lib/ruby_event_store/stream.rb +3 -7
- data/lib/ruby_event_store/subscriptions.rb +13 -14
- data/lib/ruby_event_store/transform_keys.rb +1 -1
- data/lib/ruby_event_store/version.rb +1 -1
- data/lib/ruby_event_store.rb +1 -0
- metadata +6 -18
--- data/lib/ruby_event_store/spec/event_repository_lint.rb
+++ data/lib/ruby_event_store/spec/event_repository_lint.rb
@@ -2,52 +2,55 @@ module RubyEventStore
  # @private
  class SRecord
  def self.new(
- event_id:
- data:
- metadata:
+ event_id: SecureRandom.uuid,
+ data: {},
+ metadata: {},
  event_type: "SRecordTestEvent",
- timestamp:
- valid_at:
+ timestamp: Time.new.utc,
+ valid_at: nil
  )
  Record.new(
- event_id:
- data:
- metadata:
+ event_id: event_id,
+ data: data,
+ metadata: metadata,
  event_type: event_type,
- timestamp:
- valid_at:
+ timestamp: timestamp.round(TIMESTAMP_PRECISION),
+ valid_at: (valid_at || timestamp).round(TIMESTAMP_PRECISION)
  )
  end
  end

  # @private
  Type1 = Class.new(RubyEventStore::Event)
+
  # @private
  Type2 = Class.new(RubyEventStore::Event)
+
  # @private
  Type3 = Class.new(RubyEventStore::Event)
  end

  module RubyEventStore
  ::RSpec.shared_examples :event_repository do |mk_repository, helper|
- let(:repository)
+ let(:repository) { mk_repository.call }
  let(:specification) { Specification.new(SpecificationReader.new(repository, Mappers::NullMapper.new)) }
  let(:global_stream) { Stream.new(GLOBAL_STREAM) }
- let(:stream)
- let(:stream_flow)
- let(:stream_other)
- let(:stream_test)
- let(:version_none)
- let(:version_auto)
- let(:version_any)
- let(:version_0)
- let(:version_1)
- let(:version_2)
- let(:version_3)
+ let(:stream) { Stream.new(SecureRandom.uuid) }
+ let(:stream_flow) { Stream.new("flow") }
+ let(:stream_other) { Stream.new("other") }
+ let(:stream_test) { Stream.new("test") }
+ let(:version_none) { ExpectedVersion.none }
+ let(:version_auto) { ExpectedVersion.auto }
+ let(:version_any) { ExpectedVersion.any }
+ let(:version_0) { ExpectedVersion.new(0) }
+ let(:version_1) { ExpectedVersion.new(1) }
+ let(:version_2) { ExpectedVersion.new(2) }
+ let(:version_3) { ExpectedVersion.new(3) }

  def verify_conncurency_assumptions(helper)
  return unless helper.has_connection_pooling?
- expect(helper.connection_pool_size).to eq(5),
+ expect(helper.connection_pool_size).to eq(5),
+ "expected connection pool of size 5, got #{helper.connection_pool_size}"
  end

  def read_events(repository, scope, stream = nil, from: nil, to: nil, count: nil)
@@ -104,64 +107,39 @@ module RubyEventStore
  end

  specify "adds multiple initial events to a new stream" do
- repository.append_to_stream([
- event0 = SRecord.new,
- event1 = SRecord.new,
- ], stream, version_none)
+ repository.append_to_stream([event0 = SRecord.new, event1 = SRecord.new], stream, version_none)
  expect(read_events_forward(repository, count: 2)).to eq([event0, event1])
  expect(read_events_forward(repository, stream)).to eq([event0, event1])
  end

  specify "links multiple initial events to a new stream" do
- repository
- event0 = SRecord.new,
- event1
- ], stream, version_none).link_to_stream([
- event0.event_id,
- event1.event_id,
- ], stream_flow, version_none)
+ repository
+ .append_to_stream([event0 = SRecord.new, event1 = SRecord.new], stream, version_none)
+ .link_to_stream([event0.event_id, event1.event_id], stream_flow, version_none)
  expect(read_events_forward(repository, count: 2)).to eq([event0, event1])
  expect(read_events_forward(repository, stream_flow)).to eq([event0, event1])
  end

  specify "correct expected version on second write" do
- repository.append_to_stream([
-
- event1 = SRecord.new,
- ], stream, version_none)
- repository.append_to_stream([
- event2 = SRecord.new,
- event3 = SRecord.new,
- ], stream, version_1)
+ repository.append_to_stream([event0 = SRecord.new, event1 = SRecord.new], stream, version_none)
+ repository.append_to_stream([event2 = SRecord.new, event3 = SRecord.new], stream, version_1)
  expect(read_events_forward(repository, count: 4)).to eq([event0, event1, event2, event3])
  expect(read_events_forward(repository, stream)).to eq([event0, event1, event2, event3])
  end

  specify "correct expected version on second link" do
- repository
- event0 = SRecord.new,
-
-
- event2 = SRecord.new,
- event3 = SRecord.new,
- ], stream_flow, version_none).link_to_stream([
- event0.event_id,
- event1.event_id,
- ], stream_flow, version_1)
+ repository
+ .append_to_stream([event0 = SRecord.new, event1 = SRecord.new], stream, version_none)
+ .append_to_stream([event2 = SRecord.new, event3 = SRecord.new], stream_flow, version_none)
+ .link_to_stream([event0.event_id, event1.event_id], stream_flow, version_1)
  expect(read_events_forward(repository, count: 4)).to eq([event0, event1, event2, event3])
  expect(read_events_forward(repository, stream_flow)).to eq([event2, event3, event0, event1])
  end

  specify "incorrect expected version on second write" do
- repository.append_to_stream([
- event0 = SRecord.new,
- event1 = SRecord.new,
- ], stream, version_none)
+ repository.append_to_stream([event0 = SRecord.new, event1 = SRecord.new], stream, version_none)
  expect do
- repository.append_to_stream([
- event2 = SRecord.new,
- event3 = SRecord.new,
- ], stream, version_0)
+ repository.append_to_stream([event2 = SRecord.new, event3 = SRecord.new], stream, version_0)
  end.to raise_error(WrongExpectedEventVersion)

  expect(read_events_forward(repository, count: 4)).to eq([event0, event1])
@@ -169,233 +147,140 @@ module RubyEventStore
  end

  specify "incorrect expected version on second link" do
- repository.append_to_stream([
-
-
-
-
- event2 = SRecord.new,
- event3 = SRecord.new,
- ], stream_other, version_none)
- expect do
- repository.link_to_stream([
- event2.event_id,
- event3.event_id,
- ], stream, version_0)
- end.to raise_error(WrongExpectedEventVersion)
+ repository.append_to_stream([event0 = SRecord.new, event1 = SRecord.new], stream, version_none)
+ repository.append_to_stream([event2 = SRecord.new, event3 = SRecord.new], stream_other, version_none)
+ expect { repository.link_to_stream([event2.event_id, event3.event_id], stream, version_0) }.to raise_error(
+ WrongExpectedEventVersion
+ )

  expect(read_events_forward(repository, count: 4)).to eq([event0, event1, event2, event3])
  expect(read_events_forward(repository, stream)).to eq([event0, event1])
  end

  specify ":none on first and subsequent write" do
- repository.append_to_stream([
-
-
-
- repository.append_to_stream([
- eventB = SRecord.new,
- ], stream, version_none)
- end.to raise_error(WrongExpectedEventVersion)
+ repository.append_to_stream([eventA = SRecord.new], stream, version_none)
+ expect { repository.append_to_stream([eventB = SRecord.new], stream, version_none) }.to raise_error(
+ WrongExpectedEventVersion
+ )
  expect(read_events_forward(repository, count: 1)).to eq([eventA])
  expect(read_events_forward(repository, stream)).to eq([eventA])
  end

  specify ":none on first and subsequent link" do
- repository.append_to_stream([
- eventA = SRecord.new,
- eventB = SRecord.new,
- ], stream, version_none)
+ repository.append_to_stream([eventA = SRecord.new, eventB = SRecord.new], stream, version_none)

  repository.link_to_stream([eventA.event_id], stream_flow, version_none)
- expect
-
-
+ expect { repository.link_to_stream([eventB.event_id], stream_flow, version_none) }.to raise_error(
+ WrongExpectedEventVersion
+ )

  expect(read_events_forward(repository, count: 1)).to eq([eventA])
  expect(read_events_forward(repository, stream_flow)).to eq([eventA])
  end

  specify ":any allows stream with best-effort order and no guarantee" do
- repository.append_to_stream([
-
- event1 = SRecord.new,
- ], stream, version_any)
- repository.append_to_stream([
- event2 = SRecord.new,
- event3 = SRecord.new,
- ], stream, version_any)
+ repository.append_to_stream([event0 = SRecord.new, event1 = SRecord.new], stream, version_any)
+ repository.append_to_stream([event2 = SRecord.new, event3 = SRecord.new], stream, version_any)
  expect(read_events_forward(repository, count: 4).to_set).to eq(Set.new([event0, event1, event2, event3]))
  expect(read_events_forward(repository, stream).to_set).to eq(Set.new([event0, event1, event2, event3]))
  end

  specify ":any allows linking in stream with best-effort order and no guarantee" do
- repository.append_to_stream(
- event0 = SRecord.new,
-
-
-
-
-
- repository.link_to_stream([
- event0.event_id, event1.event_id,
- ], stream_flow, version_any)
- repository.link_to_stream([
- event2.event_id, event3.event_id,
- ], stream_flow, version_any)
+ repository.append_to_stream(
+ [event0 = SRecord.new, event1 = SRecord.new, event2 = SRecord.new, event3 = SRecord.new],
+ stream,
+ version_any
+ )
+
+ repository.link_to_stream([event0.event_id, event1.event_id], stream_flow, version_any)
+ repository.link_to_stream([event2.event_id, event3.event_id], stream_flow, version_any)

  expect(read_events_forward(repository, count: 4).to_set).to eq(Set.new([event0, event1, event2, event3]))
  expect(read_events_forward(repository, stream_flow).to_set).to eq(Set.new([event0, event1, event2, event3]))
  end

  specify ":auto queries for last position in given stream" do
- repository.append_to_stream(
- eventA = SRecord.new,
-
-
-
- repository.append_to_stream([
-
- event1 = SRecord.new,
- ], stream, version_auto)
- repository.append_to_stream([
- event2 = SRecord.new,
- event3 = SRecord.new,
- ], stream, version_1)
+ repository.append_to_stream(
+ [eventA = SRecord.new, eventB = SRecord.new, eventC = SRecord.new],
+ stream_other,
+ version_auto
+ )
+ repository.append_to_stream([event0 = SRecord.new, event1 = SRecord.new], stream, version_auto)
+ repository.append_to_stream([event2 = SRecord.new, event3 = SRecord.new], stream, version_1)
  end

  specify ":auto queries for last position in given stream when linking" do
- repository.append_to_stream(
- eventA = SRecord.new,
-
-
-
- repository.append_to_stream([
-
- event1 = SRecord.new,
- ], stream, version_auto)
- repository.link_to_stream([
- eventA.event_id,
- eventB.event_id,
- eventC.event_id,
- ], stream, version_1)
+ repository.append_to_stream(
+ [eventA = SRecord.new, eventB = SRecord.new, eventC = SRecord.new],
+ stream_other,
+ version_auto
+ )
+ repository.append_to_stream([event0 = SRecord.new, event1 = SRecord.new], stream, version_auto)
+ repository.link_to_stream([eventA.event_id, eventB.event_id, eventC.event_id], stream, version_1)
  end

  specify ":auto starts from 0" do
- repository.append_to_stream([
-
-
-
- repository.append_to_stream([
- event1 = SRecord.new,
- ], stream, version_none)
- end.to raise_error(WrongExpectedEventVersion)
+ repository.append_to_stream([event0 = SRecord.new], stream, version_auto)
+ expect { repository.append_to_stream([event1 = SRecord.new], stream, version_none) }.to raise_error(
+ WrongExpectedEventVersion
+ )
  end

  specify ":auto linking starts from 0" do
- repository.append_to_stream([
-
- ],
-
-
- ], stream, version_auto)
- expect do
- repository.append_to_stream([
- event1 = SRecord.new,
- ], stream, version_none)
- end.to raise_error(WrongExpectedEventVersion)
+ repository.append_to_stream([event0 = SRecord.new], stream_other, version_auto)
+ repository.link_to_stream([event0.event_id], stream, version_auto)
+ expect { repository.append_to_stream([event1 = SRecord.new], stream, version_none) }.to raise_error(
+ WrongExpectedEventVersion
+ )
  end

  specify ":auto queries for last position and follows in incremental way" do
  # It is expected that there is higher level lock
  # So this query is safe from race conditions
- repository.append_to_stream([
-
-
- ], stream, version_auto)
- repository.append_to_stream([
- event2 = SRecord.new,
- event3 = SRecord.new,
- ], stream, version_auto)
- expect(read_events_forward(repository, count: 4)).to eq([
- event0, event1,
- event2, event3
- ])
+ repository.append_to_stream([event0 = SRecord.new, event1 = SRecord.new], stream, version_auto)
+ repository.append_to_stream([event2 = SRecord.new, event3 = SRecord.new], stream, version_auto)
+ expect(read_events_forward(repository, count: 4)).to eq([event0, event1, event2, event3])
  expect(read_events_forward(repository, stream)).to eq([event0, event1, event2, event3])
  end

  specify ":auto queries for last position and follows in incremental way when linking" do
- repository.append_to_stream(
- event0 = SRecord.new,
-
-
-
- ],
- repository.link_to_stream([
-
- ], stream_flow, version_auto)
- repository.link_to_stream([
- event2.event_id, event3.event_id,
- ], stream_flow, version_auto)
- expect(read_events_forward(repository, count: 4)).to eq([
- event0, event1,
- event2, event3
- ])
+ repository.append_to_stream(
+ [event0 = SRecord.new, event1 = SRecord.new, event2 = SRecord.new, event3 = SRecord.new],
+ stream,
+ version_auto
+ )
+ repository.link_to_stream([event0.event_id, event1.event_id], stream_flow, version_auto)
+ repository.link_to_stream([event2.event_id, event3.event_id], stream_flow, version_auto)
+ expect(read_events_forward(repository, count: 4)).to eq([event0, event1, event2, event3])
  expect(read_events_forward(repository, stream_flow)).to eq([event0, event1, event2, event3])
  end

  specify ":auto is compatible with manual expectation" do
- repository.append_to_stream([
-
- event1 = SRecord.new,
- ], stream, version_auto)
- repository.append_to_stream([
- event2 = SRecord.new,
- event3 = SRecord.new,
- ], stream, version_1)
+ repository.append_to_stream([event0 = SRecord.new, event1 = SRecord.new], stream, version_auto)
+ repository.append_to_stream([event2 = SRecord.new, event3 = SRecord.new], stream, version_1)
  expect(read_events_forward(repository, count: 4)).to eq([event0, event1, event2, event3])
  expect(read_events_forward(repository, stream)).to eq([event0, event1, event2, event3])
  end

  specify ":auto is compatible with manual expectation when linking" do
- repository.append_to_stream([
-
-
-
- repository.
- event0.event_id,
- ], stream_flow, version_auto)
- repository.link_to_stream([
- event1.event_id,
- ], stream_flow, version_0)
- expect(read_events_forward(repository, count: 4)).to eq([event0, event1,])
- expect(read_events_forward(repository, stream_flow)).to eq([event0, event1,])
+ repository.append_to_stream([event0 = SRecord.new, event1 = SRecord.new], stream, version_auto)
+ repository.link_to_stream([event0.event_id], stream_flow, version_auto)
+ repository.link_to_stream([event1.event_id], stream_flow, version_0)
+ expect(read_events_forward(repository, count: 4)).to eq([event0, event1])
+ expect(read_events_forward(repository, stream_flow)).to eq([event0, event1])
  end

  specify "manual is compatible with auto expectation" do
- repository.append_to_stream([
-
- event1 = SRecord.new,
- ], stream, version_none)
- repository.append_to_stream([
- event2 = SRecord.new,
- event3 = SRecord.new,
- ], stream, version_auto)
+ repository.append_to_stream([event0 = SRecord.new, event1 = SRecord.new], stream, version_none)
+ repository.append_to_stream([event2 = SRecord.new, event3 = SRecord.new], stream, version_auto)
  expect(read_events_forward(repository, count: 4)).to eq([event0, event1, event2, event3])
  expect(read_events_forward(repository, stream)).to eq([event0, event1, event2, event3])
  end

  specify "manual is compatible with auto expectation when linking" do
- repository.append_to_stream([
-
-
- ], stream, version_auto)
- repository.link_to_stream([
- event0.event_id,
- ], stream_flow, version_none)
- repository.link_to_stream([
- event1.event_id,
- ], stream_flow, version_auto)
+ repository.append_to_stream([event0 = SRecord.new, event1 = SRecord.new], stream, version_auto)
+ repository.link_to_stream([event0.event_id], stream_flow, version_none)
+ repository.link_to_stream([event1.event_id], stream_flow, version_auto)
  expect(read_events_forward(repository, count: 4)).to eq([event0, event1])
  expect(read_events_forward(repository, stream_flow)).to eq([event0, event1])
  end
@@ -405,34 +290,31 @@ module RubyEventStore
  verify_conncurency_assumptions(helper)
  begin
  concurrency_level = 4
- fail_occurred
- wait_for_it
+ fail_occurred = false
+ wait_for_it = true

- threads =
-
-
-
-
-
-
- SRecord.new(event_id: eid),
-
+ threads =
+ concurrency_level.times.map do |i|
+ Thread.new do
+ true while wait_for_it
+ begin
+ 100.times do |j|
+ eid = "0000000#{i}-#{sprintf("%04d", j)}-0000-0000-000000000000"
+ repository.append_to_stream([SRecord.new(event_id: eid)], stream, version_any)
+ end
+ rescue WrongExpectedEventVersion
+ fail_occurred = true
  end
- rescue WrongExpectedEventVersion
- fail_occurred = true
  end
  end
- end
  wait_for_it = false
  threads.each(&:join)
  expect(fail_occurred).to eq(false)
  expect(read_events_forward(repository, stream).size).to eq(400)
  events_in_stream = read_events_forward(repository, stream)
  expect(events_in_stream.size).to eq(400)
- events0 = events_in_stream.select
-
- end
- expect(events0).to eq(events0.sort_by{|ev| ev.event_id })
+ events0 = events_in_stream.select { |ev| ev.event_id.start_with?("0-") }
+ expect(events0).to eq(events0.sort_by { |ev| ev.event_id })
  end
  end

@@ -441,86 +323,85 @@ module RubyEventStore
  verify_conncurency_assumptions(helper)
  begin
  concurrency_level = 4
- fail_occurred
- wait_for_it
+ fail_occurred = false
+ wait_for_it = true

  concurrency_level.times.map do |i|
  100.times do |j|
  eid = "0000000#{i}-#{sprintf("%04d", j)}-0000-0000-000000000000"
- repository.append_to_stream([
- SRecord.new(event_id: eid),
- ], stream, version_any)
+ repository.append_to_stream([SRecord.new(event_id: eid)], stream, version_any)
  end
  end

- threads =
-
-
-
-
-
-
+ threads =
+ concurrency_level.times.map do |i|
+ Thread.new do
+ true while wait_for_it
+ begin
+ 100.times do |j|
+ eid = "0000000#{i}-#{sprintf("%04d", j)}-0000-0000-000000000000"
+ repository.link_to_stream([eid], stream_flow, version_any)
+ end
+ rescue WrongExpectedEventVersion
+ fail_occurred = true
  end
- rescue WrongExpectedEventVersion
- fail_occurred = true
  end
  end
- end
  wait_for_it = false
  threads.each(&:join)
  expect(fail_occurred).to eq(false)
  expect(read_events_forward(repository, stream_flow).size).to eq(400)
  events_in_stream = read_events_forward(repository, stream_flow)
  expect(events_in_stream.size).to eq(400)
- events0 = events_in_stream.select
-
- end
- expect(events0).to eq(events0.sort_by{|ev| ev.event_id })
+ events0 = events_in_stream.select { |ev| ev.event_id.start_with?("0-") }
+ expect(events0).to eq(events0.sort_by { |ev| ev.event_id })
  end
  end

- specify "limited concurrency for :auto - some operations will fail without outside lock, stream is ordered",
+ specify "limited concurrency for :auto - some operations will fail without outside lock, stream is ordered",
+ mutant: false do
  skip unless helper.supports_concurrent_auto?
  verify_conncurency_assumptions(helper)
  begin
  concurrency_level = 4

  fail_occurred = 0
- wait_for_it
+ wait_for_it = true

- threads =
-
-
-
-
-
-
- SRecord.new(event_id: eid),
-
-
-
-
+ threads =
+ concurrency_level.times.map do |i|
+ Thread.new do
+ true while wait_for_it
+ 100.times do |j|
+ begin
+ eid = "0000000#{i}-#{sprintf("%04d", j)}-0000-0000-000000000000"
+ repository.append_to_stream([SRecord.new(event_id: eid)], stream, version_auto)
+ sleep(rand(concurrency_level) / 1000.0)
+ rescue WrongExpectedEventVersion
+ fail_occurred += 1
+ end
  end
  end
  end
- end
  wait_for_it = false
  threads.each(&:join)
  expect(fail_occurred).to be > 0
  events_in_stream = read_events_forward(repository, stream)
  expect(events_in_stream.size).to be < 400
  expect(events_in_stream.size).to be >= 100
- events0 = events_in_stream.select
-
- end
- expect(events0).to eq(events0.sort_by{|ev| ev.event_id })
+ events0 = events_in_stream.select { |ev| ev.event_id.start_with?("0-") }
+ expect(events0).to eq(events0.sort_by { |ev| ev.event_id })

- positions =
+ positions =
+ repository
+ .read(specification.stream(stream.name).result)
+ .map { |r| repository.position_in_stream(r.event_id, stream) }
  expect(positions).to eq((0...positions.size).to_a)
  end
  end

- specify "limited concurrency for :auto - some operations will fail without outside lock, stream is ordered",
+ specify "limited concurrency for :auto - some operations will fail without outside lock, stream is ordered",
+ mutant: false do
  skip unless helper.supports_concurrent_auto?
  verify_conncurency_assumptions(helper)
  begin
@@ -529,41 +410,41 @@ module RubyEventStore
  concurrency_level.times.map do |i|
  100.times do |j|
  eid = "0000000#{i}-#{sprintf("%04d", j)}-0000-0000-000000000000"
- repository.append_to_stream([
- SRecord.new(event_id: eid),
- ], stream_other, version_any)
+ repository.append_to_stream([SRecord.new(event_id: eid)], stream_other, version_any)
  end
  end

  fail_occurred = 0
- wait_for_it
+ wait_for_it = true

- threads =
-
-
-
-
-
-
-
-
-
+ threads =
+ concurrency_level.times.map do |i|
+ Thread.new do
+ true while wait_for_it
+ 100.times do |j|
+ begin
+ eid = "0000000#{i}-#{sprintf("%04d", j)}-0000-0000-000000000000"
+ repository.link_to_stream([eid], stream, version_auto)
+ sleep(rand(concurrency_level) / 1000.0)
+ rescue WrongExpectedEventVersion
+ fail_occurred += 1
+ end
  end
  end
  end
- end
  wait_for_it = false
  threads.each(&:join)
  expect(fail_occurred).to be > 0
  events_in_stream = read_events_forward(repository, stream)
  expect(events_in_stream.size).to be < 400
  expect(events_in_stream.size).to be >= 100
- events0 = events_in_stream.select
-
- end
- expect(events0).to eq(events0.sort_by{|ev| ev.event_id })
+ events0 = events_in_stream.select { |ev| ev.event_id.start_with?("0-") }
+ expect(events0).to eq(events0.sort_by { |ev| ev.event_id })

- positions =
+ positions =
+ repository
+ .read(specification.stream(stream.name).result)
+ .map { |r| repository.position_in_stream(r.event_id, stream) }
  expect(positions).to eq((0...positions.size).to_a)
  end
  end
@@ -591,10 +472,7 @@ module RubyEventStore
  end

  it "data and metadata attributes are retrieved when linking" do
- event = SRecord.new(
- data: { "order_id" => 3 },
- metadata: { "request_id" => 4},
- )
+ event = SRecord.new(data: { "order_id" => 3 }, metadata: { "request_id" => 4 })
  repository
  .append_to_stream([event], stream, version_any)
  .link_to_stream([event.event_id], stream_flow, version_any)
@@ -611,7 +489,7 @@ module RubyEventStore
  repository.delete_stream(stream)
  expect(read_events_forward(repository, stream)).to be_empty
  expect(read_events_forward(repository, stream_other)).to eq([e2])
- expect(read_events_forward(repository, count: 10)).to eq([e1,e2])
+ expect(read_events_forward(repository, count: 10)).to eq([e1, e2])
  end

  it "does not have deleted streams with linked events" do
@@ -639,10 +517,7 @@ module RubyEventStore

  it "#position_in_stream happy path" do
  skip unless helper.supports_position_queries?
- repository.append_to_stream([
- event0 = SRecord.new,
- event1 = SRecord.new
- ], stream, version_auto)
+ repository.append_to_stream([event0 = SRecord.new, event1 = SRecord.new], stream, version_auto)

  expect(repository.position_in_stream(event0.event_id, stream)).to eq(0)
  expect(repository.position_in_stream(event1.event_id, stream)).to eq(1)
@@ -650,14 +525,8 @@ module RubyEventStore

  it "#position_in_stream happy path with linking" do
  skip unless helper.supports_position_queries?
- repository.append_to_stream([
-
- event1 = SRecord.new
- ], stream, version_auto)
- repository.link_to_stream([
- event1.event_id,
- event0.event_id,
- ], stream_other, version_auto)
+ repository.append_to_stream([event0 = SRecord.new, event1 = SRecord.new], stream, version_auto)
+ repository.link_to_stream([event1.event_id, event0.event_id], stream_other, version_auto)

  expect(repository.position_in_stream(event0.event_id, stream)).to eq(0)
  expect(repository.position_in_stream(event1.event_id, stream)).to eq(1)
@@ -669,9 +538,7 @@ module RubyEventStore
  skip unless helper.supports_position_queries?
  just_an_id = "d5c134c2-db65-4e87-b6ea-d196f8f1a292"

- expect
- repository.position_in_stream(just_an_id, stream)
- end.to raise_error(EventNotFoundInStream)
+ expect { repository.position_in_stream(just_an_id, stream) }.to raise_error(EventNotFoundInStream)
  end

  it "#position_in_stream when event is published without position" do
@@ -683,10 +550,7 @@ module RubyEventStore

  it "#global_position happy path" do
  skip unless helper.supports_position_queries?
- repository.append_to_stream([
- event0 = SRecord.new,
- event1 = SRecord.new
- ], stream, version_any)
+ repository.append_to_stream([event0 = SRecord.new, event1 = SRecord.new], stream, version_any)

  expect(repository.global_position(event0.event_id)).to eq(0)
  expect(repository.global_position(event1.event_id)).to eq(1)
@@ -696,46 +560,94 @@ module RubyEventStore
  skip unless helper.supports_position_queries?
  just_an_id = "d5c134c2-db65-4e87-b6ea-d196f8f1a292"

- expect do
- repository.global_position(just_an_id)
- end.to raise_error do |err|
+ expect { repository.global_position(just_an_id) }.to raise_error do |err|
  expect(err).to be_a(EventNotFound)
  expect(err.event_id).to eq(just_an_id)
  end
  end

+ it "#event_in_stream? when event does not exist" do
+ skip unless helper.supports_event_in_stream_query?
+ repository.append_to_stream([SRecord.new], stream, version_any)
+ just_an_id = "d5c134c2-db65-4e87-b6ea-d196f8f1a292"
+
+ expect(repository.event_in_stream?(just_an_id, stream)).to eq(false)
+ end
+
+ it "#event_in_stream? when event published into stream" do
+ skip unless helper.supports_event_in_stream_query?
+ repository.append_to_stream([event0 = SRecord.new], stream, version_any)
+
+ expect(repository.event_in_stream?(event0.event_id, stream)).to eq(true)
+ end
+
+ it "#event_in_stream? when event not linked into stream" do
+ skip unless helper.supports_event_in_stream_query?
+ repository.append_to_stream([SRecord.new], stream_flow, version_any)
+ repository.append_to_stream([event1 = SRecord.new], stream, version_any)
+
+ expect(repository.event_in_stream?(event1.event_id, stream_flow)).to eq(false)
+ end
+
+ it "#event_in_stream? when event linked into stream" do
+ skip unless helper.supports_event_in_stream_query?
+ repository.append_to_stream([event0 = SRecord.new], stream, version_any)
+ repository.link_to_stream([event0.event_id], stream_flow, version_any)
+
+ expect(repository.event_in_stream?(event0.event_id, stream_flow)).to eq(true)
+ end
+
+ it "#event_in_stream? when stream is empty" do
+ skip unless helper.supports_event_in_stream_query?
+ just_an_id = "d5c134c2-db65-4e87-b6ea-d196f8f1a292"
+
+ expect(repository.event_in_stream?(just_an_id, stream)).to eq(false)
+ end
+
  it "knows last event in stream" do
- repository.append_to_stream(
-
+ repository.append_to_stream(
+ [a = SRecord.new(event_id: "00000000-0000-0000-0000-000000000001")],
+ stream,
+ version_none
+ )
+ repository.append_to_stream(
+ [b = SRecord.new(event_id: "00000000-0000-0000-0000-000000000002")],
+ stream,
+ version_0
+ )

  expect(repository.last_stream_event(stream)).to eq(b)
  expect(repository.last_stream_event(stream_other)).to be_nil
  end

  it "knows last event in stream when linked" do
- repository
-
-
-
-
-
-
+ repository
+ .append_to_stream(
+ [
+ e0 = SRecord.new(event_id: "00000000-0000-0000-0000-000000000001"),
+ e1 = SRecord.new(event_id: "00000000-0000-0000-0000-000000000002")
+ ],
+ stream,
+ version_none
+ )
+ .link_to_stream([e1.event_id, e0.event_id], stream_flow, version_none)
  expect(repository.last_stream_event(stream_flow)).to eq(e0)
  end

  it "reads batch of events from stream forward & backward" do
- events =
-
-
-
-
-
-
-
-
-
-
-
+ events =
+ %w[
+ 96c920b1-cdd0-40f4-907c-861b9fff7d02
+ 56404f79-0ba0-4aa0-8524-dc3436368ca0
+ 6a54dd21-f9d8-4857-a195-f5588d9e406c
+ 0e50a9cd-f981-4e39-93d5-697fc7285b98
+ d85589bc-b993-41d4-812f-fc631d9185d5
+ 96bdacda-77dd-4d7d-973d-cbdaa5842855
+ 94688199-e6b7-4180-bf8e-825b6808e6cc
+ 68fab040-741e-4bc2-9cca-5b8855b0ca19
+ ab60114c-011d-4d58-ab31-7ba65d99975e
+ 868cac42-3d19-4b39-84e8-cd32d65c2445
+ ].map { |id| SRecord.new(event_id: id) }
  repository.append_to_stream([SRecord.new], stream_other, version_none)
  events.each.with_index do |event, index|
  repository.append_to_stream([event], stream, ExpectedVersion.new(index - 1))
@@ -752,25 +664,30 @@ module RubyEventStore

  expect(read_events_backward(repository, stream, count: 3)).to eq(events.last(3).reverse)
  expect(read_events_backward(repository, stream, count: 100)).to eq(events.reverse)
- expect(read_events_backward(repository, stream, from: events[4].event_id, count: 4)).to eq(
-
+ expect(read_events_backward(repository, stream, from: events[4].event_id, count: 4)).to eq(
+ events.first(4).reverse
+ )
+ expect(read_events_backward(repository, stream, from: events[4].event_id, count: 100)).to eq(
+ events.first(4).reverse
+ )
  expect(read_events_backward(repository, stream, to: events[4].event_id, count: 4)).to eq(events.last(4).reverse)
  expect(read_events_backward(repository, stream, to: events[4].event_id, count: 100)).to eq(events.last(5).reverse)
  end

  it "reads batch of linked events from stream forward & backward" do
- events =
-
-
-
-
-
-
-
-
-
-
-
+ events =
+ %w[
+ 96c920b1-cdd0-40f4-907c-861b9fff7d02
+ 56404f79-0ba0-4aa0-8524-dc3436368ca0
+ 6a54dd21-f9d8-4857-a195-f5588d9e406c
+ 0e50a9cd-f981-4e39-93d5-697fc7285b98
+ d85589bc-b993-41d4-812f-fc631d9185d5
+ 96bdacda-77dd-4d7d-973d-cbdaa5842855
+ 94688199-e6b7-4180-bf8e-825b6808e6cc
+ 68fab040-741e-4bc2-9cca-5b8855b0ca19
+ ab60114c-011d-4d58-ab31-7ba65d99975e
+ 868cac42-3d19-4b39-84e8-cd32d65c2445
+ ].map { |id| SRecord.new(event_id: id) }
  repository.append_to_stream([SRecord.new], stream_other, version_none)
  events.each.with_index do |event, index|
  repository
@@ -788,10 +705,18 @@ module RubyEventStore

  expect(read_events_backward(repository, stream_flow, count: 3)).to eq(events.last(3).reverse)
  expect(read_events_backward(repository, stream_flow, count: 100)).to eq(events.reverse)
- expect(read_events_backward(repository, stream_flow, from: events[4].event_id, count: 4)).to eq(
-
-
- expect(read_events_backward(repository, stream_flow,
+ expect(read_events_backward(repository, stream_flow, from: events[4].event_id, count: 4)).to eq(
+ events.first(4).reverse
+ )
+ expect(read_events_backward(repository, stream_flow, from: events[4].event_id, count: 100)).to eq(
+ events.first(4).reverse
+ )
+ expect(read_events_backward(repository, stream_flow, to: events[4].event_id, count: 4)).to eq(
+ events[6..9].reverse
+ )
+ expect(read_events_backward(repository, stream_flow, to: events[4].event_id, count: 100)).to eq(
+ events[5..9].reverse
+ )
  end

  it "reads all stream events forward & backward" do
@@ -804,8 +729,8 @@ module RubyEventStore
  .append_to_stream([d = SRecord.new(event_id: "30963ed9-6349-450b-ac9b-8ea50115b3bd")], s2, version_0)
  .append_to_stream([e = SRecord.new(event_id: "5bdc58b7-e8a7-4621-afd6-ccb828d72457")], s2, version_1)

- expect(read_events_forward(repository, s1)).to eq [a,c]
- expect(read_events_backward(repository, s1)).to eq [c,a]
+ expect(read_events_forward(repository, s1)).to eq [a, c]
+ expect(read_events_backward(repository, s1)).to eq [c, a]
  end

  it "reads all stream linked events forward & backward" do
@@ -822,26 +747,25 @@ module RubyEventStore
  .link_to_stream(["30963ed9-6349-450b-ac9b-8ea50115b3bd"], fs2, version_0)
  .link_to_stream(["5bdc58b7-e8a7-4621-afd6-ccb828d72457"], fs2, version_1)

- expect(read_events_forward(repository, fs1)).to eq [a,c]
- expect(read_events_backward(repository, fs1)).to eq [c,a]
+ expect(read_events_forward(repository, fs1)).to eq [a, c]
+ expect(read_events_backward(repository, fs1)).to eq [c, a]
  end

  it "reads batch of events from all streams forward & backward" do
- events =
-
-
-
-
-
-
-
-
-
-
-
-
-
- end
+ events =
+ %w[
+ 96c920b1-cdd0-40f4-907c-861b9fff7d02
+ 56404f79-0ba0-4aa0-8524-dc3436368ca0
+ 6a54dd21-f9d8-4857-a195-f5588d9e406c
+ 0e50a9cd-f981-4e39-93d5-697fc7285b98
+ d85589bc-b993-41d4-812f-fc631d9185d5
+ 96bdacda-77dd-4d7d-973d-cbdaa5842855
+ 94688199-e6b7-4180-bf8e-825b6808e6cc
+ 68fab040-741e-4bc2-9cca-5b8855b0ca19
+ ab60114c-011d-4d58-ab31-7ba65d99975e
+ 868cac42-3d19-4b39-84e8-cd32d65c2445
+ ].map { |id| SRecord.new(event_id: id) }
+ events.each { |ev| repository.append_to_stream([ev], Stream.new(SecureRandom.uuid), version_none) }

  expect(read_events_forward(repository, count: 3)).to eq(events.first(3))
  expect(read_events_forward(repository, count: 100)).to eq(events)
@@ -859,18 +783,19 @@ module RubyEventStore
  end

  it "linked events do not affect reading from all streams - no duplicates" do
- events =
-
-
-
-
-
-
-
-
-
-
-
+ events =
+ %w[
+ 96c920b1-cdd0-40f4-907c-861b9fff7d02
+ 56404f79-0ba0-4aa0-8524-dc3436368ca0
+ 6a54dd21-f9d8-4857-a195-f5588d9e406c
+ 0e50a9cd-f981-4e39-93d5-697fc7285b98
+ d85589bc-b993-41d4-812f-fc631d9185d5
+ 96bdacda-77dd-4d7d-973d-cbdaa5842855
+ 94688199-e6b7-4180-bf8e-825b6808e6cc
+ 68fab040-741e-4bc2-9cca-5b8855b0ca19
+ ab60114c-011d-4d58-ab31-7ba65d99975e
+ 868cac42-3d19-4b39-84e8-cd32d65c2445
+ ].map { |id| SRecord.new(event_id: id) }
  events.each do |ev|
  repository
  .append_to_stream([ev], Stream.new(SecureRandom.uuid), version_none)
@@ -893,45 +818,43 @@ module RubyEventStore
  end

  it "reads events different uuid object but same content" do
- events =
- 96c920b1-cdd0-40f4-907c-861b9fff7d02
-
-
+ events =
+ %w[96c920b1-cdd0-40f4-907c-861b9fff7d02 56404f79-0ba0-4aa0-8524-dc3436368ca0].map do |id|
+ SRecord.new(event_id: id)
+ end
  repository.append_to_stream([events.first], stream, version_none)
- repository.append_to_stream([events.last],
+ repository.append_to_stream([events.last], stream, version_0)

  expect(read_events_forward(repository, from: "96c920b1-cdd0-40f4-907c-861b9fff7d02")).to eq([events.last])
  expect(read_events_backward(repository, from: "56404f79-0ba0-4aa0-8524-dc3436368ca0")).to eq([events.first])
- expect(read_events_forward(repository, to: "56404f79-0ba0-4aa0-8524-dc3436368ca0", count: 1)).to eq(
-
+ expect(read_events_forward(repository, to: "56404f79-0ba0-4aa0-8524-dc3436368ca0", count: 1)).to eq(
+ [events.first]
+ )
+ expect(read_events_backward(repository, to: "96c920b1-cdd0-40f4-907c-861b9fff7d02", count: 1)).to eq(
+ [events.last]
+ )

  expect(read_events_forward(repository, stream, from: "96c920b1-cdd0-40f4-907c-861b9fff7d02")).to eq([events.last])
- expect(read_events_backward(repository, stream, from: "56404f79-0ba0-4aa0-8524-dc3436368ca0")).to eq(
-
-
+ expect(read_events_backward(repository, stream, from: "56404f79-0ba0-4aa0-8524-dc3436368ca0")).to eq(
+ [events.first]
+ )
+ expect(read_events_forward(repository, stream, to: "56404f79-0ba0-4aa0-8524-dc3436368ca0", count: 1)).to eq(
+ [events.first]
+ )
+ expect(read_events_backward(repository, stream, to: "96c920b1-cdd0-40f4-907c-861b9fff7d02", count: 1)).to eq(
+ [events.last]
+ )
  end

  it "does not allow same event twice in a stream" do
- repository.append_to_stream(
- [SRecord.new(event_id: "a1b49edb-7636-416f-874a-88f94b859bef")],
- stream,
- version_none
- )
+ repository.append_to_stream([SRecord.new(event_id: "a1b49edb-7636-416f-874a-88f94b859bef")], stream, version_none)
  expect do
- repository.append_to_stream(
- [SRecord.new(event_id: "a1b49edb-7636-416f-874a-88f94b859bef")],
- stream,
- version_0
- )
+ repository.append_to_stream([SRecord.new(event_id: "a1b49edb-7636-416f-874a-88f94b859bef")], stream, version_0)
  end.to raise_error(EventDuplicatedInStream)
  end

  it "does not allow same event twice" do
- repository.append_to_stream(
- [SRecord.new(event_id: "a1b49edb-7636-416f-874a-88f94b859bef")],
- stream,
- version_none
- )
+ repository.append_to_stream([SRecord.new(event_id: "a1b49edb-7636-416f-874a-88f94b859bef")], stream, version_none)
  expect do
  repository.append_to_stream(
  [SRecord.new(event_id: "a1b49edb-7636-416f-874a-88f94b859bef")],
@@ -942,11 +865,9 @@ module RubyEventStore
  end

  it "does not allow linking same event twice in a stream" do
- repository
- [SRecord.new(event_id: "a1b49edb-7636-416f-874a-88f94b859bef")],
-
- version_none
- ).link_to_stream(["a1b49edb-7636-416f-874a-88f94b859bef"], stream_flow, version_none)
+ repository
+ .append_to_stream([SRecord.new(event_id: "a1b49edb-7636-416f-874a-88f94b859bef")], stream, version_none)
+ .link_to_stream(["a1b49edb-7636-416f-874a-88f94b859bef"], stream_flow, version_none)
  expect do
  repository.link_to_stream(["a1b49edb-7636-416f-874a-88f94b859bef"], stream_flow, version_0)
  end.to raise_error(EventDuplicatedInStream)
@@ -993,11 +914,7 @@ module RubyEventStore
  binary.force_encoding("binary")
  expect(binary.valid_encoding?).to eq(true)

- repository.append_to_stream(
- [event = SRecord.new(data: binary, metadata: binary)],
- stream,
- version_none
- )
+ repository.append_to_stream([event = SRecord.new(data: binary, metadata: binary)], stream, version_none)
  end

  specify do
@@ -1005,29 +922,16 @@ module RubyEventStore
  expect(repository.read(specification.in_batches.as_at.result)).to be_kind_of(Enumerator)
  expect(repository.read(specification.in_batches.as_of.result)).to be_kind_of(Enumerator)
  events = Array.new(10) { SRecord.new }
- repository.append_to_stream(
- events,
- Stream.new("Dummy"),
- ExpectedVersion.none
- )
+ repository.append_to_stream(events, Stream.new("Dummy"), ExpectedVersion.none)
  expect(repository.read(specification.in_batches.result)).to be_kind_of(Enumerator)
  expect(repository.read(specification.in_batches.as_at.result)).to be_kind_of(Enumerator)
  expect(repository.read(specification.in_batches.as_of.result)).to be_kind_of(Enumerator)
-
  end

  specify do
  events = Array.new(400) { SRecord.new }
- repository.append_to_stream(
-
- Stream.new("Foo"),
- ExpectedVersion.none
- )
- repository.append_to_stream(
- events[0...200],
- Stream.new("Dummy"),
- ExpectedVersion.none
- )
+ repository.append_to_stream(events[200...400], Stream.new("Foo"), ExpectedVersion.none)
+ repository.append_to_stream(events[0...200], Stream.new("Dummy"), ExpectedVersion.none)

  batches = repository.read(specification.stream("Dummy").in_batches.result).to_a
  expect(batches.size).to eq(2)
@@ -1037,11 +941,7 @@ module RubyEventStore

  specify do
  events = Array.new(200) { SRecord.new }
- repository.append_to_stream(
- events,
- Stream.new(GLOBAL_STREAM),
- ExpectedVersion.any
- )
+ repository.append_to_stream(events, Stream.new(GLOBAL_STREAM), ExpectedVersion.any)

  batches = repository.read(specification.in_batches.result).to_a
  expect(batches.size).to eq(2)
@@ -1051,22 +951,14 @@ module RubyEventStore

  specify do
  events = Array.new(200) { SRecord.new }
- repository.append_to_stream(
- events,
- Stream.new(GLOBAL_STREAM),
- ExpectedVersion.any
- )
+ repository.append_to_stream(events, Stream.new(GLOBAL_STREAM), ExpectedVersion.any)

  expect(repository.read(specification.in_batches(200).result).to_a.size).to eq(1)
  end

  specify do
  events = Array.new(200) { SRecord.new }
- repository.append_to_stream(
- events,
- Stream.new(GLOBAL_STREAM),
- ExpectedVersion.any
- )
+ repository.append_to_stream(events, Stream.new(GLOBAL_STREAM), ExpectedVersion.any)

  batches = repository.read(specification.limit(199).in_batches.result).to_a
  expect(batches.size).to eq(2)
@@ -1078,11 +970,7 @@ module RubyEventStore

  specify do
  events = Array.new(200) { SRecord.new }
- repository.append_to_stream(
- events,
- Stream.new(GLOBAL_STREAM),
- ExpectedVersion.any
- )
+ repository.append_to_stream(events, Stream.new(GLOBAL_STREAM), ExpectedVersion.any)

  batches = repository.read(specification.limit(99).in_batches.result).to_a
  expect(batches.size).to eq(1)
@@ -1092,11 +980,7 @@ module RubyEventStore

  specify do
  events = Array.new(200) { SRecord.new }
- repository.append_to_stream(
- events,
- Stream.new(GLOBAL_STREAM),
- ExpectedVersion.any
- )
+ repository.append_to_stream(events, Stream.new(GLOBAL_STREAM), ExpectedVersion.any)

  batches = repository.read(specification.backward.limit(99).in_batches.result).to_a
  expect(batches.size).to eq(1)
@@ -1106,11 +990,7 @@ module RubyEventStore

  specify do
  events = Array.new(200) { SRecord.new }
- repository.append_to_stream(
- events,
- Stream.new(GLOBAL_STREAM),
- ExpectedVersion.any
- )
+ repository.append_to_stream(events, Stream.new(GLOBAL_STREAM), ExpectedVersion.any)

  batches = repository.read(specification.from(events[100].event_id).limit(99).in_batches.result).to_a
  expect(batches.size).to eq(1)
@@ -1123,11 +1003,7 @@ module RubyEventStore
  expect(repository.read(specification.read_last.result)).to be_nil

  events = Array.new(5) { SRecord.new }
- repository.append_to_stream(
- events,
- Stream.new(GLOBAL_STREAM),
- ExpectedVersion.any
- )
+ repository.append_to_stream(events, Stream.new(GLOBAL_STREAM), ExpectedVersion.any)

  expect(repository.read(specification.stream("Any").read_first.result)).to be_nil
  expect(repository.read(specification.stream("Any").read_last.result)).to be_nil
@@ -1167,33 +1043,70 @@ module RubyEventStore
     specify "changes events" do
       skip unless helper.supports_upsert?
       events = Array.new(5) { SRecord.new }
-      repository.append_to_stream(
-
-
-
+      repository.append_to_stream(events[0..2], Stream.new("whatever"), ExpectedVersion.any)
+      repository.append_to_stream(events[3..4], Stream.new("elo"), ExpectedVersion.any)
+      repository.update_messages(
+        [
+          a =
+            SRecord.new(
+              event_id: events[0].event_id.clone,
+              data: events[0].data,
+              metadata: events[0].metadata,
+              event_type: events[0].event_type,
+              timestamp: events[0].timestamp
+            ),
+          b =
+            SRecord.new(
+              event_id: events[1].event_id.dup,
+              data: {
+                "test" => 1
+              },
+              metadata: events[1].metadata,
+              event_type: events[1].event_type,
+              timestamp: events[1].timestamp
+            ),
+          c =
+            SRecord.new(
+              event_id: events[2].event_id,
+              data: events[2].data,
+              metadata: {
+                "test" => 2
+              },
+              event_type: events[2].event_type,
+              timestamp: events[2].timestamp
+            ),
+          d =
+            SRecord.new(
+              event_id: events[3].event_id.clone,
+              data: events[3].data,
+              metadata: events[3].metadata,
+              event_type: "event_type3",
+              timestamp: events[3].timestamp
+            ),
+          e =
+            SRecord.new(
+              event_id: events[4].event_id.dup,
+              data: {
+                "test" => 4
+              },
+              metadata: {
+                "test" => 42
+              },
+              event_type: "event_type4",
+              timestamp: events[4].timestamp
+            )
+        ]
       )
-
-
-
-
-      )
-      repository.update_messages([
-        a = SRecord.new(event_id: events[0].event_id.clone, data: events[0].data, metadata: events[0].metadata, event_type: events[0].event_type, timestamp: events[0].timestamp),
-        b = SRecord.new(event_id: events[1].event_id.dup, data: { "test" => 1 }, metadata: events[1].metadata, event_type: events[1].event_type, timestamp: events[1].timestamp),
-        c = SRecord.new(event_id: events[2].event_id, data: events[2].data, metadata: { "test" => 2 }, event_type: events[2].event_type, timestamp: events[2].timestamp),
-        d = SRecord.new(event_id: events[3].event_id.clone, data: events[3].data, metadata: events[3].metadata, event_type: "event_type3", timestamp: events[3].timestamp),
-        e = SRecord.new(event_id: events[4].event_id.dup, data: { "test" => 4 }, metadata: { "test" => 42 }, event_type: "event_type4", timestamp: events[4].timestamp),
-      ])
-
-      expect(repository.read(specification.result).to_a).to eq([a,b,c,d,e])
-      expect(repository.read(specification.stream("whatever").result).to_a).to eq([a,b,c])
-      expect(repository.read(specification.stream("elo").result).to_a).to eq([d,e])
+
+      expect(repository.read(specification.result).to_a).to eq([a, b, c, d, e])
+      expect(repository.read(specification.stream("whatever").result).to_a).to eq([a, b, c])
+      expect(repository.read(specification.stream("elo").result).to_a).to eq([d, e])
     end

     specify "cannot change unexisting event" do
       skip unless helper.supports_upsert?
       e = SRecord.new
-      expect{ repository.update_messages([e]) }.to raise_error do |err|
+      expect { repository.update_messages([e]) }.to raise_error do |err|
         expect(err).to be_a(EventNotFound)
         expect(err.event_id).to eq(e.event_id)
         expect(err.message).to eq("Event not found: #{e.event_id}")
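
Both specs above describe the optional upsert contract: repositories whose helper reports supports_upsert? must let update_messages replace the data, metadata and event type of already stored records, matched by event_id, and must raise EventNotFound for an unknown id. A hedged sketch of the same calls, assuming the harness objects:

# Sketch of the upsert contract; repository and SRecord come from the lint harness.
original = SRecord.new
repository.append_to_stream([original], Stream.new("whatever"), ExpectedVersion.any)

# Same event_id, new payload: the stored record is rewritten in place.
repository.update_messages([SRecord.new(event_id: original.event_id, data: { "fixed" => true })])

# Unknown event_id: the repository is expected to raise EventNotFound.
begin
  repository.update_messages([SRecord.new])
rescue RubyEventStore::EventNotFound => error
  error.event_id # => id of the record that could not be found
end
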
@@ -1233,28 +1146,40 @@ module RubyEventStore
       stream = Stream.new("Stream A")
       repository.append_to_stream([e1, e2, e3], stream, version_any)

-      expect(repository.read(specification.with_id([
-
-
-      expect(repository.read(specification.with_id([
-
-
-      expect(repository.read(specification.with_id([
-
-
-      expect(
-
-
-
-
-
-
-
-
-
-      expect(
-
-
+      expect(repository.read(specification.with_id(["8a6f053e-3ce2-4c82-a55b-4d02c66ae6ea"]).read_first.result)).to eq(
+        e1
+      )
+      expect(repository.read(specification.with_id(["d345f86d-b903-4d78-803f-38990c078d9e"]).read_first.result)).to eq(
+        e3
+      )
+      expect(repository.read(specification.with_id(["c31b327c-0da1-4178-a3cd-d2f6bb5d0688"]).read_first.result)).to eq(
+        nil
+      )
+      expect(
+        repository.read(
+          specification
+            .with_id(%w[8a6f053e-3ce2-4c82-a55b-4d02c66ae6ea d345f86d-b903-4d78-803f-38990c078d9e])
+            .in_batches
+            .result
+        ).to_a[
+          0
+        ]
+      ).to eq([e1, e3])
+      expect(
+        repository.read(
+          specification.stream("Stream A").with_id(["8cee1139-4f96-483a-a175-2b947283c3c7"]).read_first.result
+        )
+      ).to eq(e2)
+      expect(
+        repository.read(
+          specification.stream("Stream B").with_id(["8cee1139-4f96-483a-a175-2b947283c3c7"]).read_first.result
+        )
+      ).to eq(nil)
+      expect(
+        repository.read(
+          specification.stream("Stream B").with_id(["c31b327c-0da1-4178-a3cd-d2f6bb5d0688"]).read_first.result
+        )
+      ).to eq(nil)
       expect(repository.read(specification.with_id([]).result).to_a).to eq([])
     end

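
The reformatted assertions above document the with_id scope: it takes a list of ids, composes with stream(...), read_first and in_batches, and simply matches nothing for unknown ids or an empty list. A short sketch with freshly generated ids, under the same harness assumptions:

# Illustrative only; harness objects assumed.
e1 = SRecord.new
e2 = SRecord.new
repository.append_to_stream([e1, e2], Stream.new("Stream A"), ExpectedVersion.any)

repository.read(specification.with_id([e1.event_id]).read_first.result)                    # => e1
repository.read(specification.with_id([SecureRandom.uuid]).read_first.result)              # => nil
repository.read(specification.stream("Stream A").with_id([e2.event_id]).read_first.result) # => e2
repository.read(specification.with_id([]).result).to_a                                     # => []
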
@@ -1265,20 +1190,18 @@ module RubyEventStore
       stream = Stream.new("Stream A")
       repository.append_to_stream([e1, e2, e3], stream, version_any)

-      expect(repository.read(specification.of_type([Type1]).result).to_a).to eq([e1,e3])
+      expect(repository.read(specification.of_type([Type1]).result).to_a).to eq([e1, e3])
       expect(repository.read(specification.of_type([Type2]).result).to_a).to eq([e2])
       expect(repository.read(specification.of_type([Type3]).result).to_a).to eq([])
-      expect(repository.read(specification.of_type([Type1, Type2, Type3]).result).to_a).to eq([e1,e2,e3])
+      expect(repository.read(specification.of_type([Type1, Type2, Type3]).result).to_a).to eq([e1, e2, e3])
     end

     specify do
       stream = Stream.new("Stream A")
-      dummy
+      dummy = Stream.new("Dummy")

       expect(repository.count(specification.result)).to eq(0)
-      (1..3).each
-        repository.append_to_stream([SRecord.new(event_type: Type1.to_s)], stream, version_any)
-      end
+      (1..3).each { repository.append_to_stream([SRecord.new(event_type: Type1.to_s)], stream, version_any) }
       expect(repository.count(specification.result)).to eq(3)
       event_id = SecureRandom.uuid
       repository.append_to_stream([SRecord.new(event_type: Type1.to_s, event_id: event_id)], dummy, version_any)
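
The two hunks above cover of_type filtering and count. A condensed sketch, assuming Type1/Type2 event classes, version_any and the other harness objects used throughout these shared examples:

# Sketch; Type1, Type2, version_any and the harness objects are assumed.
stream = Stream.new("Stream A")
repository.append_to_stream([SRecord.new(event_type: Type1.to_s)], stream, version_any)
repository.append_to_stream([SRecord.new(event_type: Type2.to_s)], stream, version_any)

repository.read(specification.of_type([Type1]).result).to_a.size # => 1
repository.read(specification.of_type([Type2]).result).to_a.size # => 1
repository.count(specification.result)                           # => 2
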
@@ -1313,7 +1236,7 @@ module RubyEventStore
     end

     specify "timestamp precision" do
-      time = Time.utc(2020, 9, 11, 12, 26, 0,
+      time = Time.utc(2020, 9, 11, 12, 26, 0, 123_456)
       repository.append_to_stream([SRecord.new(timestamp: time)], stream, version_none)
       event = read_events_forward(repository, count: 1).first

@@ -1326,7 +1249,9 @@ module RubyEventStore
       event_3 = SRecord.new(event_id: "d345f86d-b903-4d78-803f-38990c078d9e", timestamp: Time.utc(2020, 1, 3))
       repository.append_to_stream([event_1, event_2, event_3], Stream.new("whatever"), version_any)

-      expect(repository.read(specification.stream("whatever").older_than(Time.utc(2020, 1, 2)).result).to_a).to eq(
+      expect(repository.read(specification.stream("whatever").older_than(Time.utc(2020, 1, 2)).result).to_a).to eq(
+        [event_1]
+      )
     end

     specify "fetching records older than or equal to specified date in stream" do
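
This and the next several hunks reflow the assertions for the timestamp filters; what they pin down is the boundary handling of older_than, older_than_or_equal, newer_than, newer_than_or_equal and the end-exclusive between range. One combined sketch, same harness assumptions:

# Sketch; harness objects assumed. Three records stamped Jan 1, Jan 2 and Jan 3, 2020.
event_1 = SRecord.new(timestamp: Time.utc(2020, 1, 1))
event_2 = SRecord.new(timestamp: Time.utc(2020, 1, 2))
event_3 = SRecord.new(timestamp: Time.utc(2020, 1, 3))
repository.append_to_stream([event_1, event_2, event_3], Stream.new("whatever"), version_any)

repository.read(specification.older_than(Time.utc(2020, 1, 2)).result).to_a          # => [event_1]
repository.read(specification.older_than_or_equal(Time.utc(2020, 1, 2)).result).to_a # => [event_1, event_2]
repository.read(specification.newer_than(Time.utc(2020, 1, 2)).result).to_a          # => [event_3]
repository.read(specification.between(Time.utc(2020, 1, 1)...Time.utc(2020, 1, 3)).result).to_a # => [event_1, event_2]
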
@@ -1335,7 +1260,9 @@ module RubyEventStore
       event_3 = SRecord.new(event_id: "d345f86d-b903-4d78-803f-38990c078d9e", timestamp: Time.utc(2020, 1, 3))
       repository.append_to_stream([event_1, event_2, event_3], Stream.new("whatever"), version_any)

-      expect(
+      expect(
+        repository.read(specification.stream("whatever").older_than_or_equal(Time.utc(2020, 1, 2)).result).to_a
+      ).to eq([event_1, event_2])
     end

     specify "fetching records newer than specified date in stream" do
@@ -1344,7 +1271,9 @@ module RubyEventStore
       event_3 = SRecord.new(event_id: "d345f86d-b903-4d78-803f-38990c078d9e", timestamp: Time.utc(2020, 1, 3))
       repository.append_to_stream([event_1, event_2, event_3], Stream.new("whatever"), version_any)

-      expect(repository.read(specification.stream("whatever").newer_than(Time.utc(2020, 1, 2)).result).to_a).to eq(
+      expect(repository.read(specification.stream("whatever").newer_than(Time.utc(2020, 1, 2)).result).to_a).to eq(
+        [event_3]
+      )
     end

     specify "fetching records newer than or equal to specified date in stream" do
@@ -1353,7 +1282,9 @@ module RubyEventStore
       event_3 = SRecord.new(event_id: "d345f86d-b903-4d78-803f-38990c078d9e", timestamp: Time.utc(2020, 1, 3))
       repository.append_to_stream([event_1, event_2, event_3], Stream.new("whatever"), version_any)

-      expect(
+      expect(
+        repository.read(specification.stream("whatever").newer_than_or_equal(Time.utc(2020, 1, 2)).result).to_a
+      ).to eq([event_2, event_3])
     end

     specify "fetching records older than specified date" do
@@ -1371,7 +1302,9 @@ module RubyEventStore
       event_3 = SRecord.new(event_id: "d345f86d-b903-4d78-803f-38990c078d9e", timestamp: Time.utc(2020, 1, 3))
       repository.append_to_stream([event_1, event_2, event_3], Stream.new("whatever"), version_any)

-      expect(repository.read(specification.older_than_or_equal(Time.utc(2020, 1, 2)).result).to_a).to eq(
+      expect(repository.read(specification.older_than_or_equal(Time.utc(2020, 1, 2)).result).to_a).to eq(
+        [event_1, event_2]
+      )
     end

     specify "fetching records newer than specified date" do
@@ -1389,7 +1322,9 @@ module RubyEventStore
       event_3 = SRecord.new(event_id: "d345f86d-b903-4d78-803f-38990c078d9e", timestamp: Time.utc(2020, 1, 3))
       repository.append_to_stream([event_1, event_2, event_3], Stream.new("whatever"), version_any)

-      expect(repository.read(specification.newer_than_or_equal(Time.utc(2020, 1, 2)).result).to_a).to eq(
+      expect(repository.read(specification.newer_than_or_equal(Time.utc(2020, 1, 2)).result).to_a).to eq(
+        [event_2, event_3]
+      )
     end

     specify "fetching records from disjoint periods" do
@@ -1398,7 +1333,9 @@ module RubyEventStore
       event_3 = SRecord.new(event_id: "d345f86d-b903-4d78-803f-38990c078d9e", timestamp: Time.utc(2020, 1, 3))
       repository.append_to_stream([event_1, event_2, event_3], Stream.new("whatever"), version_any)

-      expect(
+      expect(
+        repository.read(specification.older_than(Time.utc(2020, 1, 2)).newer_than(Time.utc(2020, 1, 2)).result).to_a
+      ).to eq([])
     end

     specify "fetching records within time range" do
@@ -1407,38 +1344,58 @@ module RubyEventStore
       event_3 = SRecord.new(event_id: "d345f86d-b903-4d78-803f-38990c078d9e", timestamp: Time.utc(2020, 1, 3))
       repository.append_to_stream([event_1, event_2, event_3], Stream.new("whatever"), version_any)

-      expect(repository.read(specification.between(Time.utc(2020, 1, 1)...Time.utc(2020, 1, 3)).result).to_a).to eq(
+      expect(repository.read(specification.between(Time.utc(2020, 1, 1)...Time.utc(2020, 1, 3)).result).to_a).to eq(
+        [event_1, event_2]
+      )
     end

     specify "time order is respected" do
-      repository.append_to_stream(
-
-        SRecord.new(
-
+      repository.append_to_stream(
+        [
+          SRecord.new(
+            event_id: e1 = SecureRandom.uuid,
+            timestamp: Time.new(2020, 1, 1),
+            valid_at: Time.new(2020, 1, 9)
+          ),
+          SRecord.new(
+            event_id: e2 = SecureRandom.uuid,
+            timestamp: Time.new(2020, 1, 3),
+            valid_at: Time.new(2020, 1, 6)
+          ),
+          SRecord.new(event_id: e3 = SecureRandom.uuid, timestamp: Time.new(2020, 1, 2), valid_at: Time.new(2020, 1, 3))
         ],
         Stream.new("Dummy"),
         ExpectedVersion.any
       )
-      expect(repository.read(specification.result)).to
-      expect(repository.read(specification.as_at.result)).to
+      expect(repository.read(specification.result)).to eq_ids([e1, e2, e3])
+      expect(repository.read(specification.as_at.result)).to eq_ids([e1, e3, e2])
       expect(repository.read(specification.as_at.backward.result)).to eq_ids([e2, e3, e1])
-      expect(repository.read(specification.as_of.result)).to
+      expect(repository.read(specification.as_of.result)).to eq_ids([e3, e2, e1])
       expect(repository.read(specification.as_of.backward.result)).to eq_ids([e1, e2, e3])
     end

     specify "time order is respected with batches" do
-      repository.append_to_stream(
-
-
-
-
+      repository.append_to_stream(
+        [
+          SRecord.new(
+            event_id: e1 = SecureRandom.uuid,
+            timestamp: Time.new(2020, 1, 1),
+            valid_at: Time.new(2020, 1, 9)
+          ),
+          SRecord.new(
+            event_id: e2 = SecureRandom.uuid,
+            timestamp: Time.new(2020, 1, 3),
+            valid_at: Time.new(2020, 1, 6)
+          ),
+          SRecord.new(event_id: e3 = SecureRandom.uuid, timestamp: Time.new(2020, 1, 2), valid_at: Time.new(2020, 1, 3))
+        ],
         Stream.new("Dummy"),
         ExpectedVersion.any
       )
-      expect(repository.read(specification.in_batches.result).to_a.flatten).to
-      expect(repository.read(specification.in_batches.as_at.result).to_a.flatten).to
+      expect(repository.read(specification.in_batches.result).to_a.flatten).to eq_ids([e1, e2, e3])
+      expect(repository.read(specification.in_batches.as_at.result).to_a.flatten).to eq_ids([e1, e3, e2])
       expect(repository.read(specification.in_batches.as_at.backward.result).to_a.flatten).to eq_ids([e2, e3, e1])
-      expect(repository.read(specification.in_batches.as_of.result).to_a.flatten).to
+      expect(repository.read(specification.in_batches.as_of.result).to_a.flatten).to eq_ids([e3, e2, e1])
       expect(repository.read(specification.in_batches.as_of.backward.result).to_a.flatten).to eq_ids([e1, e2, e3])
     end
   end
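
The last two specs document the bi-temporal ordering repositories must honour: as_at orders records by the time they were appended (timestamp), as_of by their validity time (valid_at), and both compose with backward and in_batches. The fixture makes the difference visible, since the three records are appended with timestamps in one order and valid_at values in another; a sketch of the same data, assuming the harness objects:

# Sketch; harness objects assumed. Append order e1, e2, e3; timestamps e1 < e3 < e2; valid_at e3 < e2 < e1.
repository.append_to_stream(
  [
    SRecord.new(event_id: e1 = SecureRandom.uuid, timestamp: Time.new(2020, 1, 1), valid_at: Time.new(2020, 1, 9)),
    SRecord.new(event_id: e2 = SecureRandom.uuid, timestamp: Time.new(2020, 1, 3), valid_at: Time.new(2020, 1, 6)),
    SRecord.new(event_id: e3 = SecureRandom.uuid, timestamp: Time.new(2020, 1, 2), valid_at: Time.new(2020, 1, 3))
  ],
  Stream.new("Dummy"),
  ExpectedVersion.any
)

repository.read(specification.as_at.result).map(&:event_id) # => [e1, e3, e2]  (append-time order)
repository.read(specification.as_of.result).map(&:event_id) # => [e3, e2, e1]  (validity-time order)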