nexia_event_store 0.10.1 → 0.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/event_store.rb +22 -13
- data/lib/event_store/aggregate.rb +5 -5
- data/lib/event_store/client.rb +3 -2
- data/lib/event_store/event_stream.rb +18 -10
- data/lib/event_store/snapshot.rb +2 -2
- data/lib/event_store/version.rb +1 -1
- data/spec/event_store/client_spec.rb +3 -2
- data/spec/event_store/event_stream_spec.rb +69 -0
- data/spec/event_store/snapshot_spec.rb +6 -4
- data/spec/unit/snapshot_unit_spec.rb +2 -2
- metadata +5 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a36ee7d65c27df5609edfe9b2807da29142fd332
+  data.tar.gz: 9e3e8394fae1ff84857675343bee94e9daa08738
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 8971a7a3bc6b4d3a6af6d439d7058720a56ef77b02e49e56df5e81d78631c034260b85b4007c6981b0662448cf1f7bdb359222b867eba2cfed4c03aaecb48b9f
+  data.tar.gz: a0012132f78ea8b63d6d2ab010f8480ee3e2b57321b5ae1abcf197e053489a88eac7714a38c830cc14bb0103742f1ea120ec4ed062e7e85bfc7583fa7f33df63
data/lib/event_store.rb
CHANGED
@@ -12,6 +12,7 @@ require 'event_store/aggregate'
 require 'event_store/client'
 require 'event_store/errors'
 require 'yaml'
+require 'zlib'

 Sequel.extension :migration

@@ -40,8 +41,9 @@ module EventStore
     @db
   end

-  def self.redis
-
+  def self.redis(hostname)
+    hash = Zlib::crc32(hostname)
+    @redis[hash % @redis.length]
   end

   def self.connect(*args)
@@ -49,7 +51,14 @@ module EventStore
   end

   def self.redis_connect(config_hash)
-
+    if config_hash["hosts"]
+      generic_config = config_hash["hosts"].reject { |k, _| k == "hosts" }
+      @redis = config_hash["hosts"].map { |hostname|
+        Redis.new(generic_config.merge("host" => hostname))
+      }
+    else
+      @redis ||= [Redis.new(config_hash)]
+    end
   end

   def self.local_redis_config
@@ -103,7 +112,7 @@ module EventStore
     return unless connected?
     EventStore.db.from(fully_qualified_table).delete
     EventStore.db.from(fully_qualified_names_table).delete
-
+    @redis.map(&:flushdb)
   end

   def self.postgres(environment = 'test', table_name = 'events', schema = 'event_store_test')
@@ -142,14 +151,14 @@ module EventStore

   def self.custom_config(database_config, redis_config, table_name = 'events', environment = 'production')
     self.redis_connect(redis_config)
-    database_config = database_config.
-    redis_config = redis_config.
-
-    @adapter
-    @environment
-    @db_config
-    @table_name
-    @schema
+    database_config = database_config.each_with_object({}) {|(k,v), memo| memo[k.to_s] = v}
+    redis_config = redis_config.each_with_object({}) {|(k,v), memo| memo[k.to_s] = v}
+
+    @adapter = database_config["adapter"].to_s
+    @environment = environment
+    @db_config = database_config
+    @table_name = table_name
+    @schema = database_config["schema"].to_s
     @use_names_table = database_config.fetch("use_names_table", true)
     connect_db
   end
@@ -168,7 +177,7 @@ module EventStore

   def self.create_db
     connect_db
-    table =
+    table = Sequel.qualify(schema, "schema_info")
     @db.run("CREATE SCHEMA IF NOT EXISTS #{schema}")
     Sequel::Migrator.run(@db, File.expand_path(File.join('..','..','db', self.migrations_dir), __FILE__), table: table)
   end
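
The headline change in lib/event_store.rb is Redis sharding: redis_connect now accepts a "hosts" list and builds one connection per host, and EventStore.redis takes a key (Snapshot passes the aggregate id) and selects a connection by CRC32 of that key modulo the pool size; without a "hosts" entry it falls back to a single connection wrapped in a one-element array. A minimal, self-contained sketch of the selection rule, with placeholder strings standing in for real Redis connections and a hypothetical key:

    require "zlib"

    # Hypothetical pool: EventStore builds one connection per "hosts" entry
    # handed to redis_connect; plain strings stand in for Redis clients here.
    pool = ["redis://redis-a:6379", "redis://redis-b:6379", "redis://redis-c:6379"]

    key   = "d3d93d06-8c97-44f5-ae37-43334e0d2d56"  # e.g. an aggregate id
    shard = pool[Zlib.crc32(key) % pool.length]     # same key always maps to the same shard
    puts shard
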
data/lib/event_store/aggregate.rb
CHANGED
@@ -4,7 +4,7 @@ module EventStore
   class Aggregate
     extend Forwardable

-    attr_reader :id, :type, :event_table, :snapshot, :event_stream, :
+    attr_reader :id, :type, :event_table, :snapshot, :event_stream, :checkpoint_events

     def_delegators :snapshot,
                    :last_event,
@@ -35,13 +35,13 @@ module EventStore
       EventStore.db.from(EventStore.fully_qualified_table).select(:aggregate_id).distinct.order(:aggregate_id).limit(limit, offset).all.map{|item| item[:aggregate_id]}
     end

-    def initialize(id, type = EventStore.table_name,
+    def initialize(id, type = EventStore.table_name, checkpoint_events = [])
       @id = id
       @type = type

-      @
-      @snapshot
-      @event_stream
+      @checkpoint_events = checkpoint_events
+      @snapshot = Snapshot.new(self)
+      @event_stream = EventStream.new(self)
     end

     def append(events, logger)
data/lib/event_store/client.rb
CHANGED
@@ -22,8 +22,9 @@ module EventStore
       Aggregate.ids(offset, limit)
     end

-    def initialize(aggregate_id, aggregate_type = EventStore.table_name,
-
+    def initialize(aggregate_id, aggregate_type = EventStore.table_name, checkpoint_events = [])
+      checkpoint_events = [checkpoint_events].flatten
+      @aggregate = Aggregate.new(aggregate_id, aggregate_type, checkpoint_events)
     end

     def exists?
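
EventStore::Client.new gains an optional checkpoint_events argument, which is flattened into an array and passed through to the Aggregate; EventStream#snapshot_events later uses those names to return only the events at or after the most recent checkpoint (see the event_stream.rb diff below). A hedged usage sketch with a hypothetical aggregate id and event names:

    require "event_store"

    aggregate_id = "d3d93d06-8c97-44f5-ae37-43334e0d2d56"

    # A single checkpoint event name is accepted and wrapped into an array...
    client = EventStore::Client.new(aggregate_id, :device, "daily_summary_rolled_up")

    # ...as is a list of checkpoint event types.
    client = EventStore::Client.new(aggregate_id, :device,
                                    %w[daily_summary_rolled_up weekly_summary_rolled_up])
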
data/lib/event_store/event_stream.rb
CHANGED
@@ -2,15 +2,15 @@ module EventStore
   class EventStream
     include Enumerable

-    attr_reader :event_table, :
+    attr_reader :event_table, :checkpoint_events

     def initialize aggregate
       @aggregate = aggregate
       @id = @aggregate.id
-      @
+      @checkpoint_events = aggregate.checkpoint_events
       @event_table_alias = "events"
-      @event_table =
-      @aliased_event_table =
+      @event_table = Sequel.qualify(EventStore.schema, EventStore.table_name)
+      @aliased_event_table = event_table.as(@event_table_alias)
       @names_table = EventStore.fully_qualified_names_table
     end

@@ -57,17 +57,25 @@ module EventStore
       begin
         query = EventStore.db.from(@aliased_event_table).where(:aggregate_id => @id.to_s)
         query = query.join(@names_table, id: :fully_qualified_name_id) if EventStore.use_names_table?
-        query = query.order
+        query = query.order { events[:id] }.select_all(:events)
         query = query.select_append(:fully_qualified_name) if EventStore.use_names_table?
         query
       end
     end

     def snapshot_events
-      last_checkpoint =
+      last_checkpoint = nil
+
+      if checkpoint_events
+        checkpoints = last_event_before(Time.now.utc, checkpoint_events)
+        if checkpoints.map { |e| e[:fully_qualified_name] }.uniq.length > 1
+          raise "unexpected multiple checkpoint event types"
+        end
+        last_checkpoint = checkpoints.last
+      end

       if last_checkpoint
-        events.where{
+        events.where{ events[:id] >= last_checkpoint[:id].to_i }
       else
         events
       end
@@ -75,7 +83,7 @@ module EventStore

     def events_from(event_id, max = nil)
       # note: this depends on the events table being aliased to "events" above.
-      events.limit(max).where{
+      events.limit(max).where{events[:id] >= event_id.to_i }.all.map do |event|
         event[:serialized_event] = EventStore.unescape_bytea(event[:serialized_event])
         event
       end
@@ -99,8 +107,8 @@ module EventStore
       timestampz = start_time.strftime("%Y-%m-%d %H:%M:%S%z")

       rows = fully_qualified_names.inject([]) { |memo, name|
-        memo + events.where(
-          .select { max(:
+        memo + events.where(Sequel.qualify("events", "id") => events.where(fully_qualified_name: name).where { occurred_at < timestampz }
+          .select { max(events[:id]) }.unordered.group(:sub_key)).all
       }.sort_by { |r| r[:occurred_at] }

       rows.map {|r| r[:serialized_event] = EventStore.unescape_bytea(r[:serialized_event]); r}
data/lib/event_store/snapshot.rb
CHANGED
@@ -8,7 +8,7 @@ module EventStore

     def initialize aggregate
       @aggregate = aggregate
-      @redis = EventStore.redis
+      @redis = EventStore.redis(aggregate.id)
       @snapshot_table = "#{@aggregate.type}_snapshots_for_#{@aggregate.id}"
       @snapshot_event_id_table = "#{@aggregate.type}_snapshot_event_ids_for_#{@aggregate.id}"
     end
@@ -65,7 +65,7 @@ module EventStore
     end

     def delete_snapshot!
-
+      @redis.del [snapshot_table, snapshot_event_id_table]
     end

     def store_snapshot(prepared_events, logger=default_logger)
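
Snapshot now derives its Redis connection from the aggregate id instead of using a single shared connection, so an aggregate's snapshot_table and snapshot_event_id_table always live on the shard that EventStore.redis(id) selects, and delete_snapshot! drops both keys in one call. A small illustrative sketch mirroring the key naming in initialize and the hget used in the snapshot spec (the aggregate id and type below are hypothetical):

    id    = "d3d93d06-8c97-44f5-ae37-43334e0d2d56"
    redis = EventStore.redis(id)

    # Reads and writes for this aggregate's snapshot keys all go through `redis`.
    redis.hget("device_snapshot_event_ids_for_#{id}", :current_event_id)
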
data/lib/event_store/version.rb
CHANGED
data/spec/event_store/client_spec.rb
CHANGED
@@ -56,7 +56,7 @@ describe EventStore::Client do
   end

   it "should be empty for aggregates without events" do
-    stream = es_client.new(100, :device).raw_event_stream
+    stream = es_client.new("100", :device).raw_event_stream
     expect(stream.empty?).to be_truthy
   end

@@ -80,7 +80,7 @@ describe EventStore::Client do
   end

   it "should be empty for aggregates without events" do
-    stream = es_client.new(100, :device).raw_event_stream
+    stream = es_client.new("100", :device).raw_event_stream
     expect(stream.empty?).to be_truthy
   end

@@ -116,6 +116,7 @@ describe EventStore::Client do
     end
   end

+
   describe "#raw_event_streams_from_event_id" do
     subject { es_client.new(AGGREGATE_ID_ONE, :device) }
     let(:raw_stream) { subject.raw_event_stream }
data/spec/event_store/event_stream_spec.rb
ADDED
@@ -0,0 +1,69 @@
+require "spec_helper"
+
+module EventStore
+  describe EventStream do
+    let(:aggregate_id) { SecureRandom.uuid }
+    let(:checkpoint_events) {
+      %w[ checkpoint_event_1 checkpoint_event_2 ]
+    }
+    let(:aggregate) {
+      Aggregate.new(
+        aggregate_id,
+        EventStore.table_name,
+        checkpoint_events
+      )
+    }
+
+    let(:event_time) { Time.parse("2001-01-01 00:00:00 UTC") }
+
+    let(:events) {
+      [EventStore::Event.new(aggregate_id, (event_time - 2000).utc, "old_event", "zone", "#{1000.to_s(2)}_foo"),
+       EventStore::Event.new(aggregate_id, (event_time - 1000).utc, "checkpoint_event_2", "zone", "#{1001.to_s(2)}_foo"),
+       EventStore::Event.new(aggregate_id, (event_time + 100).utc, "after_checkpoint_1", "zone", "#{1002.to_s(2)}_foo"),
+       EventStore::Event.new(aggregate_id, (event_time).utc, "after_checkpoint_2", "zone", "#{12.to_s(2)}_foo")]
+    }
+
+    subject(:event_stream) { EventStream.new aggregate }
+
+    let(:logger) { Logger.new("/dev/null") }
+
+    before(:each) do
+      event_stream.append events, logger
+    end
+
+    describe "#snapshot_events" do
+      it "returns events since the last of one of multiple checkpoint events" do
+        snapshot_events = event_stream.snapshot_events
+
+        expect(snapshot_events.count).to eql(3)
+
+        expect(
+          snapshot_events.find { |event| event[:fully_qualified_name] == 'checkpoint_event_2' }
+        ).not_to be_nil
+
+        expect(
+          snapshot_events.find { |event| event[:fully_qualified_name] == 'after_checkpoint_1' }
+        ).not_to be_nil
+
+        expect(
+          snapshot_events.find { |event| event[:fully_qualified_name] == 'after_checkpoint_2' }
+        ).not_to be_nil
+      end
+
+      it "raises an exception if multiple event types are returned" do
+        event = EventStore::Event.new(aggregate_id,
+                                      (event_time - 1000).utc,
+                                      "checkpoint_event_1",
+                                      "zone",
+                                      "#{1001.to_s(2)}_foo")
+
+        event_stream.append [event], logger
+
+        expect { event_stream.snapshot_events }.to raise_error do |error|
+          expect(error).to be_a(RuntimeError)
+          expect(error.message).to eql("unexpected multiple checkpoint event types")
+        end
+      end
+    end
+  end
+end
data/spec/event_store/snapshot_spec.rb
CHANGED
@@ -6,13 +6,15 @@ AGGREGATE_ID_TWO = SecureRandom.uuid

 module EventStore
   describe "Snapshots" do
+    let(:redis) { EventStore.redis("") }
+
     context "when there are no events" do
-      let(:client)
+      let(:client) { EventStore::Client.new(AGGREGATE_ID_ONE) }

       it "should build an empty snapshot for a new client" do
         expect(client.snapshot.any?).to eq(false)
         expect(client.event_id).to eq(-1)
-        expect(
+        expect(redis.hget(client.snapshot_event_id_table, :current_event_id)).to eq(nil)
       end

       it "a client should rebuild a snapshot" do
@@ -104,8 +106,8 @@ module EventStore
       before(:each) { snapshot.reject! { true } }

       it "deletes the snapshot out of Redis" do
-        expect(
-        expect(
+        expect(redis.keys(snapshot.snapshot_table).length).to eq(0)
+        expect(redis.keys(snapshot.snapshot_event_id_table).length).to eq(0)
       end
     end
   end
data/spec/unit/snapshot_unit_spec.rb
CHANGED
@@ -3,7 +3,7 @@ require "mock_redis"

 module EventStore
   describe Snapshot do
-    let(:redis) { EventStore.redis }
+    let(:redis) { EventStore.redis("") } # there's only one in test env anyway
     let(:aggregate_type) { "awesome" }
    let(:aggregate_id) { "superman" }
     let(:events) { [] }
@@ -14,7 +14,7 @@ module EventStore
        type: aggregate_type,
        id: aggregate_id,
        events: double(all: events),
-
+        checkpoint_events: [checkpoint_event],
        snapshot_events: double(all: snapshot_events))
    }

metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: nexia_event_store
 version: !ruby/object:Gem::Version
-  version: 0.10.1
+  version: 0.11.0
 platform: ruby
 authors:
 - Paul Saieg, John Colvin
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2017-08-17 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler
@@ -208,6 +208,7 @@ files:
 - spec/event_store/client_spec.rb
 - spec/event_store/config_spec.rb
 - spec/event_store/event_store_spec.rb
+- spec/event_store/event_stream_spec.rb
 - spec/event_store/serialized_binary_event_data.txt
 - spec/event_store/snapshot_spec.rb
 - spec/event_store/vertica guy notes.txt
@@ -235,7 +236,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     version: '0'
 requirements: []
 rubyforge_project:
-rubygems_version: 2.
+rubygems_version: 2.5.1
 signing_key:
 specification_version: 4
 summary: Ruby implementation of an EventSource (A+ES) for the Nexia Ecosystem
@@ -247,6 +248,7 @@ test_files:
 - spec/event_store/client_spec.rb
 - spec/event_store/config_spec.rb
 - spec/event_store/event_store_spec.rb
+- spec/event_store/event_stream_spec.rb
 - spec/event_store/serialized_binary_event_data.txt
 - spec/event_store/snapshot_spec.rb
 - spec/event_store/vertica guy notes.txt