event_store 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.gitignore +27 -0
- data/.rspec +3 -0
- data/Gemfile +4 -0
- data/Guardfile +6 -0
- data/LICENSE.txt +22 -0
- data/README.md +53 -0
- data/Rakefile +15 -0
- data/db/database.yml +41 -0
- data/db/event_store_db_designer_common_queries.sql +4 -0
- data/db/event_store_db_designer_event_data.sql +3585 -0
- data/db/event_store_sample_data_generator.rb +30 -0
- data/db/migrations/001_create_event_store_events.rb +60 -0
- data/db/pg_migrations/001_create_event_store_events.rb +17 -0
- data/event_store.gemspec +34 -0
- data/lib/event_store.rb +113 -0
- data/lib/event_store/aggregate.rb +72 -0
- data/lib/event_store/client.rb +87 -0
- data/lib/event_store/errors.rb +4 -0
- data/lib/event_store/event_appender.rb +83 -0
- data/lib/event_store/time_hacker.rb +16 -0
- data/lib/event_store/version.rb +3 -0
- data/spec/benchmark/bench.rb +34 -0
- data/spec/benchmark/memory_profile.rb +48 -0
- data/spec/benchmark/seed_db.rb +45 -0
- data/spec/event_store/client_spec.rb +287 -0
- data/spec/event_store/snapshot_spec.rb +45 -0
- data/spec/event_store/vertica guy notes.txt +23 -0
- data/spec/spec_helper.rb +28 -0
- metadata +276 -0
data/db/event_store_sample_data_generator.rb
ADDED
@@ -0,0 +1,30 @@
# require_relative '../../protocol_buffers/lib/protocol_buffers'
require 'faceplate_api'
require "faceplate_api/thermostats/test_support/event_mother"
require 'securerandom'
require 'time'
include FaceplateApi
event_names = [:firmware_version_updated, :fan_on_updated, :fan_mode_updated, :configuration_lock_updated, :display_lock_updated,
               :mode_updated, :system_name_updated, :operation_status_updated, :relative_airflow_updated, :balance_point_updated, :indoor_temperature_updated,
               :temperature_setpoint_updated, :sensor_added, :sensor_removed, :sensor_updated, :zone_added, :zone_removed, :zone_updated, :preset_added,
               :preset_removed, :preset_updated, :preset_activated, :relative_humidity_setpoint_updated, :event_schedule_added, :event_schedule_removed, :event_schedule_updated, :event_schedule_activated]

aggregate_ids = ["ASDFDS12939", "1SQFDS12B39", "103MMV", SecureRandom.uuid, SecureRandom.uuid, "10PM93BU37"]
ITERATIONS = 5
versions_per_device = (0..(event_names.length * ITERATIONS)).to_a

mothers = {}
aggregate_ids.each do |aggregate_id|
  mother = Thermostats::EventMother.new(device_id: aggregate_id)
  mothers[mother] = versions_per_device.dup
end

File.open('./data.sql', 'w') do |f|
  (event_names * ITERATIONS * ITERATIONS).shuffle.each do |name|
    event_mother = mothers.keys.sample
    event = event_mother.send(name)
    version = mothers[event_mother].shift
    f.puts "INSERT INTO events.device_events(aggregate_id, version, occurred_at, serialized_event, fully_qualified_name) values ('#{event_mother.device_id}', #{version}, '#{DateTime.now.iso8601}', '#{event.to_s}', '#{name}');"
  end
  f.puts 'commit;'
end
data/db/migrations/001_create_event_store_events.rb
ADDED
@@ -0,0 +1,60 @@
require 'event_store'
Sequel.migration do
  up do

    run %Q<CREATE TABLE #{EventStore.fully_qualified_table} (
          id AUTO_INCREMENT PRIMARY KEY,
          version BIGINT NOT NULL,
          aggregate_id varchar(36) NOT NULL,
          fully_qualified_name varchar(255) NOT NULL,
          occurred_at TIMESTAMPTZ NOT NULL,
          serialized_event VARBINARY(255) NOT NULL)

          PARTITION BY EXTRACT(year FROM occurred_at AT TIME ZONE 'UTC')*100 + EXTRACT(month FROM occurred_at AT TIME ZONE 'UTC');

          CREATE PROJECTION #{EventStore.fully_qualified_table}_super_projecion /*+createtype(D)*/
          (
            id ENCODING COMMONDELTA_COMP,
            version ENCODING COMMONDELTA_COMP,
            aggregate_id ENCODING RLE,
            fully_qualified_name ENCODING AUTO,
            occurred_at ENCODING BLOCKDICT_COMP,
            serialized_event ENCODING AUTO
          )
          AS
          SELECT id,
                 version,
                 aggregate_id,
                 fully_qualified_name,
                 occurred_at,
                 serialized_event
          FROM #{EventStore.fully_qualified_table}
          ORDER BY aggregate_id,
                   version
          SEGMENTED BY HASH(aggregate_id) ALL NODES;

          CREATE PROJECTION #{EventStore.fully_qualified_table}_runtime_history_projection /*+createtype(D)*/
          (
            version ENCODING DELTAVAL,
            aggregate_id ENCODING RLE,
            fully_qualified_name ENCODING RLE,
            occurred_at ENCODING RLE,
            serialized_event ENCODING AUTO
          )
          AS
          SELECT version,
                 aggregate_id,
                 fully_qualified_name,
                 occurred_at,
                 serialized_event
          FROM #{EventStore.fully_qualified_table}
          ORDER BY aggregate_id,
                   occurred_at,
                   fully_qualified_name
          SEGMENTED BY HASH(aggregate_id) ALL NODES;>
  end

  down do
    run 'DROP SCHEMA #{EventStore.schema} CASCADE;'
  end
end
data/db/pg_migrations/001_create_event_store_events.rb
ADDED
@@ -0,0 +1,17 @@
require 'event_store'
Sequel.migration do
  change do
    create_table((EventStore.schema + "__" + EventStore.table_name).to_sym) do
      primary_key :id
      Bignum :version
      index :version
      String :aggregate_id
      index :aggregate_id
      String :fully_qualified_name
      index :fully_qualified_name
      DateTime :occurred_at
      index :occurred_at
      bytea :serialized_event
    end
  end
end
data/event_store.gemspec
ADDED
@@ -0,0 +1,34 @@
# coding: utf-8
lib = File.expand_path('../lib', __FILE__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
require 'event_store/version'

Gem::Specification.new do |spec|
  spec.name          = "event_store"
  spec.version       = EventStore::VERSION
  spec.authors       = ["Paul Saieg, John Colvin", "Stuart Nelson"]
  spec.description   = ["A Ruby implementation of an EventSource (A+ES) tuned for Vertica or Postgres"]
  spec.email         = ["classicist@gmail.com"]
  spec.summary       = %q{Ruby implementation of an EventSource (A+ES) for the Nexia Ecosystem}
  spec.homepage      = ""
  spec.license       = "MIT"

  spec.files         = `git ls-files`.split($/)
  spec.executables   = spec.files.grep(%r{^bin/}) { |f| File.basename(f) }
  spec.test_files    = spec.files.grep(%r{^(test|spec|features)/})
  spec.require_paths = ["lib"]

  spec.add_development_dependency "bundler", ">= 1.3"
  spec.add_development_dependency "rake"
  spec.add_development_dependency "rspec", "~> 2.14"
  spec.add_development_dependency "simplecov"
  spec.add_development_dependency "simplecov-rcov"
  spec.add_development_dependency "guard-rspec"
  spec.add_development_dependency "pry-debugger"
  spec.add_development_dependency "mock_redis"

  spec.add_dependency "sequel", "~> 3.42"
  spec.add_dependency 'sequel-vertica', '~> 0.1.0'
  spec.add_dependency 'pg', '~> 0.17.1'
  spec.add_dependency 'redis', "~> 3.0.7"
end
data/lib/event_store.rb
ADDED
@@ -0,0 +1,113 @@
require 'sequel'
require 'vertica'
require 'sequel-vertica'
require 'redis'
require 'event_store/version'
require 'event_store/time_hacker'
require 'event_store/event_appender'
require 'event_store/aggregate'
require 'event_store/client'
require 'event_store/errors'
Sequel.extension :migration

module EventStore
  Event = Struct.new(:aggregate_id, :occurred_at, :fully_qualified_name, :serialized_event, :version)
  SerializedEvent = Struct.new(:fully_qualified_name, :serialized_event, :version, :occurred_at)
  SNAPSHOT_DELIMITER = "__NexEvStDelim__"

  def self.db_config(env, adapter)
    raw_db_config[env.to_s][adapter.to_s]
  end

  def self.raw_db_config
    if @raw_db_config.nil?
      file_path = File.expand_path(__FILE__ + '/../../db/database.yml')
      @config_file = File.open(file_path,'r')
      @raw_db_config = YAML.load(@config_file)
      @config_file.close
    end
    @raw_db_config
  end

  def self.db
    @db
  end

  def self.redis
    @redis
  end

  def self.connect(*args)
    @db ||= Sequel.connect(*args)
  end

  def self.redis_connect(config_hash)
    @redis ||= Redis.new(config_hash)
  end

  def self.local_redis_connect
    @redis_connection ||= redis_connect raw_db_config['redis']
  end

  def self.schema
    @schema ||= raw_db_config[@environment][@database]['schema']
  end

  def self.table_name
    @table_name ||= raw_db_config['table_name']
  end

  def self.fully_qualified_table
    @fully_qualified_table ||= Sequel.lit "#{schema}.#{table_name}"
  end

  def self.clear!
    EventStore.db.from(fully_qualified_table).delete
    EventStore.redis.flushdb
  end

  def self.postgres(db_env = :test)
    @database = 'postgres'
    @environment = db_env.to_s
    local_redis_connect
    create_db( @database, @environment)
  end

  def self.vertica(db_env = :test)
    @database = 'vertica'
    @environment = db_env.to_s
    local_redis_connect
    create_db(@database, @environment)
  end

  def self.production(database_config, redis_config)
    self.redis_connect redis_config
    self.connect database_config
  end

  def self.create_db(type, db_env, db_config = nil)
    @db_type = type
    db_config ||= self.db_config(db_env, type)
    if type == 'vertica'
      #To find the ip address of vertica on your local box (running in a vm)
      #1. open Settings -> Network and select Wi-Fi
      #2. open a terminal in the VM
      #3. do /sbin/ifconfig (ifconfig is not in $PATH)
      #4. the inet address for en0 is what you want
      #Hint: if it just hangs, you have have the wrong IP
      db_config['host'] = vertica_host
      @migrations_dir = 'db/migrations'
    else
      @migrations_dir = 'db/pg_migrations'
    end

    EventStore.connect db_config
    schema_exits = @db.table_exists?("#{schema}__schema_info".to_sym)
    @db.run "CREATE SCHEMA #{EventStore.schema};" unless schema_exits
    Sequel::Migrator.run(@db, @migrations_dir, :table=> "#{schema}__schema_info".to_sym)
  end

  def self.vertica_host
    File.read File.expand_path("../../db/vertica_host_address.txt", __FILE__)
  end
end
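Editor's note: taken together, the module methods above expose two entry points: EventStore.postgres / EventStore.vertica for environments driven by db/database.yml, and EventStore.production for explicit configuration. A minimal bootstrap sketch follows; the connection hashes are invented for illustration and are not values shipped with the gem.

    require 'event_store'

    # Test/development: reads db/database.yml, connects Redis, runs db/pg_migrations
    EventStore.postgres(:test)

    # Production: the caller supplies both configurations explicitly
    EventStore.production(
      { adapter: 'postgres', host: 'db.example.com', database: 'events', user: 'events' }, # hypothetical
      { host: 'redis.example.com', port: 6379 }                                            # hypothetical
    )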
data/lib/event_store/aggregate.rb
ADDED
@@ -0,0 +1,72 @@
module EventStore
  class Aggregate

    attr_reader :id, :type, :snapshot_table, :snapshot_version_table, :event_table

    def initialize(id, type = EventStore.table_name)
      @id = id
      @type = type
      @schema = EventStore.schema
      @event_table = EventStore.fully_qualified_table
      @snapshot_table = "#{@type}_snapshots_for_#{@id}"
      @snapshot_version_table = "#{@type}_snapshot_versions_for_#{@id}"
    end

    def events
      @events_query ||= EventStore.db.from(@event_table).where(:aggregate_id => @id.to_s).order(:version)
    end

    def snapshot
      events_hash = auto_rebuild_snapshot(read_raw_snapshot)
      snap = []
      events_hash.each_pair do |key, value|
        raw_event = value.split(EventStore::SNAPSHOT_DELIMITER)
        fully_qualified_name = key
        version = raw_event.first.to_i
        serialized_event = raw_event[1]
        occurred_at = Time.parse(raw_event.last)
        snap << SerializedEvent.new(fully_qualified_name, serialized_event, version, occurred_at)
      end
      snap.sort {|a,b| a.version <=> b.version}
    end

    def rebuild_snapshot!
      delete_snapshot!
      corrected_events = events.all.map{|e| e[:occurred_at] = TimeHacker.translate_occurred_at_from_local_to_gmt(e[:occurred_at]); e}
      EventAppender.new(self).store_snapshot(corrected_events)
    end

    def events_from(version_number, max = nil)
      events.limit(max).where{ version >= version_number.to_i }.all
    end

    def last_event
      snapshot.last
    end

    def version
      (EventStore.redis.hget(@snapshot_version_table, :current_version) || -1).to_i
    end

    def delete_snapshot!
      EventStore.redis.del [@snapshot_table, @snapshot_version_table]
    end

    def delete_events!
      events.delete
    end

    private
    def auto_rebuild_snapshot(events_hash)
      return events_hash unless events_hash.empty?
      event = events.select(:version).limit(1).all
      return events_hash if event.nil?
      rebuild_snapshot!
      events_hash = read_raw_snapshot
    end

    def read_raw_snapshot
      EventStore.redis.hgetall(@snapshot_table)
    end
  end
end
data/lib/event_store/client.rb
ADDED
@@ -0,0 +1,87 @@
module EventStore
  class Client

    def initialize( aggregate_id, aggregate_type = EventStore.table_name)
      @aggregate = Aggregate.new(aggregate_id, aggregate_type)
    end

    def id
      @aggregate.id
    end

    def type
      @aggregate.type
    end

    def event_table
      @aggregate.event_table
    end

    def append event_data
      event_appender.append(event_data)
      yield(event_data) if block_given?
      nil
    end

    def snapshot
      raw_snapshot
    end

    def event_stream
      translate_events raw_event_stream
    end

    def event_stream_from version_number, max=nil
      translate_events @aggregate.events_from(version_number, max)
    end

    def peek
      translate_event @aggregate.last_event
    end

    def raw_snapshot
      @aggregate.snapshot
    end

    def raw_event_stream
      @aggregate.events.all
    end

    def raw_event_stream_from version_number, max=nil
      @aggregate.events_from(version_number, max)
    end

    def version
      @aggregate.version
    end

    def count
      event_stream.length
    end

    def destroy!
      @aggregate.delete_events!
      @aggregate.delete_snapshot!
    end

    def rebuild_snapshot!
      @aggregate.delete_snapshot!
      @aggregate.rebuild_snapshot!
    end

    private

    def event_appender
      EventAppender.new(@aggregate)
    end

    def translate_events(event_hashs)
      event_hashs.map { |eh| translate_event(eh) }
    end

    def translate_event(event_hash)
      occurred_at = TimeHacker.translate_occurred_at_from_local_to_gmt(event_hash[:occurred_at])
      SerializedEvent.new event_hash[:fully_qualified_name], event_hash[:serialized_event], event_hash[:version], occurred_at
    end
  end
end
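Editor's note: combined with the EventStore::Event struct from lib/event_store.rb, a hedged usage sketch of the client API looks roughly like the following; the aggregate id and payload are invented for illustration.

    client = EventStore::Client.new('device-123')          # aggregate_type defaults to EventStore.table_name

    serialized = "\x01\x02\x03"                            # caller-provided binary payload (illustrative)
    event = EventStore::Event.new('device-123',            # aggregate_id
                                  Time.now.utc,            # occurred_at
                                  'fan_on_updated',        # fully_qualified_name
                                  serialized,              # serialized_event
                                  client.version + 1)      # version must exceed the current version

    client.append([event])                                 # validated and concurrency-checked by EventAppender
    client.event_stream.each { |e| puts "#{e.fully_qualified_name} v#{e.version}" }
    client.peek                                            # latest SerializedEvent, served from the Redis snapshot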
data/lib/event_store/event_appender.rb
ADDED
@@ -0,0 +1,83 @@
module EventStore
  class EventAppender

    def initialize aggregate
      @aggregate = aggregate
    end

    def append raw_events
      EventStore.db.transaction do
        set_current_version

        prepared_events = raw_events.map do |raw_event|
          event = prepare_event(raw_event)
          validate! event
          raise concurrency_error(event) if has_concurrency_issue?(event)
          event
        end
        # All concurrency issues need to be checked before persisting any of the events
        # Otherwise, the newly appended events may raise erroneous concurrency errors
        result = @aggregate.events.multi_insert(prepared_events)
        store_snapshot(prepared_events) unless result.nil?
        result
      end
    end

    def store_snapshot(prepared_events)
      r = EventStore.redis
      current_version_numbers = r.hgetall(@aggregate.snapshot_version_table)
      current_version_numbers.default = -1
      valid_snapshot_events = []
      valid_snapshot_versions = []
      prepared_events.each do |event|
        if event[:version].to_i > current_version_numbers[event[:fully_qualified_name]].to_i
          valid_snapshot_events << event[:fully_qualified_name]
          valid_snapshot_events << (event[:version].to_s + EventStore::SNAPSHOT_DELIMITER + event[:serialized_event] + EventStore::SNAPSHOT_DELIMITER + event[:occurred_at].to_s)
          valid_snapshot_versions << event[:fully_qualified_name]
          valid_snapshot_versions << event[:version]
        end
      end
      unless valid_snapshot_versions.empty?
        last_version = valid_snapshot_versions.last
        valid_snapshot_versions << :current_version
        valid_snapshot_versions << last_version.to_i
        r.multi do
          r.hmset(@aggregate.snapshot_version_table, valid_snapshot_versions)
          r.hmset(@aggregate.snapshot_table, valid_snapshot_events)
        end
      end
    end

    private
    def has_concurrency_issue? event
      event[:version] <= current_version
    end

    def prepare_event raw_event
      { :version => raw_event.version.to_i,
        :aggregate_id => raw_event.aggregate_id,
        :occurred_at => Time.parse(raw_event.occurred_at.to_s).utc, #to_s truncates microseconds, which brake Time equality
        :serialized_event => raw_event.serialized_event,
        :fully_qualified_name => raw_event.fully_qualified_name }
    end

    def concurrency_error event
      ConcurrencyError.new("The version of the event being added (version #{event[:version]}) is <= the current version (version #{current_version})")
    end

    private
    def current_version
      @current_version ||= @aggregate.version
    end
    alias :set_current_version :current_version

    def validate! event_hash
      [:aggregate_id, :fully_qualified_name, :occurred_at, :serialized_event, :version].each do |attribute_name|
        if event_hash[attribute_name].to_s.strip.empty?
          raise AttributeMissingError, "value required for #{attribute_name}"
        end
      end
    end

  end
end
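Editor's note: store_snapshot keeps two Redis hashes per aggregate, keyed by fully qualified event name; Aggregate#snapshot and Aggregate#version read them back. A rough sketch of the layout, with placeholders in angle brackets and invented field values:

    # <table_name>_snapshots_for_<aggregate_id>
    #   "fan_on_updated" => "7__NexEvStDelim__<serialized bytes>__NexEvStDelim__2014-03-01 12:00:00 UTC"
    #   "mode_updated"   => "9__NexEvStDelim__<serialized bytes>__NexEvStDelim__2014-03-01 12:05:00 UTC"
    #
    # <table_name>_snapshot_versions_for_<aggregate_id>
    #   "fan_on_updated"  => "7"
    #   "mode_updated"    => "9"
    #   "current_version" => "9"   # last version written in this batch; read back by Aggregate#version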