nexia_event_store 0.2.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
# Fixture generator: writes ./data.sql, a shuffled stream of INSERT
# statements for events.device_events, used to seed a test database.
# require_relative '../../protocol_buffers/lib/protocol_buffers'
require 'faceplate_api'
require "faceplate_api/thermostats/test_support"
require 'securerandom'
require 'time'
include FaceplateApi

# Every event type an EventFixture event mother can build.
event_names = [:firmware_version_updated, :fan_on_updated, :fan_mode_updated, :configuration_lock_updated, :display_lock_updated,
               :mode_updated, :system_name_updated, :operation_status_updated, :relative_airflow_updated, :balance_point_updated, :indoor_temperature_updated,
               :temperature_setpoint_updated, :sensor_added, :sensor_removed, :sensor_updated, :zone_added, :zone_removed, :zone_updated, :preset_added,
               :preset_removed, :preset_updated, :preset_activated, :relative_humidity_setpoint_updated, :event_schedule_added, :event_schedule_removed, :event_schedule_updated, :event_schedule_activated]

# Mix of fixed ids and random UUIDs so both id formats appear in the data.
aggregate_ids = ["ASDFDS12939", "1SQFDS12B39", "103MMV", SecureRandom.uuid, SecureRandom.uuid, "10PM93BU37"]
ITERATIONS = 5
# Pool of version numbers available to each device; each version is used once.
versions_per_device = (0..(event_names.length * ITERATIONS)).to_a

mothers = {}
aggregate_ids.each do |aggregate_id|
  mother = FaceplateApi::EventFixture.new(header: {device_id: aggregate_id}).event_mother
  mothers[mother] = versions_per_device.dup
end

File.open('./data.sql', 'w') do |f|
  (event_names * ITERATIONS * ITERATIONS).shuffle.each do |name|
    event_mother = mothers.keys.sample
    event = event_mother.send(name)
    version = mothers[event_mother].shift
    # BUG FIX: random sampling can exhaust a device's version pool; `shift`
    # then returns nil and the emitted SQL is syntactically invalid
    # (`values (..., , ...)`). Skip the row instead of writing broken SQL.
    next if version.nil?
    # NOTE(review): values are interpolated unescaped. Acceptable for trusted
    # fixture data, but a serialized event containing a quote would break the
    # statement — confirm the fixtures never contain single quotes.
    f.puts "INSERT INTO events.device_events(aggregate_id, version, occurred_at, serialized_event, fully_qualified_name) values ('#{event_mother.device_id}', #{version}, '#{DateTime.now.iso8601}', '#{event.to_s}', '#{name}');"
  end
  f.puts 'commit;'
end
require 'event_store'

# Vertica migration: creates the events table (partitioned by year+month of
# occurred_at) and two projections — one ordered for (aggregate_id, version)
# reads, one for runtime-history queries ordered by occurrence time.
Sequel.migration do
  up do

    # NOTE(review): the projection name "_super_projecion" is misspelled, but
    # it is a released schema identifier — renaming it here would orphan the
    # projection in existing databases, so it is preserved as-is.
    run %Q<CREATE TABLE #{EventStore.fully_qualified_table} (
          id AUTO_INCREMENT PRIMARY KEY,
          version BIGINT NOT NULL,
          aggregate_id varchar(36) NOT NULL,
          fully_qualified_name varchar(512) NOT NULL,
          occurred_at TIMESTAMPTZ NOT NULL,
          serialized_event VARBINARY(32768) NOT NULL)

          PARTITION BY EXTRACT(year FROM occurred_at AT TIME ZONE 'UTC')*100 + EXTRACT(month FROM occurred_at AT TIME ZONE 'UTC');

          CREATE PROJECTION #{EventStore.fully_qualified_table}_super_projecion /*+createtype(D)*/
          (
           id ENCODING COMMONDELTA_COMP,
           version ENCODING COMMONDELTA_COMP,
           aggregate_id ENCODING RLE,
           fully_qualified_name ENCODING AUTO,
           occurred_at ENCODING BLOCKDICT_COMP,
           serialized_event ENCODING AUTO
          )
          AS
          SELECT id,
                 version,
                 aggregate_id,
                 fully_qualified_name,
                 occurred_at,
                 serialized_event
          FROM #{EventStore.fully_qualified_table}
          ORDER BY aggregate_id,
                   version
          SEGMENTED BY HASH(aggregate_id) ALL NODES
          KSAFE 1;

          CREATE PROJECTION #{EventStore.fully_qualified_table}_runtime_history_projection /*+createtype(D)*/
          (
           version ENCODING DELTAVAL,
           aggregate_id ENCODING RLE,
           fully_qualified_name ENCODING RLE,
           occurred_at ENCODING RLE,
           serialized_event ENCODING AUTO
          )
          AS
          SELECT version,
                 aggregate_id,
                 fully_qualified_name,
                 occurred_at,
                 serialized_event
          FROM #{EventStore.fully_qualified_table}
          ORDER BY aggregate_id,
                   occurred_at,
                   fully_qualified_name
          SEGMENTED BY HASH(aggregate_id) ALL NODES
          KSAFE 1;>
  end

  down do
    # BUG FIX: this string was single-quoted, so `#{EventStore.schema}` was
    # never interpolated and the literal text `#{EventStore.schema}` was sent
    # to the database, making the rollback fail. Double quotes interpolate.
    run "DROP SCHEMA #{EventStore.schema} CASCADE;"
  end
end
require 'event_store'

# Postgres migration: creates the event table under the configured schema
# with an index on every queryable column. Reversible via Sequel's `change`.
Sequel.migration do
  change do
    # Table name is "<schema>__<table>", matching EventStore's naming scheme.
    qualified_name = "#{EventStore.schema}__#{EventStore.table_name}".to_sym
    create_table(qualified_name) do
      primary_key :id
      Bignum      :version
      index       :version
      String      :aggregate_id
      index       :aggregate_id
      String      :fully_qualified_name
      index       :fully_qualified_name
      DateTime    :occurred_at
      index       :occurred_at
      bytea       :serialized_event
    end
  end
end
/* intentional: this will not drop and recreate the database if it already exists */
/* Mainly for CI. If you install with brew, you likely have authentication configured as "TRUST" for all */
/* FIX: UNENCRYPTED PASSWORD was deprecated and then removed in PostgreSQL 10;
   plain PASSWORD is accepted by every supported PostgreSQL version. */
CREATE USER nexia WITH PASSWORD 'Password1';
GRANT ALL ON DATABASE history_store TO nexia;
# coding: utf-8
lib = File.expand_path('../lib', __FILE__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
require 'event_store/version'

Gem::Specification.new do |spec|
  spec.name          = 'nexia_event_store'
  spec.version       = EventStore::VERSION
  # FIX: authors/email entries were comma-joined inside single array elements;
  # RubyGems expects one element per person / address.
  spec.authors       = ['Paul Saieg', 'John Colvin', 'Stuart Nelson']
  # FIX: description must be a String — it was an Array, which RubyGems
  # warns about and renders via Array#to_s.
  spec.description   = 'A Ruby implementation of an EventSource (A+ES) tuned for Vertica or Postgres'
  spec.email         = ['classicist@gmail.com', 'jgeiger@gmail.com']
  spec.summary       = %q{Ruby implementation of an EventSource (A+ES) for the Nexia Ecosystem}
  spec.homepage      = 'https://github.com/nexiahome/event_store'
  spec.license       = 'MIT'

  spec.files         = `git ls-files`.split($/)
  spec.executables   = spec.files.grep(%r{^bin/}) { |f| File.basename(f) }
  spec.test_files    = spec.files.grep(%r{^(test|spec|features)/})
  spec.require_paths = ['lib']

  spec.add_development_dependency 'bundler', '~> 1.7'
  spec.add_development_dependency 'rake', '~> 0'
  spec.add_development_dependency 'rspec', '~> 3.1'
  spec.add_development_dependency 'rspec_junit_formatter', '~> 0.2'
  spec.add_development_dependency 'simplecov', '~> 0.9'
  spec.add_development_dependency 'simplecov-rcov', '~> 0.2'
  spec.add_development_dependency 'guard-rspec', '~> 4.3'
  spec.add_development_dependency 'pry-byebug', '~> 2.0'
  spec.add_development_dependency 'mock_redis', '~> 0.13'

  spec.add_dependency 'sequel', '~> 4.14'
  spec.add_dependency 'nexia-sequel-vertica', '~> 0.2'
  spec.add_dependency 'pg', '~> 0.17'
  spec.add_dependency 'redis', '~> 3.1'
  spec.add_dependency 'hiredis', '~> 0.5'
end
module EventStore
  # Read-side access to a single aggregate's event stream, plus its
  # Redis-backed snapshot (the latest event per fully-qualified name).
  class Aggregate

    attr_reader :id, :type, :snapshot_table, :snapshot_version_table, :event_table

    # Number of distinct aggregates present in the event table.
    def self.count
      EventStore.db.from( EventStore.fully_qualified_table).distinct(:aggregate_id).count
    end

    # A page of distinct aggregate ids, ordered, for batch processing.
    def self.ids(offset, limit)
      EventStore.db.from( EventStore.fully_qualified_table).distinct(:aggregate_id).select(:aggregate_id).order(:aggregate_id).limit(limit, offset).all.map{|item| item[:aggregate_id]}
    end

    def initialize(id, type = EventStore.table_name)
      @id = id
      @type = type
      @schema = EventStore.schema
      @event_table = EventStore.fully_qualified_table
      # Redis hash keys, namespaced per aggregate.
      @snapshot_table = "#{@type}_snapshots_for_#{@id}"
      @snapshot_version_table = "#{@type}_snapshot_versions_for_#{@id}"
    end

    # Memoized Sequel dataset of this aggregate's events, oldest version first.
    def events
      @events_query ||= EventStore.db.from(@event_table).where(:aggregate_id => @id.to_s).order(:version)
    end

    # Materializes the Redis snapshot hash into SerializedEvents sorted by
    # version, rebuilding the snapshot from the DB first if Redis is empty.
    def snapshot
      events_hash = auto_rebuild_snapshot(read_raw_snapshot)
      snap = []
      events_hash.each_pair do |key, value|
        # Each value is "version<DELIM>escaped_event<DELIM>occurred_at".
        raw_event = value.split(EventStore::SNAPSHOT_DELIMITER)
        fully_qualified_name = key
        version = raw_event.first.to_i
        serialized_event = EventStore.unescape_bytea(raw_event[1])
        occurred_at = Time.parse(raw_event.last)
        snap << SerializedEvent.new(fully_qualified_name, serialized_event, version, occurred_at)
      end
      snap.sort_by(&:version)
    end

    # Discards the Redis snapshot and rebuilds it from the full event stream.
    def rebuild_snapshot!
      delete_snapshot!
      corrected_events = events.all.map{|e| e[:occurred_at] = TimeHacker.translate_occurred_at_from_local_to_gmt(e[:occurred_at]); e}
      EventAppender.new(self).store_snapshot(corrected_events)
    end

    # Events with version >= version_number (up to `max` rows), with the
    # serialized payload unescaped for consumption.
    def events_from(version_number, max = nil)
      events.limit(max).where{ version >= version_number.to_i }.all.map do |event|
        event[:serialized_event] = EventStore.unescape_bytea(event[:serialized_event])
        event
      end
    end

    # Events whose occurred_at falls within [start_time, end_time], optionally
    # filtered to the given fully-qualified names.
    def event_stream_between(start_time, end_time, fully_qualified_names = [])
      query = events.where(occurred_at: start_time..end_time)
      query = query.where(fully_qualified_name: fully_qualified_names) if fully_qualified_names && fully_qualified_names.any?
      query.all.map {|e| e[:serialized_event] = EventStore.unescape_bytea(e[:serialized_event]); e}
    end

    # The complete event stream, payloads unescaped.
    def event_stream
      events.all.map {|e| e[:serialized_event] = EventStore.unescape_bytea(e[:serialized_event]); e}
    end

    # Highest-versioned event, from the snapshot.
    def last_event
      snapshot.last
    end

    # Current aggregate version per Redis, or -1 when no snapshot exists.
    def version
      (EventStore.redis.hget(@snapshot_version_table, :current_version) || -1).to_i
    end

    def delete_snapshot!
      EventStore.redis.del [@snapshot_table, @snapshot_version_table]
    end

    def delete_events!
      events.delete
    end

    private
    # Returns events_hash untouched when it is populated or when the DB has
    # no events; otherwise rebuilds the snapshot and returns the fresh hash.
    def auto_rebuild_snapshot(events_hash)
      return events_hash unless events_hash.empty?
      event = events.select(:version).limit(1).all
      # BUG FIX: Dataset#all returns an array and never nil, so the previous
      # `event.nil?` guard could never fire — an aggregate with zero events
      # triggered a pointless rebuild on every snapshot read. Check emptiness.
      return events_hash if event.empty?
      rebuild_snapshot!
      read_raw_snapshot
    end

    def read_raw_snapshot
      EventStore.redis.hgetall(@snapshot_table)
    end
  end
end
module EventStore
  # Public facade over Aggregate: the append/read API application code uses.
  # "raw" variants return DB row hashes; the non-raw variants translate rows
  # into SerializedEvent structs with UTC timestamps.
  class Client

    # Number of distinct aggregates in the store.
    def self.count
      Aggregate.count
    end

    # A page of distinct aggregate ids.
    def self.ids(offset, limit)
      Aggregate.ids(offset, limit)
    end

    def initialize( aggregate_id, aggregate_type = EventStore.table_name)
      @aggregate = Aggregate.new(aggregate_id, aggregate_type)
    end

    # -- simple delegations to the underlying aggregate -------------------
    def id;          @aggregate.id;          end
    def type;        @aggregate.type;        end
    def event_table; @aggregate.event_table; end

    # Persists event_data, yields it to an optional block, returns nil.
    def append event_data
      event_appender.append(event_data)
      yield(event_data) if block_given?
      nil
    end

    def snapshot;         raw_snapshot;                     end
    def event_stream;     translate_events raw_event_stream; end

    # Translated events with version >= version_number (at most `max`).
    def event_stream_from version_number, max=nil
      translate_events @aggregate.events_from(version_number, max)
    end

    # Translated events between two times, optionally name-filtered.
    def event_stream_between(start_time, end_time, fully_qualified_names = [])
      translate_events @aggregate.event_stream_between(start_time, end_time, fully_qualified_names)
    end

    # Most recent event, translated.
    def peek
      translate_event @aggregate.last_event
    end

    def raw_snapshot;     @aggregate.snapshot;     end
    def raw_event_stream; @aggregate.event_stream; end

    def raw_event_stream_from version_number, max=nil
      @aggregate.events_from(version_number, max)
    end

    def version; @aggregate.version; end

    # Total number of events for this aggregate.
    def count
      event_stream.length
    end

    # Removes both the events and the snapshot for this aggregate.
    def destroy!
      @aggregate.delete_events!
      @aggregate.delete_snapshot!
    end

    def rebuild_snapshot!
      @aggregate.delete_snapshot!
      @aggregate.rebuild_snapshot!
    end

    private

    def event_appender
      EventAppender.new(@aggregate)
    end

    # Converts a list of DB rows into SerializedEvent structs.
    def translate_events(rows)
      rows.map { |row| translate_event(row) }
    end

    # Converts one DB row, normalizing occurred_at to UTC wall-clock time.
    def translate_event(row)
      utc_time = TimeHacker.translate_occurred_at_from_local_to_gmt(row[:occurred_at])
      SerializedEvent.new row[:fully_qualified_name], row[:serialized_event], row[:version], utc_time
    end
  end
end
module EventStore
  # Raised when a required event attribute is blank or missing.
  class AttributeMissingError < StandardError; end

  # Raised when an appended event's version is not newer than the
  # aggregate's current version.
  class ConcurrencyError < StandardError; end
end
module EventStore
  # Appends events to an aggregate's stream (transactionally, with optimistic
  # concurrency checks) and keeps its Redis snapshot up to date.
  class EventAppender

    def initialize aggregate
      @aggregate = aggregate
    end

    # Validates, concurrency-checks, and inserts raw_events in one DB
    # transaction, then refreshes the snapshot. Raises ConcurrencyError or
    # AttributeMissingError; returns the multi_insert result.
    def append raw_events
      EventStore.db.transaction do
        set_current_version

        prepared_events = raw_events.map do |raw_event|
          event = prepare_event(raw_event)
          validate! event
          raise concurrency_error(event) if has_concurrency_issue?(event)
          event
        end
        # All concurrency issues need to be checked before persisting any of the events
        # Otherwise, the newly appended events may raise erroneous concurrency errors
        result = @aggregate.events.multi_insert(prepared_events)
        store_snapshot(prepared_events) unless result.nil?
        result
      end
    end

    # Writes each event into the Redis snapshot hash if its version is newer
    # than the version already recorded for that fully-qualified name, and
    # bumps the aggregate's :current_version marker.
    def store_snapshot(prepared_events)
      r = EventStore.redis
      current_version_numbers = r.hgetall(@aggregate.snapshot_version_table)
      current_version_numbers.default = -1
      valid_snapshot_events = []
      valid_snapshot_versions = []
      prepared_events.each do |event|
        if event[:version].to_i > current_version_numbers[event[:fully_qualified_name]].to_i
          # hmset argument lists: alternating field, value pairs.
          valid_snapshot_events << event[:fully_qualified_name]
          valid_snapshot_events << (event[:version].to_s + EventStore::SNAPSHOT_DELIMITER + event[:serialized_event] + EventStore::SNAPSHOT_DELIMITER + event[:occurred_at].to_s)
          valid_snapshot_versions << event[:fully_qualified_name]
          valid_snapshot_versions << event[:version]
        end
      end
      unless valid_snapshot_versions.empty?
        last_version = valid_snapshot_versions.last
        valid_snapshot_versions << :current_version
        valid_snapshot_versions << last_version.to_i
        # MULTI so version table and snapshot table update atomically.
        r.multi do
          r.hmset(@aggregate.snapshot_version_table, valid_snapshot_versions)
          r.hmset(@aggregate.snapshot_table, valid_snapshot_events)
        end
      end
    end

    private
    # FIX: a second, redundant `private` keyword further down has been
    # removed; everything below this point was already private.

    def has_concurrency_issue? event
      event[:version] <= current_version
    end

    # Normalizes a raw event into the column hash the events table expects.
    def prepare_event raw_event
      raise ArgumentError.new("Cannot Append a Nil Event") unless raw_event
      { :version => raw_event.version.to_i,
        :aggregate_id => raw_event.aggregate_id,
        :occurred_at => Time.parse(raw_event.occurred_at.to_s).utc, #to_s truncates microseconds, which brake Time equality
        :serialized_event => EventStore.escape_bytea(raw_event.serialized_event),
        :fully_qualified_name => raw_event.fully_qualified_name }
    end

    def concurrency_error event
      ConcurrencyError.new("The version of the event being added (version #{event[:version]}) is <= the current version (version #{current_version})")
    end

    def current_version
      @current_version ||= @aggregate.version
    end
    # The alias exists so `append` can force the memoized read to happen
    # inside the transaction, pinning one consistent version for all checks.
    alias :set_current_version :current_version

    # Raises AttributeMissingError unless every required column is non-blank.
    def validate! event_hash
      [:aggregate_id, :fully_qualified_name, :occurred_at, :serialized_event, :version].each do |attribute_name|
        if event_hash[attribute_name].to_s.strip.empty?
          raise AttributeMissingError, "value required for #{attribute_name}"
        end
      end
    end

  end
end
module EventStore
  # Works around DB adapters that hydrate timestamps into the local ruby
  # timezone: keeps the wall-clock reading but forces the zone to UTC.
  class TimeHacker
    class << self
      # Returns a UTC Time carrying the same wall-clock fields as occurred_at.
      # NOTE(review): any type other than Time/DateTime falls through and
      # yields nil — presumably intentional; confirm against callers.
      def translate_occurred_at_from_local_to_gmt(occurred_at)
        if occurred_at.instance_of?(Time)
          # e.g. "2001-02-03 01:26:40 -0700" -> "2001-02-03 01:26:40 UTC"
          stripped = occurred_at.to_s.gsub(/\s[+-]\d+$/, ' UTC')
          Time.parse(stripped)
        elsif occurred_at.instance_of?(DateTime)
          # e.g. "2001-02-03T01:26:40+00:00" -> "2001-02-03 01:26:40 UTC"
          stripped = occurred_at.iso8601.gsub('T', ' ').gsub(/[+-]\d{2}\:\d{2}/, ' UTC')
          Time.parse(stripped)
        end
      end
    end
  end
end