event_sourcery-postgres 0.3.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/.gitignore +13 -0
- data/.rspec +3 -0
- data/.travis.yml +12 -0
- data/CHANGELOG.md +32 -0
- data/CODE_OF_CONDUCT.md +74 -0
- data/Gemfile +5 -0
- data/LICENSE.txt +21 -0
- data/README.md +102 -0
- data/Rakefile +6 -0
- data/bin/console +14 -0
- data/bin/setup +15 -0
- data/event_sourcery-postgres.gemspec +33 -0
- data/lib/event_sourcery/postgres.rb +28 -0
- data/lib/event_sourcery/postgres/config.rb +47 -0
- data/lib/event_sourcery/postgres/event_store.rb +144 -0
- data/lib/event_sourcery/postgres/optimised_event_poll_waiter.rb +81 -0
- data/lib/event_sourcery/postgres/projector.rb +42 -0
- data/lib/event_sourcery/postgres/queue_with_interval_callback.rb +33 -0
- data/lib/event_sourcery/postgres/reactor.rb +72 -0
- data/lib/event_sourcery/postgres/schema.rb +150 -0
- data/lib/event_sourcery/postgres/table_owner.rb +74 -0
- data/lib/event_sourcery/postgres/tracker.rb +90 -0
- data/lib/event_sourcery/postgres/version.rb +5 -0
- data/script/bench_reading_events.rb +63 -0
- data/script/bench_writing_events.rb +47 -0
- data/script/demonstrate_event_sequence_id_gaps.rb +181 -0
- metadata +181 -0
@@ -0,0 +1,90 @@
|
|
1
|
+
module EventSourcery
  module Postgres
    # Persists each event stream processor's position (the last processed
    # event sequence ID) in a Postgres tracker table, and can guard against
    # two instances of the same processor running concurrently by holding a
    # Postgres advisory lock keyed on the processor's tracker row ID.
    class Tracker
      # @param connection Sequel database connection for projections
      # @param table_name [Symbol] name of the tracker table
      # @param obtain_processor_lock [Boolean] take a global advisory lock
      #   per processor during setup
      def initialize(connection = EventSourcery::Postgres.config.projections_database,
                     table_name: EventSourcery::Postgres.config.tracker_table_name,
                     obtain_processor_lock: true)
        @connection = connection
        @table_name = table_name
        @obtain_processor_lock = obtain_processor_lock
      end

      # Prepare tracking for the given processor: optionally auto-create the
      # tracker table, verify it exists, ensure the processor has a tracker
      # row, and (unless disabled) obtain a global advisory lock for it.
      #
      # @raise [UnableToLockProcessorError] if the tracker table is missing
      #   or the advisory lock cannot be obtained
      def setup(processor_name = nil)
        create_table_if_not_exists if EventSourcery::Postgres.config.auto_create_projector_tracker

        if !tracker_table_exists?
          raise UnableToLockProcessorError, "Projector tracker table does not exist"
        end

        return unless processor_name

        create_track_entry_if_not_exists(processor_name)
        obtain_global_lock_on_processor(processor_name) if @obtain_processor_lock
      end

      # Record that the given event has been processed. Always returns true.
      def processed_event(processor_name, event_id)
        scope = table.where(name: processor_name.to_s)
        scope.update(last_processed_event_id: event_id)
        true
      end

      # Run the given block and record the event as processed inside a single
      # transaction, so the work and the tracker update commit atomically.
      def processing_event(processor_name, event_id)
        @connection.transaction do
          yield
          processed_event(processor_name, event_id)
        end
      end

      # Rewind the processor's position to 0 (i.e. reprocess from the start).
      def reset_last_processed_event_id(processor_name)
        scope = table.where(name: processor_name.to_s)
        scope.update(last_processed_event_id: 0)
      end

      # The processor's last processed event sequence ID, or nil when the
      # processor has no tracker row.
      def last_processed_event_id(processor_name)
        row = table.where(name: processor_name.to_s).first
        row && row[:last_processed_event_id]
      end

      # Names of all processors known to the tracker table.
      def tracked_processors
        table.select_map(:name)
      end

      private

      # Try to take a session-wide advisory lock keyed on the tracker row ID.
      # pg_try_advisory_lock returns immediately; false means another session
      # already holds the lock for this processor.
      def obtain_global_lock_on_processor(processor_name)
        row = @connection.fetch("select pg_try_advisory_lock(#{@track_entry_id})").to_a.first
        return unless row[:pg_try_advisory_lock] == false

        raise UnableToLockProcessorError, "Unable to get a lock on #{processor_name} #{@track_entry_id}"
      end

      def create_table_if_not_exists
        return if tracker_table_exists?

        EventSourcery.logger.info { "Projector tracker missing - attempting to create 'projector_tracker' table" }
        EventSourcery::Postgres::Schema.create_projector_tracker(db: @connection, table_name: @table_name)
      end

      # Ensure the processor has a tracker row, remembering its primary key
      # (used as the advisory lock key). Sequel's insert returns the new ID.
      def create_track_entry_if_not_exists(processor_name)
        existing = table.where(name: processor_name.to_s).first
        @track_entry_id =
          if existing
            existing[:id]
          else
            table.insert(name: processor_name.to_s, last_processed_event_id: 0)
          end
      end

      def table
        @connection[@table_name]
      end

      def tracker_table_exists?
        @connection.table_exists?(@table_name)
      end
    end
  end
end
|
@@ -0,0 +1,63 @@
|
|
1
|
+
# Benchmarks reading (subscribing to) events from a Postgres event store.
#
# Usage:
#
# ❯ bundle exec ruby script/bench_reading_events.rb
# Creating 10000 events
# Took 42.35533199999918 to create events
# Took 4.9821800000027 to read all events
# ^ results from running on a 2016 MacBook

require 'benchmark'
require 'securerandom'
require 'sequel'
require 'event_sourcery/postgres'

# Build the test database URI. BOXEN_POSTGRESQL_URL, if set, is assumed to
# end with a trailing slash — TODO confirm. `.dup` because the fetched
# string may be frozen and is appended to below.
pg_uri = ENV.fetch('BOXEN_POSTGRESQL_URL') { 'postgres://127.0.0.1:5432/' }.dup
pg_uri << 'event_sourcery_test'
pg_connection = Sequel.connect(pg_uri)

EventSourcery.configure do |config|
  config.postgres.event_store_database = pg_connection
  config.postgres.projections_database = pg_connection
  # Silence per-event logging so it doesn't distort the benchmark.
  config.logger.level = :fatal
end

# Drop and recreate the event store tables so each run starts empty.
def create_events_schema(pg_connection)
  pg_connection.execute 'drop table if exists events'
  pg_connection.execute 'drop table if exists aggregates'
  EventSourcery::Postgres::Schema.create_event_store(db: pg_connection)
end

event_store = EventSourcery::Postgres.config.event_store

EVENT_TYPES = %i[
  item_added
  item_removed
  item_starred
]

# Builds an event of a random type against the given aggregate.
def new_event(uuid)
  EventSourcery::Event.new(type: EVENT_TYPES.sample,
                           aggregate_id: uuid,
                           body: { 'something' => 'simple' })
end

create_events_schema(pg_connection)

NUM_EVENTS = 10_000
puts "Creating #{NUM_EVENTS} events"
time = Benchmark.realtime do
  # All events target a single aggregate.
  uuid = SecureRandom.uuid
  NUM_EVENTS.times do
    event_store.sink(new_event(uuid))
  end
end
puts "Took #{time} to create events"

seen_events_count = 0
time = Benchmark.realtime do
  # Stream every event from the beginning; `throw :stop` is presumably
  # caught inside `subscribe` to end the subscription loop — verify against
  # the event store implementation.
  event_store.subscribe(from_id: 0, subscription_master: EventSourcery::EventStore::SignalHandlingSubscriptionMaster.new) do |events|
    seen_events_count += events.count
    throw :stop if seen_events_count >= NUM_EVENTS
  end
end
puts "Took #{time} to read all events"
|
@@ -0,0 +1,47 @@
|
|
1
|
+
# Benchmarks writing (sinking) events into a Postgres event store using
# benchmark-ips (iterations per second).
#
# Usage:
#
# ❯ bundle exec ruby script/bench_writing_events.rb
# Warming up --------------------------------------
# event_store.sink
# 70.000 i/100ms
# Calculating -------------------------------------
# event_store.sink
# 522.007 (±10.9%) i/s - 2.590k in 5.021909s
#
# ^ results from running on a 2016 MacBook

require 'benchmark/ips'
require 'securerandom'
require 'sequel'
require 'event_sourcery/postgres'

# Build the test database URI. BOXEN_POSTGRESQL_URL, if set, is assumed to
# end with a trailing slash — TODO confirm. `.dup` because the fetched
# string may be frozen and is appended to below.
pg_uri = ENV.fetch('BOXEN_POSTGRESQL_URL') { 'postgres://127.0.0.1:5432/' }.dup
pg_uri << 'event_sourcery_test'
pg_connection = Sequel.connect(pg_uri)

EventSourcery.configure do |config|
  config.postgres.event_store_database = pg_connection
  config.postgres.projections_database = pg_connection
  # Silence per-event logging so it doesn't distort the benchmark.
  config.logger.level = :fatal
end

# Drop and recreate the event store tables so each run starts empty.
def create_schema(pg_connection)
  pg_connection.execute 'drop table if exists events'
  pg_connection.execute 'drop table if exists aggregates'
  EventSourcery::Postgres::Schema.create_event_store(db: pg_connection)
end

create_schema(pg_connection)
event_store = EventSourcery::Postgres::EventStore.new(pg_connection)

# Builds an event against a fresh aggregate on every call.
def new_event
  EventSourcery::Event.new(type: :item_added,
                           aggregate_id: SecureRandom.uuid,
                           body: { 'something' => 'simple' })
end

Benchmark.ips do |b|
  b.report("event_store.sink") do
    event_store.sink(new_event)
  end
end
|
@@ -0,0 +1,181 @@
|
|
1
|
+
# Demonstrates that sequence IDs may not be inserted linearly with concurrent
# writers.
#
# This script writes events in parallel from a number of forked processes,
# writing events in a continuous loop until the program is interrupted.
# The parent process detects gaps in sequence IDs by selecting the last 2
# events based on sequence ID. A gap is detected when the 2 IDs returned from
# that query aren't sequential. The script will proceed to execute 2 subsequent
# queries to see if they show up in the time it takes to complete those before
# moving on.
#
# An easier way to demonstrate this is by using 2 psql consoles:
#
# - Simulate a transaction taking a long time to commit:
# ```
# begin;
# insert into events (..) values (..);
# ```
# - Then, in another console:
# ```
# insert into events (..) values (..);
# select * from events;
# ```
#
# The result is that event sequence ID 2 is visible, but only when the first
# transaction commits is event sequence ID 1 visible.
#
# Why does this happen?
#
# Sequences in Postgres (and most other DBs) are not transactional, changes
# to the sequence are visible to other transactions immediately. Also, inserts
# from the forked writers may be executed in parallel by postgres.
#
# The process of inserting into a table that has a sequence or serial column is
# to first get the next sequence ID (changing global state), then perform the
# insert statement and later commit. In between these 2 steps the sequence ID
# is taken but not visible in the table until the insert statement is
# committed. Gaps in sequence IDs occur when a process takes a sequence ID and
# commits it while another process is in between those 2 steps.
#
# This means another transaction could have taken the next sequence
# ID and committed before that one commits, resulting in a gap in sequence ID's
# when reading.
#
# Why is this a problem?
#
# Event stream processors use the sequence ID to keep track of where they're up to
# in the events table. If a projector processes an event with sequence ID n, it
# assumes that the next event it needs to process will have a sequence ID > n.
# This approach isn't reliable when sequence IDs appear non-linearly, making it
# possible for event stream processors to skip over events.
#
# How does EventSourcery deal with this?
#
# EventSourcery uses a transaction-level advisory lock to synchronise inserts
# to the events table within the writeEvents function. Alternatives:
#
# - Write events from 1 process only (serialize at the application level)
# - Detect gaps when reading events and allow time for in-flight transactions
# (the gaps) to commit.
# - Built in eventual consistency. Selects would be restricted to events older
# than 500ms-1s or the transaction timeout to give enough time for in-flight
# transactions to commit.
# - Only query events when catching up, after that rely on events to be
# delivered through the pub/sub mechanism. Given events would be received out
# of order under concurrent writes there's potential for processors to process
# a given event twice if they shutdown after processing a sequence that was
# part of a gap.
#
# Usage:
#
# ❯ bundle exec ruby script/demonstrate_event_sequence_id_gaps.rb
# 89847: starting to write events89846: starting to write events
#
# 89848: starting to write events
# 89849: starting to write events
# 89850: starting to write events
# GAP: 1 missing sequence IDs. 78 != 76 + 1. Missing events showed up after 1 subsequent query. IDs: [77]
# GAP: 1 missing sequence IDs. 168 != 166 + 1. Missing events showed up after 1 subsequent query. IDs: [167]
# GAP: 1 missing sequence IDs. 274 != 272 + 1. Missing events showed up after 1 subsequent query. IDs: [273]
# GAP: 1 missing sequence IDs. 341 != 339 + 1. Missing events showed up after 1 subsequent query. IDs: [340]
# GAP: 1 missing sequence IDs. 461 != 459 + 1. Missing events showed up after 1 subsequent query. IDs: [460]
# GAP: 1 missing sequence IDs. 493 != 491 + 1. Missing events showed up after 1 subsequent query. IDs: [492]
# GAP: 2 missing sequence IDs. 621 != 618 + 1. Missing events showed up after 1 subsequent query. IDs: [619, 620]

require 'sequel'
require 'securerandom'
require 'event_sourcery/postgres'

# Opens a fresh connection to the test database. Each forked writer calls
# this itself — Sequel connections must not be shared across fork.
def connect
  pg_uri = ENV.fetch('BOXEN_POSTGRESQL_URL') { 'postgres://127.0.0.1:5432/' }.dup
  pg_uri << 'event_sourcery_test'
  Sequel.connect(pg_uri)
end

EventSourcery.logger.level = :info

# Builds an event against a fresh aggregate on every call.
def new_event
  EventSourcery::Event.new(type: :item_added,
                           aggregate_id: SecureRandom.uuid,
                           body: { 'something' => 'simple' })
end

# Drop and recreate the event store tables so each run starts empty.
def create_events_schema(db)
  db.execute 'drop table if exists events'
  db.execute 'drop table if exists aggregates'
  EventSourcery::Postgres::Schema.create_event_store(db: db)
end

db = connect
create_events_schema(db)
db.disconnect
sleep 0.3

NUM_WRITER_PROCESSES = 5
NUM_WRITER_PROCESSES.times do
  # NOTE(review): Kernel#fork yields nothing to its block, so |pid| here is
  # always nil and unused; Process.pid below reports the child's own PID.
  fork do |pid|
    stop = false
    Signal.trap(:INT) { stop = true }
    db = connect
    # when lock_table is set to true an advisory lock is used to synchronise
    # inserts and no gaps are detected
    event_store = EventSourcery::Postgres::EventStore.new(db, lock_table: false)
    puts "#{Process.pid}: starting to write events"
    until stop
      event_store.sink(new_event)
    end
  end
end

stop = false
Signal.trap(:INT) { stop = true }

# Re-queries the detected gap range (exclusive of both endpoints) to see
# whether the missing rows become visible once their in-flight transactions
# commit. Retries once (2 attempts total) before giving up.
def wait_for_missing_ids(db, first_sequence, last_sequence, attempt: 1)
  missing_ids = db[:events].where(Sequel.lit("id > ? AND id < ?", first_sequence, last_sequence)).order(:id).map {|e| e[:id] }
  expected_missing_ids = (first_sequence+1)..(last_sequence-1)
  if missing_ids == expected_missing_ids.to_a
    print "Missing events showed up after #{attempt} subsequent query. IDs: #{missing_ids}"
  else
    if attempt < 2
      wait_for_missing_ids(db, first_sequence, last_sequence, attempt: attempt + 1)
    else
      print "Missing events didn't show up after #{attempt} subsequent queries"
    end
  end
end

# Poll for gaps until interrupted with SIGINT.
until stop

  # query for the last 2 sequences in the events table
  first_sequence, last_sequence = *db[:events].
    order(Sequel.desc(:id)).
    select(:id).
    limit(2).
    map { |e| e[:id] }.
    reverse

  # Fewer than 2 rows so far — nothing to compare.
  next if first_sequence.nil? || last_sequence.nil?

  if last_sequence != first_sequence + 1
    num_missing = last_sequence - first_sequence - 1
    print "GAP: #{num_missing} missing sequence IDs. #{last_sequence} != #{first_sequence} + 1. "
    wait_for_missing_ids(db, first_sequence, last_sequence)
    puts
  end
end

# SIGINT was also delivered to the forked writers (same process group), so
# they exit their loops; wait for them all before the final scan.
Process.waitall

puts
puts "Looking for gaps in sequence IDs in events table:"
ids = db[:events].select(:id).order(:id).all.map { |e| e[:id] }
expected_ids = (ids.min..ids.max).to_a
missing_ids = (expected_ids - ids)
if missing_ids.empty?
  puts "No remaining gaps"
else
  missing_ids.each do |id|
    puts "Unable to find row with sequence ID #{id}"
  end
end
|
metadata
ADDED
@@ -0,0 +1,181 @@
|
|
1
|
+
--- !ruby/object:Gem::Specification
|
2
|
+
name: event_sourcery-postgres
|
3
|
+
version: !ruby/object:Gem::Version
|
4
|
+
version: 0.3.0
|
5
|
+
platform: ruby
|
6
|
+
authors:
|
7
|
+
- Steve Hodgkiss
|
8
|
+
autorequire:
|
9
|
+
bindir: exe
|
10
|
+
cert_chain: []
|
11
|
+
date: 2017-06-16 00:00:00.000000000 Z
|
12
|
+
dependencies:
|
13
|
+
- !ruby/object:Gem::Dependency
|
14
|
+
name: sequel
|
15
|
+
requirement: !ruby/object:Gem::Requirement
|
16
|
+
requirements:
|
17
|
+
- - "~>"
|
18
|
+
- !ruby/object:Gem::Version
|
19
|
+
version: '4.38'
|
20
|
+
type: :runtime
|
21
|
+
prerelease: false
|
22
|
+
version_requirements: !ruby/object:Gem::Requirement
|
23
|
+
requirements:
|
24
|
+
- - "~>"
|
25
|
+
- !ruby/object:Gem::Version
|
26
|
+
version: '4.38'
|
27
|
+
- !ruby/object:Gem::Dependency
|
28
|
+
name: pg
|
29
|
+
requirement: !ruby/object:Gem::Requirement
|
30
|
+
requirements:
|
31
|
+
- - ">="
|
32
|
+
- !ruby/object:Gem::Version
|
33
|
+
version: '0'
|
34
|
+
type: :runtime
|
35
|
+
prerelease: false
|
36
|
+
version_requirements: !ruby/object:Gem::Requirement
|
37
|
+
requirements:
|
38
|
+
- - ">="
|
39
|
+
- !ruby/object:Gem::Version
|
40
|
+
version: '0'
|
41
|
+
- !ruby/object:Gem::Dependency
|
42
|
+
name: event_sourcery
|
43
|
+
requirement: !ruby/object:Gem::Requirement
|
44
|
+
requirements:
|
45
|
+
- - ">="
|
46
|
+
- !ruby/object:Gem::Version
|
47
|
+
version: 0.10.0
|
48
|
+
type: :runtime
|
49
|
+
prerelease: false
|
50
|
+
version_requirements: !ruby/object:Gem::Requirement
|
51
|
+
requirements:
|
52
|
+
- - ">="
|
53
|
+
- !ruby/object:Gem::Version
|
54
|
+
version: 0.10.0
|
55
|
+
- !ruby/object:Gem::Dependency
|
56
|
+
name: bundler
|
57
|
+
requirement: !ruby/object:Gem::Requirement
|
58
|
+
requirements:
|
59
|
+
- - "~>"
|
60
|
+
- !ruby/object:Gem::Version
|
61
|
+
version: '1.10'
|
62
|
+
type: :development
|
63
|
+
prerelease: false
|
64
|
+
version_requirements: !ruby/object:Gem::Requirement
|
65
|
+
requirements:
|
66
|
+
- - "~>"
|
67
|
+
- !ruby/object:Gem::Version
|
68
|
+
version: '1.10'
|
69
|
+
- !ruby/object:Gem::Dependency
|
70
|
+
name: rake
|
71
|
+
requirement: !ruby/object:Gem::Requirement
|
72
|
+
requirements:
|
73
|
+
- - "~>"
|
74
|
+
- !ruby/object:Gem::Version
|
75
|
+
version: '10.0'
|
76
|
+
type: :development
|
77
|
+
prerelease: false
|
78
|
+
version_requirements: !ruby/object:Gem::Requirement
|
79
|
+
requirements:
|
80
|
+
- - "~>"
|
81
|
+
- !ruby/object:Gem::Version
|
82
|
+
version: '10.0'
|
83
|
+
- !ruby/object:Gem::Dependency
|
84
|
+
name: rspec
|
85
|
+
requirement: !ruby/object:Gem::Requirement
|
86
|
+
requirements:
|
87
|
+
- - "~>"
|
88
|
+
- !ruby/object:Gem::Version
|
89
|
+
version: '3.0'
|
90
|
+
type: :development
|
91
|
+
prerelease: false
|
92
|
+
version_requirements: !ruby/object:Gem::Requirement
|
93
|
+
requirements:
|
94
|
+
- - "~>"
|
95
|
+
- !ruby/object:Gem::Version
|
96
|
+
version: '3.0'
|
97
|
+
- !ruby/object:Gem::Dependency
|
98
|
+
name: pry
|
99
|
+
requirement: !ruby/object:Gem::Requirement
|
100
|
+
requirements:
|
101
|
+
- - ">="
|
102
|
+
- !ruby/object:Gem::Version
|
103
|
+
version: '0'
|
104
|
+
type: :development
|
105
|
+
prerelease: false
|
106
|
+
version_requirements: !ruby/object:Gem::Requirement
|
107
|
+
requirements:
|
108
|
+
- - ">="
|
109
|
+
- !ruby/object:Gem::Version
|
110
|
+
version: '0'
|
111
|
+
- !ruby/object:Gem::Dependency
|
112
|
+
name: benchmark-ips
|
113
|
+
requirement: !ruby/object:Gem::Requirement
|
114
|
+
requirements:
|
115
|
+
- - ">="
|
116
|
+
- !ruby/object:Gem::Version
|
117
|
+
version: '0'
|
118
|
+
type: :development
|
119
|
+
prerelease: false
|
120
|
+
version_requirements: !ruby/object:Gem::Requirement
|
121
|
+
requirements:
|
122
|
+
- - ">="
|
123
|
+
- !ruby/object:Gem::Version
|
124
|
+
version: '0'
|
125
|
+
description:
|
126
|
+
email:
|
127
|
+
- steve@hodgkiss.me
|
128
|
+
executables: []
|
129
|
+
extensions: []
|
130
|
+
extra_rdoc_files: []
|
131
|
+
files:
|
132
|
+
- ".gitignore"
|
133
|
+
- ".rspec"
|
134
|
+
- ".travis.yml"
|
135
|
+
- CHANGELOG.md
|
136
|
+
- CODE_OF_CONDUCT.md
|
137
|
+
- Gemfile
|
138
|
+
- LICENSE.txt
|
139
|
+
- README.md
|
140
|
+
- Rakefile
|
141
|
+
- bin/console
|
142
|
+
- bin/setup
|
143
|
+
- event_sourcery-postgres.gemspec
|
144
|
+
- lib/event_sourcery/postgres.rb
|
145
|
+
- lib/event_sourcery/postgres/config.rb
|
146
|
+
- lib/event_sourcery/postgres/event_store.rb
|
147
|
+
- lib/event_sourcery/postgres/optimised_event_poll_waiter.rb
|
148
|
+
- lib/event_sourcery/postgres/projector.rb
|
149
|
+
- lib/event_sourcery/postgres/queue_with_interval_callback.rb
|
150
|
+
- lib/event_sourcery/postgres/reactor.rb
|
151
|
+
- lib/event_sourcery/postgres/schema.rb
|
152
|
+
- lib/event_sourcery/postgres/table_owner.rb
|
153
|
+
- lib/event_sourcery/postgres/tracker.rb
|
154
|
+
- lib/event_sourcery/postgres/version.rb
|
155
|
+
- script/bench_reading_events.rb
|
156
|
+
- script/bench_writing_events.rb
|
157
|
+
- script/demonstrate_event_sequence_id_gaps.rb
|
158
|
+
homepage: https://github.com/envato/event_sourcery-postgres
|
159
|
+
licenses: []
|
160
|
+
metadata: {}
|
161
|
+
post_install_message:
|
162
|
+
rdoc_options: []
|
163
|
+
require_paths:
|
164
|
+
- lib
|
165
|
+
required_ruby_version: !ruby/object:Gem::Requirement
|
166
|
+
requirements:
|
167
|
+
- - ">="
|
168
|
+
- !ruby/object:Gem::Version
|
169
|
+
version: 2.2.0
|
170
|
+
required_rubygems_version: !ruby/object:Gem::Requirement
|
171
|
+
requirements:
|
172
|
+
- - ">="
|
173
|
+
- !ruby/object:Gem::Version
|
174
|
+
version: '0'
|
175
|
+
requirements: []
|
176
|
+
rubyforge_project:
|
177
|
+
rubygems_version: 2.6.11
|
178
|
+
signing_key:
|
179
|
+
specification_version: 4
|
180
|
+
summary: Postgres event store for use with EventSourcery
|
181
|
+
test_files: []
|