akasha 0.2.0 → 0.3.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/.rubocop.yml +10 -0
- data/.travis.yml +22 -30
- data/CHANGELOG.md +7 -0
- data/Dockerfile +24 -0
- data/Gemfile +2 -2
- data/Gemfile.lock +36 -10
- data/README.md +62 -55
- data/Rakefile +8 -3
- data/akasha.gemspec +13 -6
- data/bin/console +3 -3
- data/bin/integration-tests.sh +40 -0
- data/docker/docker-compose.yml +17 -0
- data/examples/sinatra/Gemfile +3 -3
- data/examples/sinatra/Gemfile.lock +28 -6
- data/examples/sinatra/app.rb +54 -23
- data/lib/akasha.rb +2 -0
- data/lib/akasha/aggregate/syntax_helpers.rb +1 -1
- data/lib/akasha/async_event_router.rb +44 -0
- data/lib/akasha/changeset.rb +3 -1
- data/lib/akasha/checkpoint/http_event_store_checkpoint.rb +45 -0
- data/lib/akasha/command_router.rb +11 -2
- data/lib/akasha/event.rb +8 -12
- data/lib/akasha/event_router.rb +10 -33
- data/lib/akasha/event_router_base.rb +39 -0
- data/lib/akasha/repository.rb +13 -0
- data/lib/akasha/storage/http_event_store.rb +39 -5
- data/lib/akasha/storage/http_event_store/client.rb +169 -0
- data/lib/akasha/storage/http_event_store/event_serializer.rb +34 -0
- data/lib/akasha/storage/http_event_store/projection_manager.rb +67 -0
- data/lib/akasha/storage/http_event_store/response_handler.rb +17 -0
- data/lib/akasha/storage/http_event_store/stream.rb +17 -17
- data/lib/akasha/storage/memory_event_store.rb +31 -1
- data/lib/akasha/storage/memory_event_store/stream.rb +12 -2
- data/lib/akasha/version.rb +1 -1
- metadata +121 -13
data/lib/akasha/repository.rb
CHANGED
@@ -3,6 +3,8 @@ module Akasha
|
|
3
3
|
# Not meant to be used directly (see aggregate/syntax_helpers.rb)
|
4
4
|
# See specs for usage.
|
5
5
|
class Repository
|
6
|
+
attr_reader :store
|
7
|
+
|
6
8
|
STREAM_NAME_SEP = '-'.freeze
|
7
9
|
|
8
10
|
# Creates a new repository using the underlying `store` (e.g. `MemoryEventStore`).
|
@@ -44,6 +46,17 @@ module Akasha
|
|
44
46
|
@subscribers << callable
|
45
47
|
end
|
46
48
|
|
49
|
+
# Merges all streams into one, filtering the resulting stream
|
50
|
+
# so it only contains events with the specified names, using
|
51
|
+
# a projection.
|
52
|
+
#
|
53
|
+
# Arguments:
|
54
|
+
# `into` - name of the new stream
|
55
|
+
# `only` - array of event names
|
56
|
+
def merge_all_by_event(into:, only:)
|
57
|
+
@store.merge_all_by_event(into: into, only: only)
|
58
|
+
end
|
59
|
+
|
47
60
|
private
|
48
61
|
|
49
62
|
def stream_name(aggregate_klass, aggregate_id)
|
@@ -1,24 +1,58 @@
|
|
1
|
+
require_relative 'http_event_store/client'
|
1
2
|
require_relative 'http_event_store/stream'
|
2
|
-
require 'http_event_store'
|
3
3
|
|
4
4
|
module Akasha
  module Storage
    # HTTP-based interface to Eventstore (https://geteventstore.com)
    class HttpEventStore
      # Base class for all HTTP Event store errors.
      class Error < RuntimeError; end

      # Stream name contains invalid characters.
      class InvalidStreamNameError < Error; end

      # Base class for HTTP errors; carries the offending status code.
      class HttpError < Error
        attr_reader :status_code

        def initialize(status_code)
          @status_code = status_code
          super("Unexpected HTTP response: #{@status_code}")
        end
      end

      # 4xx HTTP status code.
      class HttpClientError < HttpError; end

      # 5xx HTTP status code.
      class HttpServerError < HttpError; end

      # Creates a new event store client, connecting to the specified host and port
      # using an optional username and password.
      def initialize(host: 'localhost', port: 2113, username: nil, password: nil)
        @client = Client.new(host: host, port: port, username: username, password: password)
      end

      # Returns a Hash-like object of streams; `self` implements `[]`, so any
      # stream can be looked up by name. The stream does not have to exist --
      # appending to it will create it.
      def streams
        self
      end

      # Shortcut for accessing streams by their names.
      def [](stream_name)
        Stream.new(@client, stream_name)
      end

      # Merges all streams into one, filtering the resulting stream
      # so it only contains events with the specified names, using
      # a projection.
      #
      # Arguments:
      #   `into` - name of the new stream
      #   `only` - array of event names
      def merge_all_by_event(into:, only:)
        @client.merge_all_by_event(into, only)
      end
    end
  end
end
|
@@ -0,0 +1,169 @@
|
|
1
|
+
require 'base64'
require 'corefines/hash'
require 'faraday'
require 'faraday_middleware'
require 'json'
require 'rack/utils'
require 'retries'
require 'securerandom'
require 'time'
require 'typhoeus/adapters/faraday'

require_relative 'event_serializer'
require_relative 'projection_manager'
require_relative 'response_handler'
|
14
|
+
|
15
|
+
module Akasha
  module Storage
    class HttpEventStore
      # Eventstore HTTP client.
      class Client
        using Corefines::Hash

        # A lower limit for a retry interval.
        MIN_RETRY_INTERVAL = 0
        # An upper limit for a retry interval.
        MAX_RETRY_INTERVAL = 10.0

        # Creates a new client for the host and port with optional username and password
        # for authenticating certain requests.
        def initialize(host: 'localhost', port: 2113, username: nil, password: nil)
          @username = username
          @password = password
          @conn = connection(host, port)
          @serializer = EventSerializer.new
        end

        # Appends events to a stream, retrying on network failures up to `max_retries` times.
        def retry_append_to_stream(stream_name, events, expected_version = nil, max_retries: 0)
          retrying_on_network_failures(max_retries) do
            append_to_stream(stream_name, events, expected_version)
          end
        end

        # Reads `count` events from the stream starting at `start` (inclusive),
        # retrying on network failures up to `max_retries` times.
        # Can long-poll for events if `poll` (seconds) is specified.
        def retry_read_events_forward(stream_name, start, count, poll = 0, max_retries: 0)
          retrying_on_network_failures(max_retries) do
            safe_read_events(stream_name, start, count, poll)
          end
        end

        # Merges all streams into one, filtering the resulting stream
        # so it only contains events with the specified names, using
        # a projection.
        #
        # Arguments:
        #   `name` - name of the projection stream
        #   `event_names` - array of event names
        def merge_all_by_event(name, event_names, max_retries: 0)
          retrying_on_network_failures(max_retries) do
            ProjectionManager.new(self).merge_all_by_event(name, event_names)
          end
        end

        # Reads stream metadata.
        def retry_read_metadata(stream_name, max_retries: 0)
          retrying_on_network_failures(max_retries) do
            safe_read_metadata(stream_name)
          end
        end

        # Updates stream metadata by appending a metadata-change event to the
        # stream's companion `<stream>/metadata` stream.
        # `max_retries:` added for consistency with the other retry_* methods;
        # defaults to 0 so existing callers are unaffected.
        def retry_write_metadata(stream_name, metadata, max_retries: 0)
          event = Akasha::Event.new(:stream_metadata_changed, SecureRandom.uuid, metadata)
          retry_append_to_stream("#{stream_name}/metadata", [event], max_retries: max_retries)
        end

        # Issues a generic request against the API, merging auth headers into
        # `headers`. Returns the (JSON-decoded, when applicable) response body,
        # or {} when the body is empty.
        def request(method, path, body = nil, headers = {})
          body = @conn.public_send(method, path, body, auth_headers.merge(headers)).body
          return {} if body.empty?
          body
        end

        private

        # Builds the Faraday connection: JSON response parsing, error-raising
        # middleware (ResponseHandler), typhoeus adapter.
        def connection(host, port)
          Faraday.new do |conn|
            conn.host = host
            conn.port = port
            conn.response :json, content_type: 'application/json'
            conn.use ResponseHandler
            conn.adapter :typhoeus
          end
        end

        # HTTP Basic auth headers, or {} when no credentials were given.
        def auth_headers
          if @username && @password
            auth = Base64.urlsafe_encode64([@username, @password].join(':'))
            {
              'Authorization' => "Basic #{auth}"
            }
          else
            {}
          end
        end

        # Runs the block, retrying Faraday network errors with bounded sleeps.
        def retrying_on_network_failures(max_retries)
          with_retries(base_sleep_seconds: MIN_RETRY_INTERVAL,
                       max_sleep_seconds: MAX_RETRY_INTERVAL,
                       max_tries: 1 + max_retries,
                       rescue: [Faraday::TimeoutError, Faraday::ConnectionFailed]) do
            yield
          end
        end

        # NOTE(review): `_expected_version` is accepted but never sent
        # (ES-ExpectedVersion header is commented out) -- optimistic
        # concurrency is effectively disabled here; confirm before relying on it.
        def append_to_stream(stream_name, events, _expected_version = nil)
          @conn.post("/streams/#{stream_name}") do |req|
            req.headers = {
              'Content-Type' => 'application/vnd.eventstore.events+json',
              # 'ES-ExpectedVersion' => expected_version
            }
            req.body = to_event_data(events).to_json
          end
        end

        # Reads a page of events; a 404 means the stream does not exist yet
        # and is mapped to an empty result rather than an error.
        def safe_read_events(stream_name, start, count, poll)
          resp = @conn.get("/streams/#{stream_name}/#{start}/forward/#{count}") do |req|
            req.headers = {
              'Accept' => 'application/json'
            }
            # NOTE(review): header values are conventionally strings; an integer
            # is passed here -- appears to work with typhoeus, confirm if the
            # adapter changes.
            req.headers['ES-LongPoll'] = poll if poll&.positive?
            req.params['embed'] = 'body'
          end
          event_data = resp.body['entries']
          to_events(event_data)
        rescue HttpClientError => e
          return [] if e.status_code == 404
          raise
        rescue URI::InvalidURIError
          raise InvalidStreamNameError, "Invalid stream name: #{stream_name}"
        end

        # Reads stream metadata; 404 (no metadata yet) maps to {}.
        def safe_read_metadata(stream_name)
          metadata = request(:get, "/streams/#{stream_name}/metadata", nil, 'Accept' => 'application/json')
          metadata.symbolize_keys
        rescue HttpClientError => e
          return {} if e.status_code == 404
          raise
        rescue URI::InvalidURIError
          raise InvalidStreamNameError, "Invalid stream name: #{stream_name}"
        end

        def to_event_data(events)
          @serializer.serialize(events)
        end

        # Converts raw ES entries (whose payloads are JSON-encoded strings)
        # into Akasha events.
        def to_events(es_events)
          es_events = es_events.map do |ev|
            ev['data'] &&= JSON.parse(ev['data'])
            ev['metaData'] &&= JSON.parse(ev['metaData'])
            ev
          end
          @serializer.deserialize(es_events)
        end
      end
    end
  end
end
@@ -0,0 +1,34 @@
|
|
1
|
+
require 'corefines/hash'
|
2
|
+
|
3
|
+
module Akasha
  module Storage
    class HttpEventStore
      # Serializes and deserializes events to and from the format required
      # by the HTTP Eventstore API.
      class EventSerializer
        using Corefines::Hash

        # Converts Akasha events into ES wire-format hashes.
        def serialize(events)
          events.map { |event| event_data(event) }
        end

        # Converts ES wire-format hashes back into Akasha events.
        # Missing `data`/`metaData` payloads become empty hashes.
        def deserialize(es_events)
          es_events.map do |ev|
            data = ev['data']&.symbolize_keys || {}
            metadata = ev['metaData']&.symbolize_keys || {}
            Akasha::Event.new(ev['eventType'].to_sym, ev['eventId'], metadata, **data)
          end
        end

        private

        # Builds the wire-format hash for a single event; `eventId` is
        # omitted entirely when the event has no id.
        def event_data(event)
          payload = {
            'eventType' => event.name,
            'data' => event.data,
            'metaData' => event.metadata
          }
          payload['eventId'] = event.id unless event.id.nil?
          payload
        end
      end
    end
  end
end
@@ -0,0 +1,67 @@
|
|
1
|
+
module Akasha
  module Storage
    class HttpEventStore
      # Manages HTTP ES projections.
      class ProjectionManager
        def initialize(client)
          @client = client
        end

        # Merges all streams into one, filtering the resulting stream
        # so it only contains events with the specified names, using
        # a projection.
        #
        # Arguments:
        #   `name` - name of the projection stream
        #   `event_names` - array of event names
        #
        # Creates the projection if it does not exist yet; otherwise
        # updates the existing projection's query.
        def merge_all_by_event(name, event_names)
          attempt_create_projection(name, event_names) ||
            update_projection(name, event_names)
        end

        private

        # Builds the projection's JavaScript query: links every matching
        # event into the `name` stream.
        def projection_javascript(name, events)
          callbacks = events.map { |en| "\"#{en}\": function(s,e) { linkTo('#{name}', e) }" }
          # Alternative code using internal indexing.
          # It's broken though because it reorders events for aggregates (because the streams
          # it uses are per-event). An alternative would be to use aggregates as streams
          # to pull from.
          # et_streams = events.map { |en| "\"$et-#{en}\"" }
          # "fromStreams([#{et_streams.join(', ')}]).when({ #{callbacks.join(', ')} });"
          "
          // This is hard to find, so I'm leaving it here:
          // options({
          //   reorderEvents: true,
          //   processingLag: 100 //time in ms
          // });
          fromAll().when({ #{callbacks.join(', ')} });
          "
        end

        # POSTs a new continuous projection. Returns true on success,
        # false when a projection with this name already exists (HTTP 409),
        # letting the caller fall back to updating it.
        def attempt_create_projection(name, event_names)
          create_options = {
            name: name,
            emit: :yes,
            checkpoints: :yes,
            enabled: :yes
          }
          query_string = Rack::Utils.build_query(create_options)
          @client.request(:post, "/projections/continuous?#{query_string}",
                          projection_javascript(name, event_names),
                          'Content-Type' => 'application/javascript')
          true
        rescue HttpClientError => e
          return false if e.status_code == 409
          raise
        end

        # Replaces the query of an existing projection.
        # Fixed: the query string previously read `emit=yet`; the
        # EventStore API expects `emit=yes`.
        def update_projection(name, event_names)
          @client.request(:put, "/projection/#{name}/query?emit=yes",
                          projection_javascript(name, event_names),
                          'Content-Type' => 'application/javascript')
        end
      end
    end
  end
end
@@ -0,0 +1,17 @@
|
|
1
|
+
module Akasha
  module Storage
    class HttpEventStore
      # Faraday middleware translating error statuses from the Eventstore
      # HTTP API into HttpClientError (4xx) / HttpServerError (5xx).
      class ResponseHandler < Faraday::Response::Middleware
        # Invoked by Faraday once the response is complete.
        def on_complete(env)
          status = env.status
          raise HttpClientError, status if (400..499).cover?(status)
          raise HttpServerError, status if (500..599).cover?(status)
        end
      end
    end
  end
end
@@ -3,46 +3,46 @@ module Akasha
|
|
3
3
|
class HttpEventStore
|
4
4
|
# HTTP Eventstore stream.
|
5
5
|
class Stream
  attr_reader :name

  def initialize(client, stream_name)
    @client = client
    @name = stream_name
  end

  # Appends events to the stream. A no-op for an empty batch.
  def write_events(events)
    @client.retry_append_to_stream(@name, events) unless events.empty?
  end

  # Reads events from the stream starting from `start` inclusive.
  # With a block: yields successive pages of `page_size` events until
  # the stream is exhausted. Without a block: returns a single page.
  # Set `poll` to a number of seconds to enable long-polling.
  def read_events(start, page_size, poll = 0)
    unless block_given?
      return @client.retry_read_events_forward(@name, start, page_size, poll)
    end
    position = start
    loop do
      page = read_events(position, page_size, poll)
      return if page.empty?
      yield(page)
      position += page.size
    end
  end

  # Reads stream metadata.
  def metadata
    @client.retry_read_metadata(@name)
  end

  # Updates stream metadata.
  def metadata=(metadata)
    @client.retry_write_metadata(@name, metadata)
  end
end
48
48
|
end
|
@@ -10,7 +10,37 @@ module Akasha
|
|
10
10
|
attr_reader :streams
|
11
11
|
|
12
12
|
def initialize
  store = self
  # Streams are created lazily on first access; each one notifies the
  # store about newly written events so projections stay up to date.
  @streams = Hash.new do |hash, stream_name|
    hash[stream_name] = Stream.new do |new_events|
      store.update_projections(new_events)
      new_events
    end
  end
  @projections = []
end
|
22
|
+
|
23
|
+
# Merges all streams into one, filtering the resulting stream
# so it only contains events with the specified names.
#
# Arguments:
#   `into` - name of the new stream
#   `only` - array of event names
def merge_all_by_event(into:, only:)
  filtered = Stream.new do |new_events|
    new_events.select { |event| only.include?(event.name) }
  end
  @projections << filtered
  @streams[into] = filtered
  filtered
end
|
37
|
+
|
38
|
+
protected
|
39
|
+
|
40
|
+
# Forwards newly written events to every registered projection stream.
def update_projections(events)
  @projections.each { |projection| projection.write_events(events) }
end
|
15
45
|
end
|
16
46
|
end
|