pg_eventstore 0.9.0 → 0.10.2

Sign up to get free protection for your applications and to get access to all the features.
Files changed (38) hide show
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +14 -0
  3. data/db/migrations/{3_create_events.sql → 1_create_events.sql} +11 -14
  4. data/db/migrations/5_partitions.sql +16 -0
  5. data/docs/configuration.md +1 -1
  6. data/docs/events_and_streams.md +5 -3
  7. data/docs/how_it_works.md +80 -0
  8. data/lib/pg_eventstore/client.rb +11 -7
  9. data/lib/pg_eventstore/commands/append.rb +17 -8
  10. data/lib/pg_eventstore/commands/link_to.rb +3 -3
  11. data/lib/pg_eventstore/commands/read.rb +1 -1
  12. data/lib/pg_eventstore/errors.rb +17 -6
  13. data/lib/pg_eventstore/event.rb +5 -1
  14. data/lib/pg_eventstore/event_deserializer.rb +4 -1
  15. data/lib/pg_eventstore/queries/event_queries.rb +65 -26
  16. data/lib/pg_eventstore/queries/links_resolver.rb +31 -0
  17. data/lib/pg_eventstore/queries/partition_queries.rb +184 -0
  18. data/lib/pg_eventstore/queries/subscription_queries.rb +12 -15
  19. data/lib/pg_eventstore/queries/transaction_queries.rb +13 -0
  20. data/lib/pg_eventstore/queries.rb +5 -6
  21. data/lib/pg_eventstore/query_builders/events_filtering_query.rb +10 -31
  22. data/lib/pg_eventstore/rspec/test_helpers.rb +16 -1
  23. data/lib/pg_eventstore/sql_builder.rb +34 -4
  24. data/lib/pg_eventstore/stream.rb +3 -8
  25. data/lib/pg_eventstore/subscriptions/events_processor.rb +10 -2
  26. data/lib/pg_eventstore/subscriptions/subscription.rb +1 -0
  27. data/lib/pg_eventstore/subscriptions/subscription_feeder.rb +1 -1
  28. data/lib/pg_eventstore/subscriptions/subscription_runners_feeder.rb +2 -3
  29. data/lib/pg_eventstore/version.rb +1 -1
  30. metadata +10 -11
  31. data/db/migrations/1_create_streams.sql +0 -13
  32. data/db/migrations/2_create_event_types.sql +0 -10
  33. data/lib/pg_eventstore/queries/event_type_queries.rb +0 -74
  34. data/lib/pg_eventstore/queries/preloader.rb +0 -37
  35. data/lib/pg_eventstore/queries/stream_queries.rb +0 -77
  36. /data/db/migrations/{4_create_subscriptions.sql → 2_create_subscriptions.sql} +0 -0
  37. /data/db/migrations/{5_create_subscription_commands.sql → 3_create_subscription_commands.sql} +0 -0
  38. /data/db/migrations/{6_create_subscriptions_set_commands.sql → 4_create_subscriptions_set_commands.sql} +0 -0
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 4ca951a11a040b193a289feb28627294328acebaa929730928749ddddeb41a83
4
- data.tar.gz: 4af9d3c3ee4b4b4ce77092bc699cb1ad97d9a9f8e5fde4ff9d1d8078c2385efc
3
+ metadata.gz: ccfbe47f6f178820e5f7b5ccc9fb9298bf6beba4fa10081cd01bcd6ce3fe1001
4
+ data.tar.gz: b55180dc1c24ec04d13e99b0d4ba920eae5eef652ccde632f332ab67b27cc14e
5
5
  SHA512:
6
- metadata.gz: 68baa83af9884c14f312e85f4ef619dd9365ffc273b0a629b14c466f25d393de884c2fdca02849b03f40a87535f6253751d3bc4eb1d702e630cceb0fd511c0a7
7
- data.tar.gz: e2c14d9b5767c3a55dda6a594345159e11c5a5e04c18060ddcb587ba40a011be84936c0a36c746ad40928d97a321e468890b37108c0129e4cdb2bda17996d4a5
6
+ metadata.gz: 4226be0aa1f6109b5a956febca5343b1a1d215c6a16e056267027287bcb629295cd4af1a7f6fc093e781c90a48f885f2b5932faff7962bdd993305519bfca053
7
+ data.tar.gz: 2eb04ad6b29569f29282768e806813823e990493674c627252963d312966aa1806f06139b4b01937699c0fcf4b25034c2e02cb7375710d65838d62bb3934099d
data/CHANGELOG.md CHANGED
@@ -1,5 +1,19 @@
1
1
  ## [Unreleased]
2
2
 
3
+ ## [0.10.2] - 2024-03-13
4
+
5
+ - Review the approach to resolve link events
6
+ - Fix subscriptions restart interval option not being processed correctly
7
+
8
+ ## [0.10.1] - 2024-03-12
9
+
10
+ - Handle edge case when creating partitions
11
+
12
+ ## [0.10.0] - 2024-03-12
13
+
14
+ - Reimplement db structure
15
+ - Optimize `#append_to_stream` method - it now produces one `INSERT` query when publishing multiple events
16
+
3
17
  ## [0.9.0] - 2024-02-23
4
18
 
5
19
  - Use POSIX locale for streams and event types
@@ -1,27 +1,24 @@
1
1
  CREATE TABLE public.events
2
2
  (
3
3
  id uuid DEFAULT public.gen_random_uuid() NOT NULL,
4
- stream_id bigint NOT NULL,
4
+ context character varying COLLATE "POSIX" NOT NULL,
5
+ stream_name character varying COLLATE "POSIX" NOT NULL,
6
+ stream_id character varying COLLATE "POSIX" NOT NULL,
5
7
  global_position bigserial NOT NULL,
6
8
  stream_revision integer NOT NULL,
7
9
  data jsonb DEFAULT '{}'::jsonb NOT NULL,
8
10
  metadata jsonb DEFAULT '{}'::jsonb NOT NULL,
9
11
  link_id uuid,
10
12
  created_at timestamp without time zone DEFAULT now() NOT NULL,
11
- event_type_id bigint NOT NULL
12
- );
13
+ type character varying COLLATE "POSIX" NOT NULL
14
+ ) PARTITION BY LIST (context);
13
15
 
14
16
  ALTER TABLE ONLY public.events
15
- ADD CONSTRAINT events_pkey PRIMARY KEY (id);
17
+ ADD CONSTRAINT events_pkey PRIMARY KEY (context, stream_name, type, global_position);
16
18
 
17
- CREATE INDEX idx_events_event_type_id_and_global_position ON public.events USING btree (event_type_id, global_position);
18
- CREATE INDEX idx_events_global_position ON public.events USING btree (global_position);
19
- CREATE INDEX idx_events_link_id ON public.events USING btree (link_id);
20
- CREATE INDEX idx_events_stream_id_and_revision ON public.events USING btree (stream_id, stream_revision);
19
+ CREATE INDEX idx_events_stream_id_and_stream_revision ON public.events USING btree (stream_id, stream_revision);
20
+ CREATE INDEX idx_events_stream_id_and_global_position ON public.events USING btree (stream_id, global_position);
21
21
 
22
- ALTER TABLE ONLY public.events
23
- ADD CONSTRAINT events_stream_fk FOREIGN KEY (stream_id) REFERENCES public.streams (id) ON DELETE CASCADE;
24
- ALTER TABLE ONLY public.events
25
- ADD CONSTRAINT events_event_type_fk FOREIGN KEY (event_type_id) REFERENCES public.event_types (id);
26
- ALTER TABLE ONLY public.events
27
- ADD CONSTRAINT events_link_fk FOREIGN KEY (link_id) REFERENCES public.events (id) ON DELETE CASCADE;
22
+ CREATE INDEX idx_events_id ON public.events USING btree (id);
23
+ CREATE INDEX idx_events_link_id ON public.events USING btree (link_id);
24
+ CREATE INDEX idx_events_global_position ON public.events USING btree (global_position);
@@ -0,0 +1,16 @@
1
+ CREATE TABLE public.partitions
2
+ (
3
+ id bigserial NOT NULL,
4
+ context character varying COLLATE "POSIX" NOT NULL,
5
+ stream_name character varying COLLATE "POSIX",
6
+ event_type character varying COLLATE "POSIX",
7
+ table_name character varying COLLATE "POSIX" NOT NULL
8
+ );
9
+
10
+ ALTER TABLE ONLY public.partitions
11
+ ADD CONSTRAINT partitions_pkey PRIMARY KEY (id);
12
+
13
+ CREATE UNIQUE INDEX idx_partitions_by_context ON public.partitions USING btree (context) WHERE stream_name IS NULL AND event_type IS NULL;
14
+ CREATE UNIQUE INDEX idx_partitions_by_context_and_stream_name ON public.partitions USING btree (context, stream_name) WHERE event_type IS NULL;
15
+ CREATE UNIQUE INDEX idx_partitions_by_context_and_stream_name_and_event_type ON public.partitions USING btree (context, stream_name, event_type);
16
+ CREATE UNIQUE INDEX idx_partitions_by_partition_table_name ON public.partitions USING btree (table_name);
@@ -10,7 +10,7 @@ Configuration options:
10
10
  | event_class_resolver | `#call` | `PgEventstore::EventClassResolver.new` | A `#call`-able object that accepts a string and returns an event's class. See **Resolving events classes** chapter bellow for more info. |
11
11
  | connection_pool_size | Integer | `5` | Max number of connections per ruby process. It must equal the number of threads of your application. When using subscriptions it is recommended to set it to the number of subscriptions divided by two or greater. See [**Picking max connections number**](#picking-max-connections-number) chapter of this section. |
12
12
  | connection_pool_timeout | Integer | `5` | Time in seconds to wait for a connection in the pool to be released. If no connections are available during this time - `ConnectionPool::TimeoutError` will be raised. See `connection_pool` gem [docs](https://github.com/mperham/connection_pool#usage) for more info. |
13
- | subscription_pull_interval | Float | `1.0` | How often to pull new subscription events in seconds. |
13
+ | subscription_pull_interval | Float | `1.0` | How often to pull new subscription events in seconds. The minimum meaningful value is `0.2`. Values less than `0.2` will act as if they were `0.2`. |
14
14
  | subscription_max_retries | Integer | `5` | Max number of retries of failed subscription. |
15
15
  | subscription_retries_interval | Integer | `1` | Interval in seconds between retries of failed subscriptions. |
16
16
  | subscriptions_set_max_retries | Integer | `10` | Max number of retries for failed subscription sets. |
@@ -22,7 +22,7 @@
22
22
  Example:
23
23
 
24
24
  ```ruby
25
- PgEventstore::Event.new(data: { 'foo' => 'bar' })
25
+ PgEventstore::Event.new(data: { 'foo' => 'bar' }, type: 'FooChanged')
26
26
  ```
27
27
 
28
28
  ## Stream object
@@ -32,8 +32,6 @@ To be able to manipulate a stream, you have to compute a stream's object first.
32
32
  - `context` - String(required). A Bounded Context, read more [here](https://martinfowler.com/bliki/BoundedContext.html). Values which start from `$` sign are reserved by `pg_eventstore`. Such contexts can't be used to append events.
33
33
  - `stream_name` - String(required). A stream name.
34
34
  - `stream_id` - String(required). A stream id.
35
- - `id` - Integer(optional, read only). Internal id. It is set when a stream is returned from the database as part of the deserialization process. Manually assigning this attribute has no effect.
36
- - `stream_revision` - Integer(optional, read only). Current stream's revision. You can rely on this value when setting the `:expected_revision` option when appending events to a stream. It is set when a stream is returned from the database a part of the deserialization process. Manually assigning this attribute has no effect.
37
35
 
38
36
  Example:
39
37
 
@@ -43,3 +41,7 @@ PgEventstore::Stream.new(context: 'Sales', stream_name: 'Customer', stream_id: '
43
41
  ```
44
42
 
45
43
  There is a special stream, called the "all" stream. You can get this object by calling the`PgEventstore::Stream.all_stream` method. Read more about the "all" stream in the `Reading from the "all" stream` section of [Reading events](reading_events.md) chapter.
44
+
45
+ ## Important note
46
+
47
+ Because the database is designed for Eventsourcing, some limitations should be met - a combination of `Event#type`, `Stream#context` and `Stream#stream_name` must have low cardinality (a low number of unique values). This means you should pre-define the values there. Otherwise it may lead to performance degradation. See [How it works](how_it_works.md) chapter for the details.
@@ -0,0 +1,80 @@
1
+ # How it works
2
+
3
+ ## Database architecture
4
+
5
+ The database is designed specifically for Eventsourcing using Domain-Driven Design. `events` table is partitioned in the following way:
6
+
7
+ - For each `Stream#context` there is a subpartition of `events` table. Those tables have `contexts_` prefix.
8
+ - For each `Stream#stream_name` there is a subpartition of `contexts_` table. Those tables have `stream_names_` prefix.
9
+ - For each `Event#type` there is a subpartition of `stream_names_` table. Those tables have `event_types_` prefix.
10
+
11
+ To implement partitions - Declarative Partitioning is used. Partitioning means that you should not have any random values in the combination of `Stream#context`, `Stream#stream_name` and `Event#type`. A combination of those values must have low cardinality (a low number of distinct values) and must be pre-defined in your application. Otherwise it will lead to performance degradation. More about PostgreSQL partitions is [here](https://www.postgresql.org/docs/current/ddl-partitioning.html).
12
+
13
+ So, let's say you want to publish the following event:
14
+
15
+ ```ruby
16
+ stream = PgEventstore::Stream.new(context: 'SomeCtx', stream_name: 'SomeStream', stream_id: '1')
17
+ event = PgEventstore::Event.new(type: 'SomethingChanged', data: { foo: :bar })
18
+ PgEventstore.client.append_to_stream(stream, event)
19
+ ```
20
+
21
+ To actually create an `events` record, the following partitions will be created:
22
+
23
+ - `contexts_81820a` table which is a subpartition of `events` table. It is needed to handle all events which comes to `"SomeCtx"` context
24
+ - `stream_names_ecb803` table which is a subpartition of `contexts_81820a` table. It is needed to handle all events which comes to `"SomeStream"` stream name of `"SomeCtx"` context
25
+ - `event_types_aeadd5` table which is a subpartition of `stream_names_ecb803` table. It is needed to handle all events which have `"SomethingChanged"` event type of `"SomeStream"` stream name of `"SomeCtx"` context
26
+
27
+ You can check all partitions and their associated contexts, stream names and event types by querying the `partitions` table. Example (based on the publish sample above):
28
+
29
+ ```ruby
30
+ PgEventstore.connection.with do |conn|
31
+ conn.exec('select * from partitions')
32
+ end.to_a
33
+ # =>
34
+ # [{"id"=>1, "context"=>"SomeCtx", "stream_name"=>nil, "event_type"=>nil, "table_name"=>"contexts_81820a"},
35
+ # {"id"=>2, "context"=>"SomeCtx", "stream_name"=>"SomeStream", "event_type"=>nil, "table_name"=>"stream_names_ecb803"},
36
+ # {"id"=>3, "context"=>"SomeCtx", "stream_name"=>"SomeStream", "event_type"=>"SomethingChanged", "table_name"=>"event_types_aeadd5"}]
37
+ ```
38
+
39
+ ## Appending events and multiple commands
40
+
41
+ You may want to get familiar with [Appending events](appending_events.md) and [multiple commands](multiple_commands.md) first.
42
+
43
+ `pg_eventstore` internally uses `Serializable` transaction isolation level(more about different transaction isolation levels in PostgreSQL is [here](https://www.postgresql.org/docs/current/transaction-iso.html)). In practice this means that any transaction may fail with a serialization error, and the common approach is to restart this transaction. For ruby this means re-execution of the block of code. Which is why there is a warning regarding potential block re-execution when using `#multiple`. However, the current implementation limits 99% of retries to manipulations with a single stream. For example, when two parallel processes are changing the same stream. If different streams are being changed at the same time - a retry is much less likely to be performed.
44
+
45
+ Examples:
46
+
47
+ - if "process 1" and "process 2" perform the append command at the same time - one of the append commands will be retried:
48
+ ```ruby
49
+ # process 1
50
+ stream = PgEventstore::Stream.new(context: 'MyCtx', stream_name: 'MyStream', stream_id: '1')
51
+ event = PgEventstore::Event.new(type: 'SomethingChanged', data: { foo: :bar })
52
+ PgEventstore.client.append_to_stream(stream, event)
53
+
54
+ # process 2
55
+ stream = PgEventstore::Stream.new(context: 'MyCtx', stream_name: 'MyStream', stream_id: '1')
56
+ event = PgEventstore::Event.new(type: 'SomethingElseChanged', data: { baz: :bar })
57
+ PgEventstore.client.append_to_stream(stream, event)
58
+ ```
59
+
60
+ - if "process 1" performs multiple commands while, at the same time, "process 2" performs an append command which involves the same stream as "process 1" - either the block of `#multiple` or `#append_to_stream` will be retried:
61
+
62
+ ```ruby
63
+ # process 1
64
+ stream1 = PgEventstore::Stream.new(context: 'MyCtx', stream_name: 'MyStream1', stream_id: '1')
65
+ stream2 = PgEventstore::Stream.new(context: 'MyCtx', stream_name: 'MyStream2', stream_id: '1')
66
+ event = PgEventstore::Event.new(type: 'SomethingChanged', data: { foo: :bar })
67
+ PgEventstore.client.multiple do
68
+ PgEventstore.client.append_to_stream(stream1, event)
69
+ PgEventstore.client.append_to_stream(stream2, event)
70
+ end
71
+
72
+ # process 2
73
+ stream2 = PgEventstore::Stream.new(context: 'MyCtx', stream_name: 'MyStream2', stream_id: '1')
74
+ event = PgEventstore::Event.new(type: 'SomethingChanged', data: { foo: :bar })
75
+ PgEventstore.client.append_to_stream(stream2, event)
76
+ ```
77
+
78
+ Retries also concern your potential implementation of [middlewares](writing_middleware.md). For example, `YourAwesomeMiddleware#serialize` can be executed several times when appending an event. This is especially important when you involve your microservices here - they can receive the same payload several times.
79
+
80
+ Conclusion. When developing using `pg_eventstore` - always keep in mind that some parts of your implementation can be executed several times before successfully publishing an event, or even when reading events (`#deserialize` middleware method) if you perform reading within a `#multiple` block.
@@ -29,7 +29,9 @@ module PgEventstore
29
29
  result =
30
30
  Commands::Append.new(
31
31
  Queries.new(
32
- streams: stream_queries, events: event_queries(middlewares(middlewares)), transactions: transaction_queries
32
+ partitions: partition_queries,
33
+ events: event_queries(middlewares(middlewares)),
34
+ transactions: transaction_queries
33
35
  )
34
36
  ).call(stream, *events_or_event, options: options)
35
37
  events_or_event.is_a?(Array) ? result : result.first
@@ -105,7 +107,7 @@ module PgEventstore
105
107
  # @raise [PgEventstore::StreamNotFoundError]
106
108
  def read(stream, options: {}, middlewares: nil)
107
109
  Commands::Read.
108
- new(Queries.new(streams: stream_queries, events: event_queries(middlewares(middlewares)))).
110
+ new(Queries.new(partitions: partition_queries, events: event_queries(middlewares(middlewares)))).
109
111
  call(stream, options: { max_count: config.max_count }.merge(options))
110
112
  end
111
113
 
@@ -114,7 +116,7 @@ module PgEventstore
114
116
  def read_paginated(stream, options: {}, middlewares: nil)
115
117
  cmd_class = stream.system? ? Commands::SystemStreamReadPaginated : Commands::RegularStreamReadPaginated
116
118
  cmd_class.
117
- new(Queries.new(streams: stream_queries, events: event_queries(middlewares(middlewares)))).
119
+ new(Queries.new(partitions: partition_queries, events: event_queries(middlewares(middlewares)))).
118
120
  call(stream, options: { max_count: config.max_count }.merge(options))
119
121
  end
120
122
 
@@ -133,7 +135,9 @@ module PgEventstore
133
135
  result =
134
136
  Commands::LinkTo.new(
135
137
  Queries.new(
136
- streams: stream_queries, events: event_queries(middlewares(middlewares)), transactions: transaction_queries
138
+ partitions: partition_queries,
139
+ events: event_queries(middlewares(middlewares)),
140
+ transactions: transaction_queries
137
141
  )
138
142
  ).call(stream, *events_or_event, options: options)
139
143
  events_or_event.is_a?(Array) ? result : result.first
@@ -154,9 +158,9 @@ module PgEventstore
154
158
  PgEventstore.connection(config.name)
155
159
  end
156
160
 
157
- # @return [PgEventstore::StreamQueries]
158
- def stream_queries
159
- StreamQueries.new(connection)
161
+ # @return [PgEventstore::PartitionQueries]
162
+ def partition_queries
163
+ PartitionQueries.new(connection)
160
164
  end
161
165
 
162
166
  # @return [PgEventstore::TransactionQueries]
@@ -16,19 +16,28 @@ module PgEventstore
16
16
  raise SystemStreamError, stream if stream.system?
17
17
 
18
18
  queries.transactions.transaction do
19
- stream = queries.streams.find_or_create_stream(stream)
20
- revision = stream.stream_revision
19
+ revision = queries.events.stream_revision(stream) || Stream::NON_EXISTING_STREAM_REVISION
21
20
  assert_expected_revision!(revision, options[:expected_revision], stream) if options[:expected_revision]
22
- events.map.with_index(1) do |event, index|
23
- queries.events.insert(stream, event_modifier.call(event, revision + index))
24
- end.tap do
25
- queries.streams.update_stream_revision(stream, revision + events.size)
21
+ formatted_events = events.map.with_index(1) do |event, index|
22
+ event_modifier.call(event, revision + index)
26
23
  end
24
+ create_partitions(stream, formatted_events)
25
+ queries.events.insert(stream, formatted_events)
27
26
  end
28
27
  end
29
28
 
30
29
  private
31
30
 
31
+ # @param stream [PgEventstore::Stream]
32
+ # @param events [Array<PgEventstore::Event>]
33
+ # @return [void]
34
+ def create_partitions(stream, events)
35
+ missing_event_types = events.map(&:type).map(&:to_s).uniq.select do |event_type|
36
+ queries.partitions.partition_required?(stream, event_type)
37
+ end
38
+ raise MissingPartitions.new(stream, missing_event_types) if missing_event_types.any?
39
+ end
40
+
32
41
  # @param revision [Integer]
33
42
  # @param expected_revision [Symbol, Integer]
34
43
  # @param stream [PgEventstore::Stream]
@@ -46,12 +55,12 @@ module PgEventstore
46
55
  end
47
56
 
48
57
  in [Integer, Symbol]
49
- if revision == Stream::INITIAL_STREAM_REVISION && expected_revision == :stream_exists
58
+ if revision == Stream::NON_EXISTING_STREAM_REVISION && expected_revision == :stream_exists
50
59
  raise WrongExpectedRevisionError.new(
51
60
  revision: revision, expected_revision: expected_revision, stream: stream
52
61
  )
53
62
  end
54
- if revision > Stream::INITIAL_STREAM_REVISION && expected_revision == :no_stream
63
+ if revision > Stream::NON_EXISTING_STREAM_REVISION && expected_revision == :no_stream
55
64
  raise WrongExpectedRevisionError.new(
56
65
  revision: revision, expected_revision: expected_revision, stream: stream
57
66
  )
@@ -13,7 +13,7 @@ module PgEventstore
13
13
  # @raise [PgEventstore::WrongExpectedRevisionError]
14
14
  # @raise [PgEventstore::NotPersistedEventError]
15
15
  def call(stream, *events, options: {})
16
- events.each(&method(:check_id_presence))
16
+ events.each(&method(:check_event_presence))
17
17
  append_cmd = Append.new(queries)
18
18
  append_cmd.call(stream, *events, options: options, event_modifier: EventModifiers::PrepareLinkEvent)
19
19
  end
@@ -23,8 +23,8 @@ module PgEventstore
23
23
  # Checks if Event#id is present. An event must have the #id value in order to be linked.
24
24
  # @param event [PgEventstore::Event]
25
25
  # @return [void]
26
- def check_id_presence(event)
27
- return unless event.id.nil?
26
+ def check_event_presence(event)
27
+ return if queries.events.event_exists?(event.id)
28
28
 
29
29
  raise NotPersistedEventError, event
30
30
  end
@@ -15,7 +15,7 @@ module PgEventstore
15
15
  # @return [Array<PgEventstore::Event>]
16
16
  # @raise [PgEventstore::StreamNotFoundError]
17
17
  def call(stream, options: {})
18
- stream = queries.streams.find_stream(stream) || raise(StreamNotFoundError, stream) unless stream.all_stream?
18
+ queries.events.stream_revision(stream) || raise(StreamNotFoundError, stream) unless stream.all_stream?
19
19
 
20
20
  queries.events.stream_events(stream, options)
21
21
  end
@@ -58,9 +58,11 @@ module PgEventstore
58
58
 
59
59
  # @return [String]
60
60
  def user_friendly_message
61
- return expected_stream_exists if revision == -1 && expected_revision == :stream_exists
62
- return expected_no_stream if revision > -1 && expected_revision == :no_stream
63
- return current_no_stream if revision == -1 && expected_revision.is_a?(Integer)
61
+ if revision == Stream::NON_EXISTING_STREAM_REVISION && expected_revision == :stream_exists
62
+ return expected_stream_exists
63
+ end
64
+ return expected_no_stream if revision > Stream::NON_EXISTING_STREAM_REVISION && expected_revision == :no_stream
65
+ return current_no_stream if revision == Stream::NON_EXISTING_STREAM_REVISION && expected_revision.is_a?(Integer)
64
66
 
65
67
  unmatched_stream_revision
66
68
  end
@@ -189,9 +191,18 @@ module PgEventstore
189
191
 
190
192
  # @return [String]
191
193
  def user_friendly_message
192
- <<~TEXT.strip
193
- Event#id must be present, got #{event.id.inspect} instead.
194
- TEXT
194
+ "Event with #id #{event.id.inspect} must be present, but it could not be found."
195
+ end
196
+ end
197
+
198
+ class MissingPartitions < Error
199
+ attr_reader :stream, :event_types
200
+
201
+ # @param stream [PgEventstore::Stream]
202
+ # @param event_types [Array<String>]
203
+ def initialize(stream, event_types)
204
+ @stream = stream
205
+ @event_types = event_types
195
206
  end
196
207
  end
197
208
  end
@@ -31,6 +31,10 @@ module PgEventstore
31
31
  # @return [String, nil] UUIDv4 of an event the current event points to. If it is not nil, then the current
32
32
  # event is a link
33
33
  attribute(:link_id)
34
+ # @!attribute link
35
+ # @return [PgEventstore::Event, nil] when resolve_link_tos: true option is provided during the read of events and
36
+ # event is a link event - this attribute will be pointing to that link
37
+ attribute(:link)
34
38
  # @!attribute created_at
35
39
  # @return [Time, nil] a timestamp an event was created at
36
40
  attribute(:created_at)
@@ -41,7 +45,7 @@ module PgEventstore
41
45
  def ==(other)
42
46
  return false unless other.is_a?(PgEventstore::Event)
43
47
 
44
- attributes_hash == other.attributes_hash
48
+ attributes_hash.except(:link) == other.attributes_hash.except(:link)
45
49
  end
46
50
 
47
51
  # Detect whether an event is a link event
@@ -24,7 +24,10 @@ module PgEventstore
24
24
  middlewares.each do |middleware|
25
25
  middleware.deserialize(event)
26
26
  end
27
- event.stream = PgEventstore::Stream.new(**attrs['stream'].transform_keys(&:to_sym)) if attrs.key?('stream')
27
+ event.stream = PgEventstore::Stream.new(
28
+ **attrs.slice('context', 'stream_name', 'stream_id').transform_keys(&:to_sym)
29
+ )
30
+ event.link = without_middlewares.deserialize(attrs['link']) if attrs.key?('link')
28
31
  event
29
32
  end
30
33
 
@@ -15,46 +15,90 @@ module PgEventstore
15
15
  @deserializer = deserializer
16
16
  end
17
17
 
18
+ # @param id [String, nil]
19
+ # @return [Boolean]
20
+ def event_exists?(id)
21
+ return false if id.nil?
22
+
23
+ sql_builder = SQLBuilder.new.select('1 as exists').from('events').where('id = ?', id).limit(1)
24
+ connection.with do |conn|
25
+ conn.exec_params(*sql_builder.to_exec_params)
26
+ end.to_a.dig(0, 'exists') == 1
27
+ end
28
+
29
+ # @param stream [PgEventstore::Stream]
30
+ # @return [Integer, nil]
31
+ def stream_revision(stream)
32
+ sql_builder = SQLBuilder.new.from('events').select('stream_revision').
33
+ where('context = ? and stream_name = ? and stream_id = ?', *stream.to_a).
34
+ order('stream_revision DESC').
35
+ limit(1)
36
+ connection.with do |conn|
37
+ conn.exec_params(*sql_builder.to_exec_params)
38
+ end.to_a.dig(0, 'stream_revision')
39
+ end
40
+
18
41
  # @see PgEventstore::Client#read for more info
19
42
  # @param stream [PgEventstore::Stream]
20
43
  # @param options [Hash]
21
44
  # @return [Array<PgEventstore::Event>]
22
45
  def stream_events(stream, options)
23
- options = event_type_queries.include_event_types_ids(options)
24
46
  exec_params = events_filtering(stream, options).to_exec_params
25
47
  raw_events = connection.with do |conn|
26
48
  conn.exec_params(*exec_params)
27
49
  end.to_a
28
- preloader.preload_related_objects(raw_events)
50
+ raw_events = links_resolver.resolve(raw_events) if options[:resolve_link_tos]
29
51
  deserializer.deserialize_many(raw_events)
30
52
  end
31
53
 
32
- # @param stream [PgEventstore::Stream] persisted stream
33
- # @param event [PgEventstore::Event]
54
+ # @param stream [PgEventstore::Stream]
55
+ # @param events [Array<PgEventstore::Event>]
34
56
  # @return [PgEventstore::Event]
35
- def insert(stream, event)
36
- serializer.serialize(event)
37
-
38
- attributes = event.options_hash.slice(:id, :data, :metadata, :stream_revision, :link_id).compact
39
- attributes[:stream_id] = stream.id
40
- attributes[:event_type_id] = event_type_queries.find_or_create_type(event.type)
57
+ def insert(stream, events)
58
+ sql_rows_for_insert, values = prepared_statements(stream, events)
59
+ columns = %w[id data metadata stream_revision link_id type context stream_name stream_id]
41
60
 
42
61
  sql = <<~SQL
43
- INSERT INTO events (#{attributes.keys.join(', ')})
44
- VALUES (#{Utils.positional_vars(attributes.values)})
45
- RETURNING *, $#{attributes.values.size + 1} as type
62
+ INSERT INTO events (#{columns.join(', ')})
63
+ VALUES #{sql_rows_for_insert.join(", ")}
64
+ RETURNING *
46
65
  SQL
47
66
 
48
- raw_event = connection.with do |conn|
49
- conn.exec_params(sql, [*attributes.values, event.type])
50
- end.to_a.first
51
- deserializer.without_middlewares.deserialize(raw_event).tap do |persisted_event|
52
- persisted_event.stream = stream
67
+ connection.with do |conn|
68
+ conn.exec_params(sql, values)
69
+ end.map do |raw_event|
70
+ deserializer.without_middlewares.deserialize(raw_event)
53
71
  end
54
72
  end
55
73
 
56
74
  private
57
75
 
76
+ # @param stream [PgEventstore::Stream]
77
+ # @param events [Array<PgEventstore::Event>]
78
+ # @return [Array<Array<String>, Array<Object>>]
79
+ def prepared_statements(stream, events)
80
+ positional_counter = 1
81
+ values = []
82
+ sql_rows_for_insert = events.map do |event|
83
+ event = serializer.serialize(event)
84
+ attributes = event.options_hash.slice(:id, :data, :metadata, :stream_revision, :link_id, :type)
85
+
86
+ attributes = attributes.merge(stream.to_hash)
87
+ prepared = attributes.values.map do |value|
88
+ if value.nil?
89
+ 'DEFAULT'
90
+ else
91
+ "$#{positional_counter}".tap do
92
+ values.push(value)
93
+ positional_counter += 1
94
+ end
95
+ end
96
+ end
97
+ "(#{prepared.join(',')})"
98
+ end
99
+ [sql_rows_for_insert, values]
100
+ end
101
+
58
102
  # @param stream [PgEventstore::Stream]
59
103
  # @param options [Hash]
60
104
  # @return [PgEventstore::EventsFilteringQuery]
@@ -64,14 +108,9 @@ module PgEventstore
64
108
  QueryBuilders::EventsFiltering.specific_stream_filtering(stream, options)
65
109
  end
66
110
 
67
- # @return [PgEventstore::EventTypeQueries]
68
- def event_type_queries
69
- EventTypeQueries.new(connection)
70
- end
71
-
72
- # @return [PgEventstore::Preloader]
73
- def preloader
74
- Preloader.new(connection)
111
+ # @return [PgEventstore::LinksResolver]
112
+ def links_resolver
113
+ LinksResolver.new(connection)
75
114
  end
76
115
  end
77
116
  end
@@ -0,0 +1,31 @@
1
+ # frozen_string_literal: true
2
+
3
+ module PgEventstore
4
+ # @!visibility private
5
+ class LinksResolver
6
+ attr_reader :connection
7
+ private :connection
8
+
9
+ # @param connection [PgEventstore::Connection]
10
+ def initialize(connection)
11
+ @connection = connection
12
+ end
13
+
14
+ # @param raw_events [Array<Hash>]
15
+ def resolve(raw_events)
16
+ ids = raw_events.map { _1['link_id'] }.compact.uniq
17
+ return raw_events if ids.empty?
18
+
19
+ original_events = connection.with do |conn|
20
+ conn.exec_params('select * from events where id = ANY($1::uuid[])', [ids])
21
+ end.to_h { |attrs| [attrs['id'], attrs] }
22
+
23
+ raw_events.map do |attrs|
24
+ original_event = original_events[attrs['link_id']]
25
+ next attrs unless original_event
26
+
27
+ original_event.merge('link' => attrs).merge(attrs.except(*original_event.keys))
28
+ end
29
+ end
30
+ end
31
+ end