pg_eventstore 0.9.0 → 0.10.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31) hide show
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +9 -0
  3. data/db/migrations/{3_create_events.sql → 1_create_events.sql} +11 -14
  4. data/db/migrations/5_partitions.sql +16 -0
  5. data/docs/events_and_streams.md +5 -3
  6. data/docs/how_it_works.md +80 -0
  7. data/lib/pg_eventstore/client.rb +11 -7
  8. data/lib/pg_eventstore/commands/append.rb +17 -8
  9. data/lib/pg_eventstore/commands/link_to.rb +3 -3
  10. data/lib/pg_eventstore/commands/read.rb +1 -1
  11. data/lib/pg_eventstore/errors.rb +17 -6
  12. data/lib/pg_eventstore/event_deserializer.rb +3 -1
  13. data/lib/pg_eventstore/queries/event_queries.rb +61 -28
  14. data/lib/pg_eventstore/queries/partition_queries.rb +184 -0
  15. data/lib/pg_eventstore/queries/subscription_queries.rb +2 -10
  16. data/lib/pg_eventstore/queries/transaction_queries.rb +13 -0
  17. data/lib/pg_eventstore/queries.rb +4 -6
  18. data/lib/pg_eventstore/query_builders/events_filtering_query.rb +10 -18
  19. data/lib/pg_eventstore/rspec/test_helpers.rb +16 -1
  20. data/lib/pg_eventstore/sql_builder.rb +34 -4
  21. data/lib/pg_eventstore/stream.rb +3 -8
  22. data/lib/pg_eventstore/version.rb +1 -1
  23. metadata +9 -11
  24. data/db/migrations/1_create_streams.sql +0 -13
  25. data/db/migrations/2_create_event_types.sql +0 -10
  26. data/lib/pg_eventstore/queries/event_type_queries.rb +0 -74
  27. data/lib/pg_eventstore/queries/preloader.rb +0 -37
  28. data/lib/pg_eventstore/queries/stream_queries.rb +0 -77
  29. /data/db/migrations/{4_create_subscriptions.sql → 2_create_subscriptions.sql} +0 -0
  30. /data/db/migrations/{5_create_subscription_commands.sql → 3_create_subscription_commands.sql} +0 -0
  31. /data/db/migrations/{6_create_subscriptions_set_commands.sql → 4_create_subscriptions_set_commands.sql} +0 -0
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 4ca951a11a040b193a289feb28627294328acebaa929730928749ddddeb41a83
4
- data.tar.gz: 4af9d3c3ee4b4b4ce77092bc699cb1ad97d9a9f8e5fde4ff9d1d8078c2385efc
3
+ metadata.gz: 9426880dd6354875d896f5bf37abeac180c4f04b26905db0efc035fec316e6b9
4
+ data.tar.gz: 355a6c370451fff9e391aa0ecb1c6abf105c6fc537d99f6b7371359067dd2ee4
5
5
  SHA512:
6
- metadata.gz: 68baa83af9884c14f312e85f4ef619dd9365ffc273b0a629b14c466f25d393de884c2fdca02849b03f40a87535f6253751d3bc4eb1d702e630cceb0fd511c0a7
7
- data.tar.gz: e2c14d9b5767c3a55dda6a594345159e11c5a5e04c18060ddcb587ba40a011be84936c0a36c746ad40928d97a321e468890b37108c0129e4cdb2bda17996d4a5
6
+ metadata.gz: fc96e847b892e375115a08985678328d166742e5f6be3ee967f67dc808c80166a58f010555567b05fbac172b31c7d98276d1e898de6889d5d53d3ccf0ab2be47
7
+ data.tar.gz: cba82b0c20d4104cd6878fd2471e5c4d0c70e60991f573cd5aa1a845e7e5b443cf9f15448b81754888a84883c6bfbaf932e452c13fd7d5d229b03185741f1dbd
data/CHANGELOG.md CHANGED
@@ -1,5 +1,14 @@
1
1
  ## [Unreleased]
2
2
 
3
+ ## [0.10.1] - 2024-03-12
4
+
5
+ - Handle edge case when creating partitions
6
+
7
+ ## [0.10.0] - 2024-03-12
8
+
9
+ - Reimplement db structure
10
+ - Optimize `#append_to_stream` method - it now produces one `INSERT` query when publishing multiple events
11
+
3
12
  ## [0.9.0] - 2024-02-23
4
13
 
5
14
  - Use POSIX locale for streams and event types
@@ -1,27 +1,24 @@
1
1
  CREATE TABLE public.events
2
2
  (
3
3
  id uuid DEFAULT public.gen_random_uuid() NOT NULL,
4
- stream_id bigint NOT NULL,
4
+ context character varying COLLATE "POSIX" NOT NULL,
5
+ stream_name character varying COLLATE "POSIX" NOT NULL,
6
+ stream_id character varying COLLATE "POSIX" NOT NULL,
5
7
  global_position bigserial NOT NULL,
6
8
  stream_revision integer NOT NULL,
7
9
  data jsonb DEFAULT '{}'::jsonb NOT NULL,
8
10
  metadata jsonb DEFAULT '{}'::jsonb NOT NULL,
9
11
  link_id uuid,
10
12
  created_at timestamp without time zone DEFAULT now() NOT NULL,
11
- event_type_id bigint NOT NULL
12
- );
13
+ type character varying COLLATE "POSIX" NOT NULL
14
+ ) PARTITION BY LIST (context);
13
15
 
14
16
  ALTER TABLE ONLY public.events
15
- ADD CONSTRAINT events_pkey PRIMARY KEY (id);
17
+ ADD CONSTRAINT events_pkey PRIMARY KEY (context, stream_name, type, global_position);
16
18
 
17
- CREATE INDEX idx_events_event_type_id_and_global_position ON public.events USING btree (event_type_id, global_position);
18
- CREATE INDEX idx_events_global_position ON public.events USING btree (global_position);
19
- CREATE INDEX idx_events_link_id ON public.events USING btree (link_id);
20
- CREATE INDEX idx_events_stream_id_and_revision ON public.events USING btree (stream_id, stream_revision);
19
+ CREATE INDEX idx_events_stream_id_and_stream_revision ON public.events USING btree (stream_id, stream_revision);
20
+ CREATE INDEX idx_events_stream_id_and_global_position ON public.events USING btree (stream_id, global_position);
21
21
 
22
- ALTER TABLE ONLY public.events
23
- ADD CONSTRAINT events_stream_fk FOREIGN KEY (stream_id) REFERENCES public.streams (id) ON DELETE CASCADE;
24
- ALTER TABLE ONLY public.events
25
- ADD CONSTRAINT events_event_type_fk FOREIGN KEY (event_type_id) REFERENCES public.event_types (id);
26
- ALTER TABLE ONLY public.events
27
- ADD CONSTRAINT events_link_fk FOREIGN KEY (link_id) REFERENCES public.events (id) ON DELETE CASCADE;
22
+ CREATE INDEX idx_events_id ON public.events USING btree (id);
23
+ CREATE INDEX idx_events_link_id ON public.events USING btree (link_id);
24
+ CREATE INDEX idx_events_global_position ON public.events USING btree (global_position);
@@ -0,0 +1,16 @@
1
+ CREATE TABLE public.partitions
2
+ (
3
+ id bigserial NOT NULL,
4
+ context character varying COLLATE "POSIX" NOT NULL,
5
+ stream_name character varying COLLATE "POSIX",
6
+ event_type character varying COLLATE "POSIX",
7
+ table_name character varying COLLATE "POSIX" NOT NULL
8
+ );
9
+
10
+ ALTER TABLE ONLY public.partitions
11
+ ADD CONSTRAINT partitions_pkey PRIMARY KEY (id);
12
+
13
+ CREATE UNIQUE INDEX idx_partitions_by_context ON public.partitions USING btree (context) WHERE stream_name IS NULL AND event_type IS NULL;
14
+ CREATE UNIQUE INDEX idx_partitions_by_context_and_stream_name ON public.partitions USING btree (context, stream_name) WHERE event_type IS NULL;
15
+ CREATE UNIQUE INDEX idx_partitions_by_context_and_stream_name_and_event_type ON public.partitions USING btree (context, stream_name, event_type);
16
+ CREATE UNIQUE INDEX idx_partitions_by_partition_table_name ON public.partitions USING btree (table_name);
@@ -22,7 +22,7 @@
22
22
  Example:
23
23
 
24
24
  ```ruby
25
- PgEventstore::Event.new(data: { 'foo' => 'bar' })
25
+ PgEventstore::Event.new(data: { 'foo' => 'bar' }, type: 'FooChanged')
26
26
  ```
27
27
 
28
28
  ## Stream object
@@ -32,8 +32,6 @@ To be able to manipulate a stream, you have to compute a stream's object first.
32
32
  - `context` - String(required). A Bounded Context, read more [here](https://martinfowler.com/bliki/BoundedContext.html). Values which start from `$` sign are reserved by `pg_eventstore`. Such contexts can't be used to append events.
33
33
  - `stream_name` - String(required). A stream name.
34
34
  - `stream_id` - String(required). A stream id.
35
- - `id` - Integer(optional, read only). Internal id. It is set when a stream is returned from the database as part of the deserialization process. Manually assigning this attribute has no effect.
36
- - `stream_revision` - Integer(optional, read only). Current stream's revision. You can rely on this value when setting the `:expected_revision` option when appending events to a stream. It is set when a stream is returned from the database a part of the deserialization process. Manually assigning this attribute has no effect.
37
35
 
38
36
  Example:
39
37
 
@@ -43,3 +41,7 @@ PgEventstore::Stream.new(context: 'Sales', stream_name: 'Customer', stream_id: '
43
41
  ```
44
42
 
45
43
  There is a special stream, called the "all" stream. You can get this object by calling the `PgEventstore::Stream.all_stream` method. Read more about the "all" stream in the `Reading from the "all" stream` section of the [Reading events](reading_events.md) chapter.
44
+
45
+ ## Important note
46
+
47
+ Because the database is designed for Event Sourcing, some limitations should be met - a combination of `Event#type`, `Stream#context` and `Stream#stream_name` must have low cardinality (a low number of unique values). This means you should pre-define the values there. Otherwise it may lead to performance degradation. See the [How it works](how_it_works.md) chapter for the details.
@@ -0,0 +1,80 @@
1
+ # How it works
2
+
3
+ ## Database architecture
4
+
5
+ The database is designed specifically for Event Sourcing using Domain-Driven Design. The `events` table is partitioned in the following way:
6
+
7
+ - For each `Stream#context` there is a subpartition of `events` table. Those tables have `contexts_` prefix.
8
+ - For each `Stream#stream_name` there is a subpartition of `contexts_` table. Those tables have `stream_names_` prefix.
9
+ - For each `Event#type` there is a subpartition of `stream_names_` table. Those tables have `event_types_` prefix.
10
+
11
+ To implement partitions - Declarative Partitioning is used. Partitioning means that you should not have any random values in the combination of `Stream#context`, `Stream#stream_name` and `Event#type`. A combination of those values must have low cardinality (a low number of distinct values) and must be pre-defined in your application. Otherwise it will lead to performance degradation. More about PostgreSQL partitions is [here](https://www.postgresql.org/docs/current/ddl-partitioning.html).
12
+
13
+ So, let's say you want to publish the following event:
14
+
15
+ ```ruby
16
+ stream = PgEventstore::Stream.new(context: 'SomeCtx', stream_name: 'SomeStream', stream_id: '1')
17
+ event = PgEventstore::Event.new(type: 'SomethingChanged', data: { foo: :bar })
18
+ PgEventstore.client.append_to_stream(stream, event)
19
+ ```
20
+
21
+ To actually create the `events` record, the following partitions will be created:
22
+
23
+ - `contexts_81820a` table which is a subpartition of the `events` table. It is needed to handle all events which come to the `"SomeCtx"` context
24
+ - `stream_names_ecb803` table which is a subpartition of the `contexts_81820a` table. It is needed to handle all events which come to the `"SomeStream"` stream name of the `"SomeCtx"` context
25
+ - `event_types_aeadd5` table which is a subpartition of `stream_names_ecb803` table. It is needed to handle all events which have `"SomethingChanged"` event type of `"SomeStream"` stream name of `"SomeCtx"` context
26
+
27
+ You can check all partitions and their associated contexts, stream names and event types by querying the `partitions` table. Example (based on the publish sample above):
28
+
29
+ ```ruby
30
+ PgEventstore.connection.with do |conn|
31
+ conn.exec('select * from partitions')
32
+ end.to_a
33
+ # =>
34
+ # [{"id"=>1, "context"=>"SomeCtx", "stream_name"=>nil, "event_type"=>nil, "table_name"=>"contexts_81820a"},
35
+ # {"id"=>2, "context"=>"SomeCtx", "stream_name"=>"SomeStream", "event_type"=>nil, "table_name"=>"stream_names_ecb803"},
36
+ # {"id"=>3, "context"=>"SomeCtx", "stream_name"=>"SomeStream", "event_type"=>"SomethingChanged", "table_name"=>"event_types_aeadd5"}]
37
+ ```
38
+
39
+ ## Appending events and multiple commands
40
+
41
+ You may want to get familiar with [Appending events](appending_events.md) and [multiple commands](multiple_commands.md) first.
42
+
43
+ `pg_eventstore` internally uses the `Serializable` transaction isolation level (more about the different transaction isolation levels in PostgreSQL is [here](https://www.postgresql.org/docs/current/transaction-iso.html)). In practice this means that any transaction may fail with a serialization error, and the common approach is to restart this transaction. For ruby this means re-execution of the block of code, which is why there is a warning regarding potential block re-execution when using `#multiple`. However, the current implementation limits 99% of retries to manipulations of a single stream. For example, when two parallel processes are changing the same stream. If different streams are being changed at the same time - it is less likely that a retry will be performed.
44
+
45
+ Examples:
46
+
47
+ - if "process 1" and "process 2" perform the append command at the same time - one of the append commands will be retried:
48
+ ```ruby
49
+ # process 1
50
+ stream = PgEventstore::Stream.new(context: 'MyCtx', stream_name: 'MyStream', stream_id: '1')
51
+ event = PgEventstore::Event.new(type: 'SomethingChanged', data: { foo: :bar })
52
+ PgEventstore.client.append_to_stream(stream, event)
53
+
54
+ # process 2
55
+ stream = PgEventstore::Stream.new(context: 'MyCtx', stream_name: 'MyStream', stream_id: '1')
56
+ event = PgEventstore::Event.new(type: 'SomethingElseChanged', data: { baz: :bar })
57
+ PgEventstore.client.append_to_stream(stream, event)
58
+ ```
59
+
60
+ - if "process 1" performs multiple commands at the same time "process 2" performs append command which involves the same stream from "process 1" - either block of `#multiple` or `#append_to_stream` will be retried:
61
+
62
+ ```ruby
63
+ # process 1
64
+ stream1 = PgEventstore::Stream.new(context: 'MyCtx', stream_name: 'MyStream1', stream_id: '1')
65
+ stream2 = PgEventstore::Stream.new(context: 'MyCtx', stream_name: 'MyStream2', stream_id: '1')
66
+ event = PgEventstore::Event.new(type: 'SomethingChanged', data: { foo: :bar })
67
+ PgEventstore.client.multiple do
68
+ PgEventstore.client.append_to_stream(stream1, event)
69
+ PgEventstore.client.append_to_stream(stream2, event)
70
+ end
71
+
72
+ # process 2
73
+ stream2 = PgEventstore::Stream.new(context: 'MyCtx', stream_name: 'MyStream2', stream_id: '1')
74
+ event = PgEventstore::Event.new(type: 'SomethingChanged', data: { foo: :bar })
75
+ PgEventstore.client.append_to_stream(stream2, event)
76
+ ```
77
+
78
+ Retries also concern your potential implementation of [middlewares](writing_middleware.md). For example, `YourAwesomeMiddleware#serialize` can be executed several times when appending the event. This is especially important when you involve your microservices here - they can receive the same payload several times.
79
+
80
+ Conclusion. When developing using `pg_eventstore` - always keep in mind that some parts of your implementation can be executed several times before successfully publishing an event, or even when reading events (`#deserialize` middleware method) if you perform reading within a `#multiple` block.
@@ -29,7 +29,9 @@ module PgEventstore
29
29
  result =
30
30
  Commands::Append.new(
31
31
  Queries.new(
32
- streams: stream_queries, events: event_queries(middlewares(middlewares)), transactions: transaction_queries
32
+ partitions: partition_queries,
33
+ events: event_queries(middlewares(middlewares)),
34
+ transactions: transaction_queries
33
35
  )
34
36
  ).call(stream, *events_or_event, options: options)
35
37
  events_or_event.is_a?(Array) ? result : result.first
@@ -105,7 +107,7 @@ module PgEventstore
105
107
  # @raise [PgEventstore::StreamNotFoundError]
106
108
  def read(stream, options: {}, middlewares: nil)
107
109
  Commands::Read.
108
- new(Queries.new(streams: stream_queries, events: event_queries(middlewares(middlewares)))).
110
+ new(Queries.new(partitions: partition_queries, events: event_queries(middlewares(middlewares)))).
109
111
  call(stream, options: { max_count: config.max_count }.merge(options))
110
112
  end
111
113
 
@@ -114,7 +116,7 @@ module PgEventstore
114
116
  def read_paginated(stream, options: {}, middlewares: nil)
115
117
  cmd_class = stream.system? ? Commands::SystemStreamReadPaginated : Commands::RegularStreamReadPaginated
116
118
  cmd_class.
117
- new(Queries.new(streams: stream_queries, events: event_queries(middlewares(middlewares)))).
119
+ new(Queries.new(partitions: partition_queries, events: event_queries(middlewares(middlewares)))).
118
120
  call(stream, options: { max_count: config.max_count }.merge(options))
119
121
  end
120
122
 
@@ -133,7 +135,9 @@ module PgEventstore
133
135
  result =
134
136
  Commands::LinkTo.new(
135
137
  Queries.new(
136
- streams: stream_queries, events: event_queries(middlewares(middlewares)), transactions: transaction_queries
138
+ partitions: partition_queries,
139
+ events: event_queries(middlewares(middlewares)),
140
+ transactions: transaction_queries
137
141
  )
138
142
  ).call(stream, *events_or_event, options: options)
139
143
  events_or_event.is_a?(Array) ? result : result.first
@@ -154,9 +158,9 @@ module PgEventstore
154
158
  PgEventstore.connection(config.name)
155
159
  end
156
160
 
157
- # @return [PgEventstore::StreamQueries]
158
- def stream_queries
159
- StreamQueries.new(connection)
161
+ # @return [PgEventstore::PartitionQueries]
162
+ def partition_queries
163
+ PartitionQueries.new(connection)
160
164
  end
161
165
 
162
166
  # @return [PgEventstore::TransactionQueries]
@@ -16,19 +16,28 @@ module PgEventstore
16
16
  raise SystemStreamError, stream if stream.system?
17
17
 
18
18
  queries.transactions.transaction do
19
- stream = queries.streams.find_or_create_stream(stream)
20
- revision = stream.stream_revision
19
+ revision = queries.events.stream_revision(stream) || Stream::NON_EXISTING_STREAM_REVISION
21
20
  assert_expected_revision!(revision, options[:expected_revision], stream) if options[:expected_revision]
22
- events.map.with_index(1) do |event, index|
23
- queries.events.insert(stream, event_modifier.call(event, revision + index))
24
- end.tap do
25
- queries.streams.update_stream_revision(stream, revision + events.size)
21
+ formatted_events = events.map.with_index(1) do |event, index|
22
+ event_modifier.call(event, revision + index)
26
23
  end
24
+ create_partitions(stream, formatted_events)
25
+ queries.events.insert(stream, formatted_events)
27
26
  end
28
27
  end
29
28
 
30
29
  private
31
30
 
31
+ # @param stream [PgEventstore::Stream]
32
+ # @param events [Array<PgEventstore::Event>]
33
+ # @return [void]
34
+ def create_partitions(stream, events)
35
+ missing_event_types = events.map(&:type).map(&:to_s).uniq.select do |event_type|
36
+ queries.partitions.partition_required?(stream, event_type)
37
+ end
38
+ raise MissingPartitions.new(stream, missing_event_types) if missing_event_types.any?
39
+ end
40
+
32
41
  # @param revision [Integer]
33
42
  # @param expected_revision [Symbol, Integer]
34
43
  # @param stream [PgEventstore::Stream]
@@ -46,12 +55,12 @@ module PgEventstore
46
55
  end
47
56
 
48
57
  in [Integer, Symbol]
49
- if revision == Stream::INITIAL_STREAM_REVISION && expected_revision == :stream_exists
58
+ if revision == Stream::NON_EXISTING_STREAM_REVISION && expected_revision == :stream_exists
50
59
  raise WrongExpectedRevisionError.new(
51
60
  revision: revision, expected_revision: expected_revision, stream: stream
52
61
  )
53
62
  end
54
- if revision > Stream::INITIAL_STREAM_REVISION && expected_revision == :no_stream
63
+ if revision > Stream::NON_EXISTING_STREAM_REVISION && expected_revision == :no_stream
55
64
  raise WrongExpectedRevisionError.new(
56
65
  revision: revision, expected_revision: expected_revision, stream: stream
57
66
  )
@@ -13,7 +13,7 @@ module PgEventstore
13
13
  # @raise [PgEventstore::WrongExpectedRevisionError]
14
14
  # @raise [PgEventstore::NotPersistedEventError]
15
15
  def call(stream, *events, options: {})
16
- events.each(&method(:check_id_presence))
16
+ events.each(&method(:check_event_presence))
17
17
  append_cmd = Append.new(queries)
18
18
  append_cmd.call(stream, *events, options: options, event_modifier: EventModifiers::PrepareLinkEvent)
19
19
  end
@@ -23,8 +23,8 @@ module PgEventstore
23
23
  # Checks if Event#id is present. An event must have the #id value in order to be linked.
24
24
  # @param event [PgEventstore::Event]
25
25
  # @return [void]
26
- def check_id_presence(event)
27
- return unless event.id.nil?
26
+ def check_event_presence(event)
27
+ return if queries.events.event_exists?(event.id)
28
28
 
29
29
  raise NotPersistedEventError, event
30
30
  end
@@ -15,7 +15,7 @@ module PgEventstore
15
15
  # @return [Array<PgEventstore::Event>]
16
16
  # @raise [PgEventstore::StreamNotFoundError]
17
17
  def call(stream, options: {})
18
- stream = queries.streams.find_stream(stream) || raise(StreamNotFoundError, stream) unless stream.all_stream?
18
+ queries.events.stream_revision(stream) || raise(StreamNotFoundError, stream) unless stream.all_stream?
19
19
 
20
20
  queries.events.stream_events(stream, options)
21
21
  end
@@ -58,9 +58,11 @@ module PgEventstore
58
58
 
59
59
  # @return [String]
60
60
  def user_friendly_message
61
- return expected_stream_exists if revision == -1 && expected_revision == :stream_exists
62
- return expected_no_stream if revision > -1 && expected_revision == :no_stream
63
- return current_no_stream if revision == -1 && expected_revision.is_a?(Integer)
61
+ if revision == Stream::NON_EXISTING_STREAM_REVISION && expected_revision == :stream_exists
62
+ return expected_stream_exists
63
+ end
64
+ return expected_no_stream if revision > Stream::NON_EXISTING_STREAM_REVISION && expected_revision == :no_stream
65
+ return current_no_stream if revision == Stream::NON_EXISTING_STREAM_REVISION && expected_revision.is_a?(Integer)
64
66
 
65
67
  unmatched_stream_revision
66
68
  end
@@ -189,9 +191,18 @@ module PgEventstore
189
191
 
190
192
  # @return [String]
191
193
  def user_friendly_message
192
- <<~TEXT.strip
193
- Event#id must be present, got #{event.id.inspect} instead.
194
- TEXT
194
+ "Event with #id #{event.id.inspect} must be present, but it could not be found."
195
+ end
196
+ end
197
+
198
+ class MissingPartitions < Error
199
+ attr_reader :stream, :event_types
200
+
201
+ # @param stream [PgEventstore::Stream]
202
+ # @param event_types [Array<String>]
203
+ def initialize(stream, event_types)
204
+ @stream = stream
205
+ @event_types = event_types
195
206
  end
196
207
  end
197
208
  end
@@ -24,7 +24,9 @@ module PgEventstore
24
24
  middlewares.each do |middleware|
25
25
  middleware.deserialize(event)
26
26
  end
27
- event.stream = PgEventstore::Stream.new(**attrs['stream'].transform_keys(&:to_sym)) if attrs.key?('stream')
27
+ event.stream = PgEventstore::Stream.new(
28
+ **attrs.slice('context', 'stream_name', 'stream_id').transform_keys(&:to_sym)
29
+ )
28
30
  event
29
31
  end
30
32
 
@@ -15,46 +15,89 @@ module PgEventstore
15
15
  @deserializer = deserializer
16
16
  end
17
17
 
18
+ # @param id [String, nil]
19
+ # @return [Boolean]
20
+ def event_exists?(id)
21
+ return false if id.nil?
22
+
23
+ sql_builder = SQLBuilder.new.select('1 as exists').from('events').where('id = ?', id).limit(1)
24
+ connection.with do |conn|
25
+ conn.exec_params(*sql_builder.to_exec_params)
26
+ end.to_a.dig(0, 'exists') == 1
27
+ end
28
+
29
+ # @param stream [PgEventstore::Stream]
30
+ # @return [Integer, nil]
31
+ def stream_revision(stream)
32
+ sql_builder = SQLBuilder.new.from('events').select('stream_revision').
33
+ where('context = ? and stream_name = ? and stream_id = ?', *stream.to_a).
34
+ order('stream_revision DESC').
35
+ limit(1)
36
+ connection.with do |conn|
37
+ conn.exec_params(*sql_builder.to_exec_params)
38
+ end.to_a.dig(0, 'stream_revision')
39
+ end
40
+
18
41
  # @see PgEventstore::Client#read for more info
19
42
  # @param stream [PgEventstore::Stream]
20
43
  # @param options [Hash]
21
44
  # @return [Array<PgEventstore::Event>]
22
45
  def stream_events(stream, options)
23
- options = event_type_queries.include_event_types_ids(options)
24
46
  exec_params = events_filtering(stream, options).to_exec_params
25
47
  raw_events = connection.with do |conn|
26
48
  conn.exec_params(*exec_params)
27
49
  end.to_a
28
- preloader.preload_related_objects(raw_events)
29
50
  deserializer.deserialize_many(raw_events)
30
51
  end
31
52
 
32
- # @param stream [PgEventstore::Stream] persisted stream
33
- # @param event [PgEventstore::Event]
53
+ # @param stream [PgEventstore::Stream]
54
+ # @param events [Array<PgEventstore::Event>]
34
55
  # @return [PgEventstore::Event]
35
- def insert(stream, event)
36
- serializer.serialize(event)
37
-
38
- attributes = event.options_hash.slice(:id, :data, :metadata, :stream_revision, :link_id).compact
39
- attributes[:stream_id] = stream.id
40
- attributes[:event_type_id] = event_type_queries.find_or_create_type(event.type)
56
+ def insert(stream, events)
57
+ sql_rows_for_insert, values = prepared_statements(stream, events)
58
+ columns = %w[id data metadata stream_revision link_id type context stream_name stream_id]
41
59
 
42
60
  sql = <<~SQL
43
- INSERT INTO events (#{attributes.keys.join(', ')})
44
- VALUES (#{Utils.positional_vars(attributes.values)})
45
- RETURNING *, $#{attributes.values.size + 1} as type
61
+ INSERT INTO events (#{columns.join(', ')})
62
+ VALUES #{sql_rows_for_insert.join(", ")}
63
+ RETURNING *
46
64
  SQL
47
65
 
48
- raw_event = connection.with do |conn|
49
- conn.exec_params(sql, [*attributes.values, event.type])
50
- end.to_a.first
51
- deserializer.without_middlewares.deserialize(raw_event).tap do |persisted_event|
52
- persisted_event.stream = stream
66
+ connection.with do |conn|
67
+ conn.exec_params(sql, values)
68
+ end.map do |raw_event|
69
+ deserializer.without_middlewares.deserialize(raw_event)
53
70
  end
54
71
  end
55
72
 
56
73
  private
57
74
 
75
+ # @param stream [PgEventstore::Stream]
76
+ # @param events [Array<PgEventstore::Event>]
77
+ # @return [Array<Array<String>, Array<Object>>]
78
+ def prepared_statements(stream, events)
79
+ positional_counter = 1
80
+ values = []
81
+ sql_rows_for_insert = events.map do |event|
82
+ event = serializer.serialize(event)
83
+ attributes = event.options_hash.slice(:id, :data, :metadata, :stream_revision, :link_id, :type)
84
+
85
+ attributes = attributes.merge(stream.to_hash)
86
+ prepared = attributes.values.map do |value|
87
+ if value.nil?
88
+ 'DEFAULT'
89
+ else
90
+ "$#{positional_counter}".tap do
91
+ values.push(value)
92
+ positional_counter += 1
93
+ end
94
+ end
95
+ end
96
+ "(#{prepared.join(',')})"
97
+ end
98
+ [sql_rows_for_insert, values]
99
+ end
100
+
58
101
  # @param stream [PgEventstore::Stream]
59
102
  # @param options [Hash]
60
103
  # @return [PgEventstore::EventsFilteringQuery]
@@ -63,15 +106,5 @@ module PgEventstore
63
106
 
64
107
  QueryBuilders::EventsFiltering.specific_stream_filtering(stream, options)
65
108
  end
66
-
67
- # @return [PgEventstore::EventTypeQueries]
68
- def event_type_queries
69
- EventTypeQueries.new(connection)
70
- end
71
-
72
- # @return [PgEventstore::Preloader]
73
- def preloader
74
- Preloader.new(connection)
75
- end
76
109
  end
77
110
  end
@@ -0,0 +1,184 @@
1
+ # frozen_string_literal: true
2
+
3
+ module PgEventstore
4
+ # @!visibility private
5
+ class PartitionQueries
6
+ attr_reader :connection
7
+ private :connection
8
+
9
+ # @param connection [PgEventstore::Connection]
10
+ def initialize(connection)
11
+ @connection = connection
12
+ end
13
+
14
+ # @param stream [PgEventstore::Stream]
15
+ # @return [Hash] partition attributes
16
+ def create_context_partition(stream)
17
+ attributes = { context: stream.context, table_name: context_partition_name(stream) }
18
+
19
+ loop do
20
+ break unless partition_name_taken?(attributes[:table_name])
21
+
22
+ attributes[:table_name] = attributes[:table_name].next
23
+ end
24
+
25
+ partition_sql = <<~SQL
26
+ INSERT INTO partitions (#{attributes.keys.join(', ')})
27
+ VALUES (#{Utils.positional_vars(attributes.values)}) RETURNING *
28
+ SQL
29
+ partition = connection.with do |conn|
30
+ conn.exec_params(partition_sql, [*attributes.values])
31
+ end.to_a.first
32
+ connection.with do |conn|
33
+ conn.exec(<<~SQL)
34
+ CREATE TABLE #{attributes[:table_name]} PARTITION OF events
35
+ FOR VALUES IN('#{conn.escape_string(stream.context)}') PARTITION BY LIST (stream_name)
36
+ SQL
37
+ end
38
+ partition
39
+ end
40
+
41
+ # @param stream [PgEventstore::Stream]
42
+ # @param context_partition_name [String]
43
+ # @return [Hash] partition attributes
44
+ def create_stream_name_partition(stream, context_partition_name)
45
+ attributes = {
46
+ context: stream.context, stream_name: stream.stream_name, table_name: stream_name_partition_name(stream)
47
+ }
48
+
49
+ loop do
50
+ break unless partition_name_taken?(attributes[:table_name])
51
+
52
+ attributes[:table_name] = attributes[:table_name].next
53
+ end
54
+
55
+ partition_sql = <<~SQL
56
+ INSERT INTO partitions (#{attributes.keys.join(', ')})
57
+ VALUES (#{Utils.positional_vars(attributes.values)}) RETURNING *
58
+ SQL
59
+ partition = connection.with do |conn|
60
+ conn.exec_params(partition_sql, [*attributes.values])
61
+ end.to_a.first
62
+ connection.with do |conn|
63
+ conn.exec(<<~SQL)
64
+ CREATE TABLE #{attributes[:table_name]} PARTITION OF #{context_partition_name}
65
+ FOR VALUES IN('#{conn.escape_string(stream.stream_name)}') PARTITION BY LIST (type)
66
+ SQL
67
+ end
68
+ partition
69
+ end
70
+
71
+ # @param stream [PgEventstore::Stream]
72
+ # @param stream_name_partition_name [String]
73
+ # @return [Hash] partition attributes
74
+ def create_event_type_partition(stream, event_type, stream_name_partition_name)
75
+ attributes = {
76
+ context: stream.context, stream_name: stream.stream_name, event_type: event_type,
77
+ table_name: event_type_partition_name(stream, event_type)
78
+ }
79
+
80
+ loop do
81
+ break unless partition_name_taken?(attributes[:table_name])
82
+
83
+ attributes[:table_name] = attributes[:table_name].next
84
+ end
85
+
86
+ partition_sql = <<~SQL
87
+ INSERT INTO partitions (#{attributes.keys.join(', ')})
88
+ VALUES (#{Utils.positional_vars(attributes.values)}) RETURNING *
89
+ SQL
90
+ partition = connection.with do |conn|
91
+ conn.exec_params(partition_sql, [*attributes.values])
92
+ end.to_a.first
93
+ connection.with do |conn|
94
+ conn.exec(<<~SQL)
95
+ CREATE TABLE #{attributes[:table_name]} PARTITION OF #{stream_name_partition_name}
96
+ FOR VALUES IN('#{conn.escape_string(event_type)}')
97
+ SQL
98
+ end
99
+ partition
100
+ end
101
+
102
+ # @param stream [PgEventstore::Stream]
103
+ # @param event_type [String]
104
+ # @return [Boolean]
105
+ def partition_required?(stream, event_type)
106
+ event_type_partition(stream, event_type).nil?
107
+ end
108
+
109
+ # @param stream [PgEventstore::Stream]
110
+ # @param event_type [String]
111
+ # @return [void]
112
+ def create_partitions(stream, event_type)
113
+ return unless partition_required?(stream, event_type)
114
+
115
+ context_partition = context_partition(stream) || create_context_partition(stream)
116
+ stream_name_partition = stream_name_partition(stream) ||
117
+ create_stream_name_partition(stream, context_partition['table_name'])
118
+
119
+ create_event_type_partition(stream, event_type, stream_name_partition['table_name'])
120
+ end
121
+
122
+ # @param stream [PgEventstore::Stream]
123
+ # @return [Hash, nil] partition attributes
124
+ def context_partition(stream)
125
+ connection.with do |conn|
126
+ conn.exec_params(
127
+ 'select * from partitions where context = $1 and stream_name is null and event_type is null',
128
+ [stream.context]
129
+ )
130
+ end.first
131
+ end
132
+
133
+ # @param stream [PgEventstore::Stream]
134
+ # @return [Hash, nil] partition attributes
135
+ def stream_name_partition(stream)
136
+ connection.with do |conn|
137
+ conn.exec_params(
138
+ <<~SQL,
139
+ select * from partitions where context = $1 and stream_name = $2 and event_type is null
140
+ SQL
141
+ [stream.context, stream.stream_name]
142
+ )
143
+ end.first
144
+ end
145
+
146
+ # @param stream [PgEventstore::Stream]
147
+ # @param event_type [String]
148
+ # @return [Hash, nil] partition attributes
149
+ def event_type_partition(stream, event_type)
150
+ connection.with do |conn|
151
+ conn.exec_params(
152
+ <<~SQL,
153
+ select * from partitions where context = $1 and stream_name = $2 and event_type = $3
154
+ SQL
155
+ [stream.context, stream.stream_name, event_type]
156
+ )
157
+ end.first
158
+ end
159
+
160
+ # @param table_name [String]
161
+ # @return [Boolean]
162
+ def partition_name_taken?(table_name)
163
+ connection.with do |conn|
164
+ conn.exec_params('select 1 as exists from partitions where table_name = $1', [table_name])
165
+ end.to_a.dig(0, 'exists') == 1
166
+ end
167
+
168
+ # @param stream [PgEventstore::Stream]
169
+ # @return [String]
170
+ def context_partition_name(stream)
171
+ "contexts_#{Digest::MD5.hexdigest(stream.context)[0..5]}"
172
+ end
173
+
174
+ # @param stream [PgEventstore::Stream]
175
+ # @return [String]
176
+ def stream_name_partition_name(stream)
177
+ "stream_names_#{Digest::MD5.hexdigest("#{stream.context}-#{stream.stream_name}")[0..5]}"
178
+ end
179
+
180
+ def event_type_partition_name(stream, event_type)
181
+ "event_types_#{Digest::MD5.hexdigest("#{stream.context}-#{stream.stream_name}-#{event_type}")[0..5]}"
182
+ end
183
+ end
184
+ end
@@ -79,10 +79,9 @@ module PgEventstore
79
79
  return [] if query_options.empty?
80
80
 
81
81
  final_builder = union_builders(query_options.map { |id, opts| query_builder(id, opts) })
82
- raw_events = connection.with do |conn|
82
+ connection.with do |conn|
83
83
  conn.exec_params(*final_builder.to_exec_params)
84
84
  end.to_a
85
- preloader.preload_related_objects(raw_events)
86
85
  end
87
86
 
88
87
  # @param id [Integer] subscription's id
@@ -128,9 +127,7 @@ module PgEventstore
128
127
  # @param options [Hash] query options
129
128
  # @return [PgEventstore::SQLBuilder]
130
129
  def query_builder(id, options)
131
- builder = PgEventstore::QueryBuilders::EventsFiltering.subscriptions_events_filtering(
132
- event_type_queries.include_event_types_ids(options)
133
- ).to_sql_builder
130
+ builder = PgEventstore::QueryBuilders::EventsFiltering.subscriptions_events_filtering(options).to_sql_builder
134
131
  builder.select("#{id} as runner_id")
135
132
  end
136
133
 
@@ -152,11 +149,6 @@ module PgEventstore
152
149
  EventTypeQueries.new(connection)
153
150
  end
154
151
 
155
- # @return [PgEventstore::Preloader]
156
- def preloader
157
- Preloader.new(connection)
158
- end
159
-
160
152
  # @param hash [Hash]
161
153
  # @return [Hash]
162
154
  def deserialize(hash)
@@ -36,6 +36,19 @@ module PgEventstore
36
36
  end
37
37
  rescue PG::TRSerializationFailure, PG::TRDeadlockDetected
38
38
  retry
39
+ rescue MissingPartitions => error
40
+ error.event_types.each do |event_type|
41
+ transaction do
42
+ partition_queries.create_partitions(error.stream, event_type)
43
+ end
44
+ rescue PG::UniqueViolation
45
+ retry
46
+ end
47
+ retry
48
+ end
49
+
50
+ def partition_queries
51
+ PartitionQueries.new(connection)
39
52
  end
40
53
  end
41
54
  end
@@ -4,13 +4,11 @@ require_relative 'sql_builder'
4
4
  require_relative 'query_builders/events_filtering_query'
5
5
  require_relative 'queries/transaction_queries'
6
6
  require_relative 'queries/event_queries'
7
- require_relative 'queries/stream_queries'
8
- require_relative 'queries/event_type_queries'
7
+ require_relative 'queries/partition_queries'
9
8
  require_relative 'queries/subscription_queries'
10
9
  require_relative 'queries/subscriptions_set_queries'
11
10
  require_relative 'queries/subscription_command_queries'
12
11
  require_relative 'queries/subscriptions_set_command_queries'
13
- require_relative 'queries/preloader'
14
12
 
15
13
  module PgEventstore
16
14
  # @!visibility private
@@ -20,9 +18,9 @@ module PgEventstore
20
18
  # @!attribute events
21
19
  # @return [PgEventstore::EventQueries, nil]
22
20
  attribute(:events)
23
- # @!attribute streams
24
- # @return [PgEventstore::StreamQueries, nil]
25
- attribute(:streams)
21
+ # @!attribute partitions
22
+ # @return [PgEventstore::PartitionQueries, nil]
23
+ attribute(:partitions)
26
24
  # @!attribute transactions
27
25
  # @return [PgEventstore::TransactionQueries, nil]
28
26
  attribute(:transactions)
@@ -28,7 +28,7 @@ module PgEventstore
28
28
  # @return [PgEventstore::QueryBuilders::EventsFiltering]
29
29
  def all_stream_filtering(options)
30
30
  event_filter = new
31
- options in { filter: { event_type_ids: Array => event_type_ids } }
31
+ options in { filter: { event_types: Array => event_type_ids } }
32
32
  event_filter.add_event_types(event_type_ids)
33
33
  event_filter.add_limit(options[:max_count])
34
34
  event_filter.resolve_links(options[:resolve_link_tos])
@@ -44,11 +44,11 @@ module PgEventstore
44
44
  # @return [PgEventstore::QueryBuilders::EventsFiltering]
45
45
  def specific_stream_filtering(stream, options)
46
46
  event_filter = new
47
- options in { filter: { event_type_ids: Array => event_type_ids } }
47
+ options in { filter: { event_types: Array => event_type_ids } }
48
48
  event_filter.add_event_types(event_type_ids)
49
49
  event_filter.add_limit(options[:max_count])
50
50
  event_filter.resolve_links(options[:resolve_link_tos])
51
- event_filter.add_stream(stream)
51
+ event_filter.add_stream_attrs(**stream.to_hash)
52
52
  event_filter.add_revision(options[:from_revision], options[:direction])
53
53
  event_filter.add_stream_direction(options[:direction])
54
54
  event_filter
@@ -60,8 +60,6 @@ module PgEventstore
60
60
  SQLBuilder.new.
61
61
  select('events.*').
62
62
  from('events').
63
- join('JOIN streams ON streams.id = events.stream_id').
64
- join('JOIN event_types ON event_types.id = events.event_type_id').
65
63
  limit(DEFAULT_LIMIT)
66
64
  end
67
65
 
@@ -75,27 +73,21 @@ module PgEventstore
75
73
 
76
74
  stream_attrs.compact!
77
75
  sql = stream_attrs.map do |attr, _|
78
- "streams.#{attr} = ?"
76
+ "events.#{attr} = ?"
79
77
  end.join(" AND ")
80
78
  @sql_builder.where_or(sql, *stream_attrs.values)
81
79
  end
82
80
 
83
- # @param stream [PgEventstore::Stream]
81
+ # @param event_types [Array<String>, nil]
84
82
  # @return [void]
85
- def add_stream(stream)
86
- @sql_builder.where("streams.id = ?", stream.id)
87
- end
88
-
89
- # @param event_type_ids [Array<Integer>, nil]
90
- # @return [void]
91
- def add_event_types(event_type_ids)
92
- return if event_type_ids.nil?
93
- return if event_type_ids.empty?
83
+ def add_event_types(event_types)
84
+ return if event_types.nil?
85
+ return if event_types.empty?
94
86
 
95
- sql = event_type_ids.size.times.map do
87
+ sql = event_types.size.times.map do
96
88
  "?"
97
89
  end.join(", ")
98
- @sql_builder.where("events.event_type_id IN (#{sql})", *event_type_ids)
90
+ @sql_builder.where("events.type IN (#{sql})", *event_types)
99
91
  end
100
92
 
101
93
  # @param revision [Integer, nil]
@@ -4,10 +4,25 @@ module PgEventstore
4
4
  module TestHelpers
5
5
  class << self
6
6
  def clean_up_db
7
+ clean_up_data
8
+ clean_up_partitions
9
+ end
10
+
11
+ def clean_up_partitions
12
+ PgEventstore.connection.with do |conn|
13
+ # Dropping parent partition also drops all child partitions
14
+ conn.exec("select tablename from pg_tables where tablename like 'contexts_%'").each do |attrs|
15
+ conn.exec("drop table #{attrs['tablename']}")
16
+ end
17
+ end
18
+ end
19
+
20
+ def clean_up_data
7
21
  tables_to_purge = PgEventstore.connection.with do |conn|
8
22
  conn.exec(<<~SQL)
9
23
  SELECT tablename
10
- FROM pg_catalog.pg_tables WHERE schemaname NOT IN ('pg_catalog', 'information_schema') AND tablename != 'migrations'
24
+ FROM pg_catalog.pg_tables
25
+ WHERE schemaname NOT IN ('pg_catalog', 'information_schema') AND tablename != 'migrations'
11
26
  SQL
12
27
  end.map { |attrs| attrs['tablename'] }
13
28
  tables_to_purge.each do |table_name|
@@ -9,6 +9,7 @@ module PgEventstore
9
9
  @from_value = nil
10
10
  @where_values = { 'AND' => [], 'OR' => [] }
11
11
  @join_values = []
12
+ @group_values = []
12
13
  @order_values = []
13
14
  @limit_value = nil
14
15
  @offset_value = nil
@@ -68,6 +69,11 @@ module PgEventstore
68
69
  self
69
70
  end
70
71
 
72
+ def remove_order
73
+ @order_values.clear
74
+ self
75
+ end
76
+
71
77
  # @param limit [Integer]
72
78
  # @return self
73
79
  def limit(limit)
@@ -75,6 +81,11 @@ module PgEventstore
75
81
  self
76
82
  end
77
83
 
84
+ def remove_limit
85
+ @limit_value = nil
86
+ self
87
+ end
88
+
78
89
  # @param offset [Integer]
79
90
  # @return self
80
91
  def offset(offset)
@@ -89,10 +100,22 @@ module PgEventstore
89
100
  self
90
101
  end
91
102
 
92
- def to_exec_params
93
- return [single_query_sql, @positional_values] if @union_values.empty?
103
+ # @param sql [String]
104
+ # @return self
105
+ def group(sql)
106
+ @group_values.push(sql)
107
+ self
108
+ end
94
109
 
95
- [union_query_sql, @positional_values]
110
+ def remove_group
111
+ @group_values.clear
112
+ self
113
+ end
114
+
115
+ def to_exec_params
116
+ @positional_values.clear
117
+ @positional_values_size = 0
118
+ _to_exec_params
96
119
  end
97
120
 
98
121
  protected
@@ -106,6 +129,12 @@ module PgEventstore
106
129
  @positional_values_size = val
107
130
  end
108
131
 
132
+ def _to_exec_params
133
+ return [single_query_sql, @positional_values] if @union_values.empty?
134
+
135
+ [union_query_sql, @positional_values]
136
+ end
137
+
109
138
  private
110
139
 
111
140
  # @return [String]
@@ -114,6 +143,7 @@ module PgEventstore
114
143
  sql = "SELECT #{select_sql} FROM #{@from_value}"
115
144
  sql += " #{join_sql}" unless @join_values.empty?
116
145
  sql += " WHERE #{where_sql}" unless where_sql.empty?
146
+ sql += " GROUP BY #{@group_values.join(', ')}" unless @group_values.empty?
117
147
  sql += " ORDER BY #{order_sql}" unless @order_values.empty?
118
148
  sql += " LIMIT #{@limit_value}" if @limit_value
119
149
  sql += " OFFSET #{@offset_value}" if @offset_value
@@ -126,7 +156,7 @@ module PgEventstore
126
156
  union_parts = ["(#{sql})"]
127
157
  union_parts += @union_values.map do |builder|
128
158
  builder.positional_values_size = @positional_values_size
129
- builder_sql, values = builder.to_exec_params
159
+ builder_sql, values = builder._to_exec_params
130
160
  @positional_values.push(*values)
131
161
  @positional_values_size += values.size
132
162
  "(#{builder_sql})"
@@ -5,7 +5,7 @@ require 'digest/md5'
5
5
  module PgEventstore
6
6
  class Stream
7
7
  SYSTEM_STREAM_PREFIX = '$'
8
- INITIAL_STREAM_REVISION = -1 # this is the default value of streams.stream_revision column
8
+ NON_EXISTING_STREAM_REVISION = -1
9
9
 
10
10
  class << self
11
11
  # Produces "all" stream instance. "all" stream does not represent any specific stream. Instead, it indicates that
@@ -18,20 +18,15 @@ module PgEventstore
18
18
  end
19
19
  end
20
20
 
21
- attr_reader :context, :stream_name, :stream_id, :id
22
- attr_accessor :stream_revision
21
+ attr_reader :context, :stream_name, :stream_id
23
22
 
24
23
  # @param context [String]
25
24
  # @param stream_name [String]
26
25
  # @param stream_id [String]
27
- # @param id [Integer, nil] internal stream's id, read only
28
- # @param stream_revision [Integer, nil] current stream revision, read only
29
- def initialize(context:, stream_name:, stream_id:, id: nil, stream_revision: nil)
26
+ def initialize(context:, stream_name:, stream_id:)
30
27
  @context = context
31
28
  @stream_name = stream_name
32
29
  @stream_id = stream_id
33
- @id = id
34
- @stream_revision = stream_revision
35
30
  end
36
31
 
37
32
  # @return [Boolean]
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module PgEventstore
4
- VERSION = "0.9.0"
4
+ VERSION = "0.10.1"
5
5
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: pg_eventstore
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.9.0
4
+ version: 0.10.1
5
5
  platform: ruby
6
6
  authors:
7
7
  - Ivan Dzyzenko
8
8
  autorequire:
9
9
  bindir: exe
10
10
  cert_chain: []
11
- date: 2024-02-23 00:00:00.000000000 Z
11
+ date: 2024-03-12 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: pg
@@ -50,15 +50,15 @@ files:
50
50
  - LICENSE.txt
51
51
  - README.md
52
52
  - db/migrations/0_create_extensions.sql
53
- - db/migrations/1_create_streams.sql
54
- - db/migrations/2_create_event_types.sql
55
- - db/migrations/3_create_events.sql
56
- - db/migrations/4_create_subscriptions.sql
57
- - db/migrations/5_create_subscription_commands.sql
58
- - db/migrations/6_create_subscriptions_set_commands.sql
53
+ - db/migrations/1_create_events.sql
54
+ - db/migrations/2_create_subscriptions.sql
55
+ - db/migrations/3_create_subscription_commands.sql
56
+ - db/migrations/4_create_subscriptions_set_commands.sql
57
+ - db/migrations/5_partitions.sql
59
58
  - docs/appending_events.md
60
59
  - docs/configuration.md
61
60
  - docs/events_and_streams.md
61
+ - docs/how_it_works.md
62
62
  - docs/linking_events.md
63
63
  - docs/multiple_commands.md
64
64
  - docs/reading_events.md
@@ -91,9 +91,7 @@ files:
91
91
  - lib/pg_eventstore/pg_connection.rb
92
92
  - lib/pg_eventstore/queries.rb
93
93
  - lib/pg_eventstore/queries/event_queries.rb
94
- - lib/pg_eventstore/queries/event_type_queries.rb
95
- - lib/pg_eventstore/queries/preloader.rb
96
- - lib/pg_eventstore/queries/stream_queries.rb
94
+ - lib/pg_eventstore/queries/partition_queries.rb
97
95
  - lib/pg_eventstore/queries/subscription_command_queries.rb
98
96
  - lib/pg_eventstore/queries/subscription_queries.rb
99
97
  - lib/pg_eventstore/queries/subscriptions_set_command_queries.rb
@@ -1,13 +0,0 @@
1
- CREATE TABLE public.streams
2
- (
3
- id bigserial NOT NULL,
4
- context character varying COLLATE "POSIX" NOT NULL,
5
- stream_name character varying COLLATE "POSIX" NOT NULL,
6
- stream_id character varying COLLATE "POSIX" NOT NULL,
7
- stream_revision integer DEFAULT '-1'::integer NOT NULL
8
- );
9
-
10
- ALTER TABLE ONLY public.streams
11
- ADD CONSTRAINT streams_pkey PRIMARY KEY (id);
12
-
13
- CREATE UNIQUE INDEX idx_streams_context_and_stream_name_and_stream_id ON public.streams USING btree (context, stream_name, stream_id);
@@ -1,10 +0,0 @@
1
- CREATE TABLE public.event_types
2
- (
3
- id bigserial NOT NULL,
4
- type character varying COLLATE "POSIX" NOT NULL
5
- );
6
-
7
- ALTER TABLE ONLY public.event_types
8
- ADD CONSTRAINT event_types_pkey PRIMARY KEY (id);
9
-
10
- CREATE UNIQUE INDEX idx_event_types_type ON public.event_types USING btree (type);
@@ -1,74 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- module PgEventstore
4
- # @!visibility private
5
- class EventTypeQueries
6
- attr_reader :connection
7
- private :connection
8
-
9
- # @param connection [PgEventstore::Connection]
10
- def initialize(connection)
11
- @connection = connection
12
- end
13
-
14
- # @param type [String]
15
- # @return [Integer] event type's id
16
- def find_or_create_type(type)
17
- find_type(type) || create_type(type)
18
- end
19
-
20
- # @param type [String]
21
- # @return [Integer, nil] event type's id
22
- def find_type(type)
23
- connection.with do |conn|
24
- conn.exec_params('SELECT id FROM event_types WHERE type = $1', [type])
25
- end.to_a.dig(0, 'id')
26
- end
27
-
28
- # @param type [String]
29
- # @return [Integer] event type's id
30
- def create_type(type)
31
- connection.with do |conn|
32
- conn.exec_params('INSERT INTO event_types (type) VALUES ($1) RETURNING id', [type])
33
- end.to_a.dig(0, 'id')
34
- end
35
-
36
- # @param ids [Array<Integer>]
37
- # @return [Array<Hash>]
38
- def find_by_ids(ids)
39
- return [] if ids.empty?
40
-
41
- builder = SQLBuilder.new.from('event_types').where('id = ANY(?)', ids.uniq)
42
- connection.with do |conn|
43
- conn.exec_params(*builder.to_exec_params)
44
- end.to_a
45
- end
46
-
47
- # @param types [Array<String>]
48
- # @return [Array<Integer, nil>]
49
- def find_event_types(types)
50
- connection.with do |conn|
51
- conn.exec_params(<<~SQL, [types])
52
- SELECT event_types.id, types.type
53
- FROM event_types
54
- RIGHT JOIN (
55
- SELECT unnest($1::varchar[]) as type
56
- ) types ON types.type = event_types.type
57
- SQL
58
- end.to_a.map { |attrs| attrs['id'] }
59
- end
60
-
61
- # Replaces filter by event type strings with filter by event type ids
62
- # @param options [Hash]
63
- # @return [Hash]
64
- def include_event_types_ids(options)
65
- options in { filter: { event_types: Array => event_types } }
66
- return options unless event_types
67
-
68
- options = Utils.deep_dup(options)
69
- options[:filter][:event_type_ids] = find_event_types(event_types).uniq
70
- options[:filter].delete(:event_types)
71
- options
72
- end
73
- end
74
- end
@@ -1,37 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- module PgEventstore
4
- # @!visibility private
5
- class Preloader
6
- attr_reader :connection
7
- private :connection
8
-
9
- # @param connection [PgEventstore::Connection]
10
- def initialize(connection)
11
- @connection = connection
12
- end
13
-
14
- # @param raw_events [Array<Hash>]
15
- # @return [Array<Hash>]
16
- def preload_related_objects(raw_events)
17
- streams = stream_queries.find_by_ids(raw_events.map { _1['stream_id'] }).to_h { [_1['id'], _1] }
18
- types = event_type_queries.find_by_ids(raw_events.map { _1['event_type_id'] }).to_h { [_1['id'], _1] }
19
- raw_events.each do |event|
20
- event['stream'] = streams[event['stream_id']]
21
- event['type'] = types[event['event_type_id']]['type']
22
- end
23
- end
24
-
25
- private
26
-
27
- # @return [PgEventstore::EventTypeQueries]
28
- def event_type_queries
29
- EventTypeQueries.new(connection)
30
- end
31
-
32
- # @return [PgEventstore::StreamQueries]
33
- def stream_queries
34
- StreamQueries.new(connection)
35
- end
36
- end
37
- end
@@ -1,77 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- module PgEventstore
4
- # @!visibility private
5
- class StreamQueries
6
- attr_reader :connection
7
- private :connection
8
-
9
- # @param connection [PgEventstore::Connection]
10
- def initialize(connection)
11
- @connection = connection
12
- end
13
-
14
- # Finds a stream in the database by the given Stream object
15
- # @param stream [PgEventstore::Stream]
16
- # @return [PgEventstore::Stream, nil] persisted stream
17
- def find_stream(stream)
18
- builder =
19
- SQLBuilder.new.
20
- from('streams').
21
- where('streams.context = ? AND streams.stream_name = ? AND streams.stream_id = ?', *stream.to_a).
22
- limit(1)
23
- pg_result = connection.with do |conn|
24
- conn.exec_params(*builder.to_exec_params)
25
- end
26
- deserialize(pg_result) if pg_result.ntuples == 1
27
- end
28
-
29
- # @param ids [Array<Integer>]
30
- # @return [Array<Hash>]
31
- def find_by_ids(ids)
32
- return [] if ids.empty?
33
-
34
- builder = SQLBuilder.new.from('streams').where('id = ANY(?)', ids.uniq.sort)
35
- connection.with do |conn|
36
- conn.exec_params(*builder.to_exec_params)
37
- end.to_a
38
- end
39
-
40
- # @param stream [PgEventstore::Stream]
41
- # @return [PgEventstore::RawStream] persisted stream
42
- def create_stream(stream)
43
- create_sql = <<~SQL
44
- INSERT INTO streams (context, stream_name, stream_id) VALUES ($1, $2, $3) RETURNING *
45
- SQL
46
- pg_result = connection.with do |conn|
47
- conn.exec_params(create_sql, stream.to_a)
48
- end
49
- deserialize(pg_result)
50
- end
51
-
52
- # @return [PgEventstore::Stream] persisted stream
53
- def find_or_create_stream(stream)
54
- find_stream(stream) || create_stream(stream)
55
- end
56
-
57
- # @param stream [PgEventstore::Stream] persisted stream
58
- # @return [PgEventstore::Stream]
59
- def update_stream_revision(stream, revision)
60
- connection.with do |conn|
61
- conn.exec_params(<<~SQL, [revision, stream.id])
62
- UPDATE streams SET stream_revision = $1 WHERE id = $2
63
- SQL
64
- end
65
- stream.stream_revision = revision
66
- stream
67
- end
68
-
69
- private
70
-
71
- # @param pg_result [PG::Result]
72
- # @return [PgEventstore::Stream, nil]
73
- def deserialize(pg_result)
74
- PgEventstore::Stream.new(**pg_result.to_a.first.transform_keys(&:to_sym))
75
- end
76
- end
77
- end