pg_eventstore 0.10.1 → 1.0.0.rc1

Files changed (83)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +11 -0
  3. data/README.md +26 -0
  4. data/db/migrations/1_create_events.sql +12 -11
  5. data/db/migrations/2_create_subscriptions.sql +6 -2
  6. data/db/migrations/3_create_subscription_commands.sql +9 -5
  7. data/db/migrations/4_create_subscriptions_set_commands.sql +1 -1
  8. data/db/migrations/5_partitions.sql +1 -0
  9. data/docs/configuration.md +1 -1
  10. data/docs/how_it_works.md +14 -1
  11. data/lib/pg_eventstore/commands/append.rb +1 -1
  12. data/lib/pg_eventstore/commands/event_modifiers/prepare_link_event.rb +30 -8
  13. data/lib/pg_eventstore/commands/event_modifiers/prepare_regular_event.rb +8 -10
  14. data/lib/pg_eventstore/commands/link_to.rb +14 -7
  15. data/lib/pg_eventstore/errors.rb +10 -12
  16. data/lib/pg_eventstore/event.rb +9 -1
  17. data/lib/pg_eventstore/event_deserializer.rb +1 -0
  18. data/lib/pg_eventstore/queries/event_queries.rb +33 -6
  19. data/lib/pg_eventstore/queries/links_resolver.rb +53 -0
  20. data/lib/pg_eventstore/queries/partition_queries.rb +8 -0
  21. data/lib/pg_eventstore/queries/subscription_command_queries.rb +27 -7
  22. data/lib/pg_eventstore/queries/subscription_queries.rb +70 -35
  23. data/lib/pg_eventstore/queries/subscriptions_set_command_queries.rb +13 -1
  24. data/lib/pg_eventstore/queries/subscriptions_set_queries.rb +18 -4
  25. data/lib/pg_eventstore/queries.rb +1 -0
  26. data/lib/pg_eventstore/query_builders/events_filtering_query.rb +4 -17
  27. data/lib/pg_eventstore/subscriptions/command_handlers/subscription_feeder_commands.rb +10 -2
  28. data/lib/pg_eventstore/subscriptions/command_handlers/subscription_runners_commands.rb +9 -7
  29. data/lib/pg_eventstore/subscriptions/commands_handler.rb +3 -2
  30. data/lib/pg_eventstore/subscriptions/events_processor.rb +10 -2
  31. data/lib/pg_eventstore/subscriptions/subscription.rb +29 -12
  32. data/lib/pg_eventstore/subscriptions/subscription_feeder.rb +20 -16
  33. data/lib/pg_eventstore/subscriptions/subscription_runner.rb +1 -1
  34. data/lib/pg_eventstore/subscriptions/subscription_runners_feeder.rb +3 -4
  35. data/lib/pg_eventstore/subscriptions/subscriptions_set.rb +22 -1
  36. data/lib/pg_eventstore/version.rb +1 -1
  37. data/lib/pg_eventstore/web/application.rb +180 -0
  38. data/lib/pg_eventstore/web/paginator/base_collection.rb +56 -0
  39. data/lib/pg_eventstore/web/paginator/event_types_collection.rb +50 -0
  40. data/lib/pg_eventstore/web/paginator/events_collection.rb +105 -0
  41. data/lib/pg_eventstore/web/paginator/helpers.rb +119 -0
  42. data/lib/pg_eventstore/web/paginator/stream_contexts_collection.rb +48 -0
  43. data/lib/pg_eventstore/web/paginator/stream_ids_collection.rb +50 -0
  44. data/lib/pg_eventstore/web/paginator/stream_names_collection.rb +51 -0
  45. data/lib/pg_eventstore/web/public/fonts/vendor/FontAwesome.otf +0 -0
  46. data/lib/pg_eventstore/web/public/fonts/vendor/fontawesome-webfont.eot +0 -0
  47. data/lib/pg_eventstore/web/public/fonts/vendor/fontawesome-webfont.svg +685 -0
  48. data/lib/pg_eventstore/web/public/fonts/vendor/fontawesome-webfont.ttf +0 -0
  49. data/lib/pg_eventstore/web/public/fonts/vendor/fontawesome-webfont.woff +0 -0
  50. data/lib/pg_eventstore/web/public/fonts/vendor/fontawesome-webfont.woff2 +0 -0
  51. data/lib/pg_eventstore/web/public/images/favicon.ico +0 -0
  52. data/lib/pg_eventstore/web/public/javascripts/gentelella.js +334 -0
  53. data/lib/pg_eventstore/web/public/javascripts/pg_eventstore.js +162 -0
  54. data/lib/pg_eventstore/web/public/javascripts/vendor/bootstrap.bundle.min.js +7 -0
  55. data/lib/pg_eventstore/web/public/javascripts/vendor/bootstrap.bundle.min.js.map +1 -0
  56. data/lib/pg_eventstore/web/public/javascripts/vendor/jquery.autocomplete.min.js +8 -0
  57. data/lib/pg_eventstore/web/public/javascripts/vendor/jquery.min.js +4 -0
  58. data/lib/pg_eventstore/web/public/javascripts/vendor/jquery.min.js.map +1 -0
  59. data/lib/pg_eventstore/web/public/javascripts/vendor/select2.full.min.js +2 -0
  60. data/lib/pg_eventstore/web/public/stylesheets/pg_eventstore.css +5 -0
  61. data/lib/pg_eventstore/web/public/stylesheets/vendor/bootstrap.min.css +7 -0
  62. data/lib/pg_eventstore/web/public/stylesheets/vendor/bootstrap.min.css.map +1 -0
  63. data/lib/pg_eventstore/web/public/stylesheets/vendor/font-awesome.min.css +4 -0
  64. data/lib/pg_eventstore/web/public/stylesheets/vendor/font-awesome.min.css.map +7 -0
  65. data/lib/pg_eventstore/web/public/stylesheets/vendor/gentelella.min.css +13 -0
  66. data/lib/pg_eventstore/web/public/stylesheets/vendor/select2-bootstrap4.min.css +3 -0
  67. data/lib/pg_eventstore/web/public/stylesheets/vendor/select2.min.css +2 -0
  68. data/lib/pg_eventstore/web/subscriptions/helpers.rb +76 -0
  69. data/lib/pg_eventstore/web/subscriptions/set_collection.rb +34 -0
  70. data/lib/pg_eventstore/web/subscriptions/subscriptions.rb +33 -0
  71. data/lib/pg_eventstore/web/subscriptions/subscriptions_set.rb +33 -0
  72. data/lib/pg_eventstore/web/subscriptions/subscriptions_to_set_association.rb +32 -0
  73. data/lib/pg_eventstore/web/views/home/dashboard.erb +147 -0
  74. data/lib/pg_eventstore/web/views/home/partials/event_filter.erb +15 -0
  75. data/lib/pg_eventstore/web/views/home/partials/events.erb +22 -0
  76. data/lib/pg_eventstore/web/views/home/partials/pagination_links.erb +3 -0
  77. data/lib/pg_eventstore/web/views/home/partials/stream_filter.erb +31 -0
  78. data/lib/pg_eventstore/web/views/layouts/application.erb +116 -0
  79. data/lib/pg_eventstore/web/views/subscriptions/index.erb +220 -0
  80. data/lib/pg_eventstore/web.rb +22 -0
  81. data/lib/pg_eventstore.rb +5 -0
  82. data/pg_eventstore.gemspec +2 -1
  83. metadata +61 -2
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 9426880dd6354875d896f5bf37abeac180c4f04b26905db0efc035fec316e6b9
- data.tar.gz: 355a6c370451fff9e391aa0ecb1c6abf105c6fc537d99f6b7371359067dd2ee4
+ metadata.gz: fbcfe058d0769d031f29580fdd79e899a73987a3108b0b96ad154a5bb913a7d2
+ data.tar.gz: 0f071621693bca4b597defc2be13fbde054be06448648ace7324e8ce14e4e512
  SHA512:
- metadata.gz: fc96e847b892e375115a08985678328d166742e5f6be3ee967f67dc808c80166a58f010555567b05fbac172b31c7d98276d1e898de6889d5d53d3ccf0ab2be47
- data.tar.gz: cba82b0c20d4104cd6878fd2471e5c4d0c70e60991f573cd5aa1a845e7e5b443cf9f15448b81754888a84883c6bfbaf932e452c13fd7d5d229b03185741f1dbd
+ metadata.gz: fa8828e002dab641f15b8ed3756c822899b3937929482a8fb9dcbac4bb5c10b7283427fa462a627ee47209d4476157396749ced15d50b64892683b1309e897a8
+ data.tar.gz: 3482263831bc256851999a0093be615cf4e238ceeae5c27f209f2fcc3d8b666db8b4c68bc388e5ef0c52de8f52a65e6abfe4db864042161f823b867f4996e750
data/CHANGELOG.md CHANGED
@@ -1,5 +1,16 @@
  ## [Unreleased]

+ ## [1.0.0.rc1]
+
+ - Improve performance of loading original events when resolve_link_tos: true option is provided
+ - Adjust `partitions` table indexes
+ - Implement admin web UI. So far two pages were implemented - events search and subscriptions
+
+ ## [0.10.2] - 2024-03-13
+
+ - Review the approach to resolve link events
+ - Fix subscriptions restart interval option not being processed correctly
+
  ## [0.10.1] - 2024-03-12

  - Handle edge case when creating partitions
data/README.md CHANGED
@@ -47,6 +47,32 @@ Documentation chapters:
  - [Writing middlewares](docs/writing_middleware.md)
  - [How to make multiple commands atomic](docs/multiple_commands.md)

+ ## Admin web UI
+
+ `pg_eventstore` implements admin UI where you can browse various database objects. It is implemented as rack application. It doesn't have any authentication/authorization mechanism - it is your responsibility to take care of it.
+
+ ### Rails integration
+
+ In your `config/routes.rb`:
+
+ ```ruby
+ require 'pg_eventstore/web'
+
+ mount PgEventstore::Web::Application, at: '/eventstore'
+ ```
+
+ ### Standalone application
+
+ Create `config.ru` file and place next content in there:
+
+ ```ruby
+ require 'pg_eventstore/web'
+
+ run PgEventstore::Web::Application
+ ```
+
+ Now you can use any web server to run it.
+
  ## Development

  After checking out the repo, run:
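Since the added README section points out that the UI ships with no authentication or authorization, a natural complement to the standalone `config.ru` above is to put a Rack middleware in front of it. The sketch below is not part of the gem: it assumes `Rack::Auth::Basic` from the `rack` gem, and the environment variable names are purely illustrative.

```ruby
# config.ru -- a minimal sketch of protecting the admin UI with HTTP basic auth.
# Rack::Auth::Basic ships with the rack gem; the credential source below is only
# an example and should be replaced with your own mechanism.
require 'rack/auth/basic'
require 'pg_eventstore/web'

use Rack::Auth::Basic, 'pg_eventstore admin' do |username, password|
  Rack::Utils.secure_compare(username, ENV.fetch('EVENTSTORE_UI_USER', 'admin')) &&
    Rack::Utils.secure_compare(password, ENV.fetch('EVENTSTORE_UI_PASSWORD', ''))
end

run PgEventstore::Web::Application
```

In a Rails app the mounted endpoint can be protected the same way, via a routes constraint or whatever authentication middleware the application already uses.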
data/db/migrations/1_create_events.sql CHANGED
@@ -1,16 +1,17 @@
  CREATE TABLE public.events
  (
- id uuid DEFAULT public.gen_random_uuid() NOT NULL,
- context character varying COLLATE "POSIX" NOT NULL,
- stream_name character varying COLLATE "POSIX" NOT NULL,
- stream_id character varying COLLATE "POSIX" NOT NULL,
- global_position bigserial NOT NULL,
- stream_revision integer NOT NULL,
- data jsonb DEFAULT '{}'::jsonb NOT NULL,
- metadata jsonb DEFAULT '{}'::jsonb NOT NULL,
- link_id uuid,
- created_at timestamp without time zone DEFAULT now() NOT NULL,
- type character varying COLLATE "POSIX" NOT NULL
+ id uuid DEFAULT public.gen_random_uuid() NOT NULL,
+ context character varying COLLATE "POSIX" NOT NULL,
+ stream_name character varying COLLATE "POSIX" NOT NULL,
+ stream_id character varying COLLATE "POSIX" NOT NULL,
+ global_position bigserial NOT NULL,
+ stream_revision integer NOT NULL,
+ data jsonb DEFAULT '{}'::jsonb NOT NULL,
+ metadata jsonb DEFAULT '{}'::jsonb NOT NULL,
+ link_id uuid,
+ link_partition_id bigint,
+ created_at timestamp without time zone DEFAULT now() NOT NULL,
+ type character varying COLLATE "POSIX" NOT NULL
  ) PARTITION BY LIST (context);

  ALTER TABLE ONLY public.events
data/db/migrations/2_create_subscriptions.sql CHANGED
@@ -1,6 +1,6 @@
  CREATE TABLE public.subscriptions_set
  (
- id uuid DEFAULT public.gen_random_uuid() NOT NULL,
+ id bigserial NOT NULL,
  name character varying NOT NULL,
  state character varying NOT NULL DEFAULT 'initial',
  restart_count integer NOT NULL DEFAULT 0,
@@ -35,7 +35,7 @@ CREATE TABLE public.subscriptions
  chunk_query_interval float4 NOT NULL DEFAULT 1.0,
  last_chunk_fed_at timestamp without time zone NOT NULL DEFAULT to_timestamp(0),
  last_chunk_greatest_position bigint,
- locked_by uuid,
+ locked_by bigint,
  created_at timestamp without time zone NOT NULL DEFAULT now(),
  updated_at timestamp without time zone NOT NULL DEFAULT now()
  );
@@ -44,3 +44,7 @@ ALTER TABLE ONLY public.subscriptions
  ADD CONSTRAINT subscriptions_pkey PRIMARY KEY (id);

  CREATE UNIQUE INDEX idx_subscriptions_set_and_name ON public.subscriptions USING btree (set, name);
+ CREATE INDEX idx_subscriptions_locked_by ON public.subscriptions USING btree (locked_by);
+
+ ALTER TABLE ONLY public.subscriptions
+ ADD CONSTRAINT subscriptions_subscriptions_set_fk FOREIGN KEY (locked_by) REFERENCES public.subscriptions_set (id) ON DELETE SET NULL (locked_by);
data/db/migrations/3_create_subscription_commands.sql CHANGED
@@ -1,15 +1,19 @@
  CREATE TABLE public.subscription_commands
  (
- id bigserial NOT NULL,
- name character varying NOT NULL,
- subscription_id bigint NOT NULL,
- created_at timestamp without time zone NOT NULL DEFAULT now()
+ id bigserial NOT NULL,
+ name character varying NOT NULL,
+ subscription_id bigint NOT NULL,
+ subscriptions_set_id bigint NOT NULL,
+ created_at timestamp without time zone NOT NULL DEFAULT now()
  );

  ALTER TABLE ONLY public.subscription_commands
  ADD CONSTRAINT subscription_commands_pkey PRIMARY KEY (id);

- CREATE UNIQUE INDEX idx_subscription_commands_subscription_id_and_name ON public.subscription_commands USING btree (subscription_id, name);
+ CREATE UNIQUE INDEX idx_subscription_commands_subscription_id_and_set_id_and_name ON public.subscription_commands USING btree (subscription_id, subscriptions_set_id, name);

  ALTER TABLE ONLY public.subscription_commands
  ADD CONSTRAINT subscription_commands_subscription_fk FOREIGN KEY (subscription_id) REFERENCES public.subscriptions (id) ON DELETE CASCADE;
+
+ ALTER TABLE ONLY public.subscription_commands
+ ADD CONSTRAINT subscription_commands_subscriptions_set_fk FOREIGN KEY (subscriptions_set_id) REFERENCES public.subscriptions_set (id) ON DELETE CASCADE;
data/db/migrations/4_create_subscriptions_set_commands.sql CHANGED
@@ -2,7 +2,7 @@ CREATE TABLE public.subscriptions_set_commands
  (
  id bigserial NOT NULL,
  name character varying NOT NULL,
- subscriptions_set_id uuid NOT NULL,
+ subscriptions_set_id bigint NOT NULL,
  created_at timestamp without time zone NOT NULL DEFAULT now()
  );

data/db/migrations/5_partitions.sql CHANGED
@@ -14,3 +14,4 @@ CREATE UNIQUE INDEX idx_partitions_by_context ON public.partitions USING btree (
  CREATE UNIQUE INDEX idx_partitions_by_context_and_stream_name ON public.partitions USING btree (context, stream_name) WHERE event_type IS NULL;
  CREATE UNIQUE INDEX idx_partitions_by_context_and_stream_name_and_event_type ON public.partitions USING btree (context, stream_name, event_type);
  CREATE UNIQUE INDEX idx_partitions_by_partition_table_name ON public.partitions USING btree (table_name);
+ CREATE INDEX idx_partitions_by_event_type ON public.partitions USING btree (event_type);
data/docs/configuration.md CHANGED
@@ -10,7 +10,7 @@ Configuration options:
  | event_class_resolver | `#call` | `PgEventstore::EventClassResolver.new` | A `#call`-able object that accepts a string and returns an event's class. See **Resolving events classes** chapter bellow for more info. |
  | connection_pool_size | Integer | `5` | Max number of connections per ruby process. It must equal the number of threads of your application. When using subscriptions it is recommended to set it to the number of subscriptions divided by two or greater. See [**Picking max connections number**](#picking-max-connections-number) chapter of this section. |
  | connection_pool_timeout | Integer | `5` | Time in seconds to wait for a connection in the pool to be released. If no connections are available during this time - `ConnectionPool::TimeoutError` will be raised. See `connection_pool` gem [docs](https://github.com/mperham/connection_pool#usage) for more info. |
- | subscription_pull_interval | Float | `1.0` | How often to pull new subscription events in seconds. |
+ | subscription_pull_interval | Float | `1.0` | How often to pull new subscription events in seconds. The minimum meaningful value is `0.2`. Values less than `0.2` will act as it is `0.2`. |
  | subscription_max_retries | Integer | `5` | Max number of retries of failed subscription. |
  | subscription_retries_interval | Integer | `1` | Interval in seconds between retries of failed subscriptions. |
  | subscriptions_set_max_retries | Integer | `10` | Max number of retries for failed subscription sets. |
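For context, the option amended above is set through the gem's configuration block. The snippet below is a sketch that assumes the `PgEventstore.configure` API from docs/configuration.md; the connection URI variable name is illustrative.

```ruby
# A sketch of tuning subscription_pull_interval, assuming the standard
# PgEventstore.configure block from docs/configuration.md.
require 'pg_eventstore'

PgEventstore.configure do |config|
  config.pg_uri = ENV.fetch('PG_EVENTSTORE_URI') # illustrative variable name
  # Pull new subscription events every 0.5 seconds; per the table above,
  # values below 0.2 behave as 0.2.
  config.subscription_pull_interval = 0.5
end
```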
data/docs/how_it_works.md CHANGED
@@ -8,7 +8,7 @@ The database is designed specifically for Eventsourcing using Domain-Driven Desi
  - For each `Stream#stream_name` there is a subpartition of `contexts_` table. Those tables have `stream_names_` prefix.
  - For each `Event#type` there is a subpartition of `stream_names_` table. Those tables have `event_types_` prefix.

- To implement partitions - Declarative Partitioning is used. Partitioning means that you should not have any random values in the combination of `Stream#context`, `Stream#stream_name` and `Event#type`. A combination of those values must have low cardinality(low distinct values number) and must be pre-defined in your application. Otherwise it will lead to the performance degradation. More about PostgreSQL partitions is [here](https://www.postgresql.org/docs/current/ddl-partitioning.html),
+ To implement partitions - Declarative Partitioning is used. Partitioning means that you should not have any random values in the combination of `Stream#context`, `Stream#stream_name` and `Event#type`. A combination of those values must have low cardinality(low distinct values number) and must be pre-defined in your application. Otherwise it will lead to the performance degradation. More about PostgreSQL partitions is [here](https://www.postgresql.org/docs/current/ddl-partitioning.html).

  So, let's say you want to publish next event:

@@ -36,6 +36,19 @@ end.to_a
  # {"id"=>3, "context"=>"SomeCtx", "stream_name"=>"SomeStream", "event_type"=>"SomethingChanged", "table_name"=>"event_types_aeadd5"}]
  ```

+ ### PostgreSQL settings
+
+ The more partitions you have, the more locks are required for operations that affect multiple partitions. Especially it concerns the case when you are [reading events from "all" stream](reading_events.md#reading-from-the-all-stream) without providing any filters. It may lead to the next error:
+
+ ```
+ ERROR: out of shared memory (PG::OutOfMemory)
+ HINT: You might need to increase max_locks_per_transaction.
+ ```
+
+ PostgreSQL suggests to increase the `max_locks_per_transaction`(the description of it is [here](https://www.postgresql.org/docs/current/runtime-config-locks.html)). The default value is `64`. The good value of this setting really depends on your queries, the number of concurrent transactions, the values of `shared_buffers` and `work_mem` settings. In case you have several thousands of partitions - you may want to set it to `128` or event to `256` from the start. On the other hand - you may want to increase it even earlier(e.g. when having several hundreds of partitions) in case you involve high number of partitions into a single transaction(for example, when using [#multiple](multiple_commands.md)).
+
+ Conclusion: monitor db logs, monitor exceptions and adjust your db settings accordingly.
+
  ## Appending events and multiple commands

  You may want to get familiar with [Appending events](appending_events.md) and [multiple commands](multiple_commands.md) first.
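To make the hint above concrete: the setting can be changed in `postgresql.conf` or via `ALTER SYSTEM`, and `max_locks_per_transaction` only takes effect after a server restart (a reload is not enough). The value `128` below is just the example figure used in the added docs.

```sql
-- Requires a PostgreSQL server restart to take effect.
ALTER SYSTEM SET max_locks_per_transaction = 128;
-- After the restart, confirm the new value:
SHOW max_locks_per_transaction;
```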
data/lib/pg_eventstore/commands/append.rb CHANGED
@@ -12,7 +12,7 @@ module PgEventstore
  # @param event_modifier [#call]
  # @return [Array<PgEventstore::Event>] persisted events
  # @raise [PgEventstore::WrongExpectedRevisionError]
- def call(stream, *events, options: {}, event_modifier: EventModifiers::PrepareRegularEvent)
+ def call(stream, *events, options: {}, event_modifier: EventModifiers::PrepareRegularEvent.new)
  raise SystemStreamError, stream if stream.system?

  queries.transactions.transaction do
data/lib/pg_eventstore/commands/event_modifiers/prepare_link_event.rb CHANGED
@@ -6,16 +6,38 @@ module PgEventstore
  # Defines how to transform regular event into a link event
  # @!visibility private
  class PrepareLinkEvent
- class << self
- # @param event [PgEventstore::Event]
- # @param revision [Integer]
- # @return [PgEventstore::Event]
- def call(event, revision)
- Event.new(link_id: event.id, type: Event::LINK_TYPE, stream_revision: revision).tap do |e|
- %i[link_id type stream_revision].each { |attr| e.readonly!(attr) }
- end
+ attr_reader :partition_queries, :partitions
+
+ # @param partition_queries [PgEventstore::PartitionQueries]
+ def initialize(partition_queries)
+ @partitions = {}
+ @partition_queries = partition_queries
+ end
+ # @param event [PgEventstore::Event]
+ # @param revision [Integer]
+ # @return [PgEventstore::Event]
+ def call(event, revision)
+ Event.new(
+ link_id: event.id, link_partition_id: partition_id(event), type: Event::LINK_TYPE, stream_revision: revision
+ ).tap do |e|
+ %i[link_id link_partition_id type stream_revision].each { |attr| e.readonly!(attr) }
  end
  end
+
+ private
+
+ # @param event [PgEventstore::Event] persisted event
+ # @return [Integer] partition id
+ # @raise [PgEventstore::MissingPartitionError]
+ def partition_id(event)
+ partition_id = @partitions.dig(event.stream.context, event.stream.stream_name, event.type)
+ return partition_id if partition_id
+
+ partition_id = partition_queries.event_type_partition(event.stream, event.type)['id']
+ @partitions[event.stream.context] ||= {}
+ @partitions[event.stream.context][event.stream.stream_name] ||= {}
+ @partitions[event.stream.context][event.stream.stream_name][event.type] = partition_id
+ end
  end
  end
  end
data/lib/pg_eventstore/commands/event_modifiers/prepare_regular_event.rb CHANGED
@@ -6,16 +6,14 @@ module PgEventstore
  # Defines how to transform regular event before appending it to the stream
  # @!visibility private
  class PrepareRegularEvent
- class << self
- # @param event [PgEventstore::Event]
- # @param revision [Integer]
- # @return [PgEventstore::Event]
- def call(event, revision)
- event.class.new(
- id: event.id, data: event.data, metadata: event.metadata, type: event.type, stream_revision: revision
- ).tap do |e|
- %i[link_id stream_revision].each { |attr| e.readonly!(attr) }
- end
+ # @param event [PgEventstore::Event]
+ # @param revision [Integer]
+ # @return [PgEventstore::Event]
+ def call(event, revision)
+ event.class.new(
+ id: event.id, data: event.data, metadata: event.metadata, type: event.type, stream_revision: revision
+ ).tap do |e|
+ %i[link_id link_partition_id stream_revision].each { |attr| e.readonly!(attr) }
  end
  end
  end
data/lib/pg_eventstore/commands/link_to.rb CHANGED
@@ -13,20 +13,27 @@ module PgEventstore
  # @raise [PgEventstore::WrongExpectedRevisionError]
  # @raise [PgEventstore::NotPersistedEventError]
  def call(stream, *events, options: {})
- events.each(&method(:check_event_presence))
+ check_events_presence(events)
  append_cmd = Append.new(queries)
- append_cmd.call(stream, *events, options: options, event_modifier: EventModifiers::PrepareLinkEvent)
+ append_cmd.call(
+ stream, *events, options: options, event_modifier: EventModifiers::PrepareLinkEvent.new(queries.partitions)
+ )
  end

  private

- # Checks if Event#id is present. An event must have the #id value in order to be linked.
- # @param event [PgEventstore::Event]
+ # Checks if the given events are persisted events. This is needed to prevent potentially non-existing id valuess
+ # from appearing in #link_id column.
+ # @param events [Array<PgEventstore::Event>]
  # @return [void]
- def check_event_presence(event)
- return if queries.events.event_exists?(event.id)
+ def check_events_presence(events)
+ ids_from_db = queries.events.ids_from_db(events)
+ (events.map(&:id) - ids_from_db).tap do |missing_ids|
+ return if missing_ids.empty?

- raise NotPersistedEventError, event
+ missing_event = events.find { |event| event.id == missing_ids.first }
+ raise NotPersistedEventError, missing_event
+ end
  end
  end
  end
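From user code this command is reached through the client-level linking API; the sketch below assumes the `PgEventstore.client#read` and `#link_to` wrappers described in the gem's docs, and the stream/context names are made up. The events being linked must already be persisted, otherwise the presence check above raises `PgEventstore::NotPersistedEventError`.

```ruby
# A sketch only -- stream attributes are illustrative, and the client-level
# #link_to wrapper around the command above is assumed from the gem's docs.
require 'pg_eventstore'

source_stream = PgEventstore::Stream.new(
  context: 'SomeCtx', stream_name: 'Orders', stream_id: '42'
)
projection_stream = PgEventstore::Stream.new(
  context: 'SomeCtx', stream_name: 'OrdersProjection', stream_id: '1'
)

# Read back persisted events and link them into the projection stream.
events = PgEventstore.client.read(source_stream)
PgEventstore.client.link_to(projection_stream, *events)
```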
data/lib/pg_eventstore/errors.rb CHANGED
@@ -139,7 +139,7 @@ module PgEventstore

  # @param set [String] subscriptions set name
  # @param name [String] subscription's name
- # @param lock_id [String] UUIDv4
+ # @param lock_id [Integer]
  def initialize(set, name, lock_id)
  @set = set
  @name = name
@@ -151,31 +151,29 @@ module PgEventstore
  def user_friendly_message
  <<~TEXT.strip
  Could not lock subscription from #{set.inspect} set with #{name.inspect} name. It is already locked by \
- #{lock_id.inspect} set.
+ ##{lock_id.inspect} set.
  TEXT
  end
  end

- class SubscriptionUnlockError < Error
- attr_reader :set, :name, :expected_locked_by, :actual_locked_by
+ class WrongLockIdError < Error
+ attr_reader :set, :name, :lock_id

- # @param set [String] subscription's set name
+ # @param set [String] subscriptions set name
  # @param name [String] subscription's name
- # @param expected_locked_by [String] UUIDv4
- # @param actual_locked_by [String, nil] UUIDv4
- def initialize(set, name, expected_locked_by, actual_locked_by)
+ # @param lock_id [Integer]
+ def initialize(set, name, lock_id)
  @set = set
  @name = name
- @expected_locked_by = expected_locked_by
- @actual_locked_by = actual_locked_by
+ @lock_id = lock_id
  super(user_friendly_message)
  end

  # @return [String]
  def user_friendly_message
  <<~TEXT.strip
- Failed to unlock subscription from #{set.inspect} set with #{name.inspect} name by \
- #{expected_locked_by.inspect} lock id. It is currently locked by #{actual_locked_by.inspect} lock id.
+ Could not update subscription from #{set.inspect} set with #{name.inspect} name. It is locked by \
+ ##{lock_id.inspect} set suddenly.
  TEXT
  end
  end
data/lib/pg_eventstore/event.rb CHANGED
@@ -31,6 +31,14 @@ module PgEventstore
  # @return [String, nil] UUIDv4 of an event the current event points to. If it is not nil, then the current
  # event is a link
  attribute(:link_id)
+ # @!attribute link_partition_id
+ # @return [Integer, nil] a partition id of an event the link event points to. It is used to load original event
+ # when resolve_link_tos: true option is provided when reading events.
+ attribute(:link_partition_id)
+ # @!attribute link
+ # @return [PgEventstore::Event, nil] when resolve_link_tos: true option is provided during the read of events and
+ # event is a link event - this attribute will be pointing on that link
+ attribute(:link)
  # @!attribute created_at
  # @return [Time, nil] a timestamp an event was created at
  attribute(:created_at)
@@ -41,7 +49,7 @@ module PgEventstore
  def ==(other)
  return false unless other.is_a?(PgEventstore::Event)

- attributes_hash == other.attributes_hash
+ attributes_hash.except(:link) == other.attributes_hash.except(:link)
  end

  # Detect whether an event is a link event
data/lib/pg_eventstore/event_deserializer.rb CHANGED
@@ -27,6 +27,7 @@ module PgEventstore
  event.stream = PgEventstore::Stream.new(
  **attrs.slice('context', 'stream_name', 'stream_id').transform_keys(&:to_sym)
  )
+ event.link = without_middlewares.deserialize(attrs['link']) if attrs.key?('link')
  event
  end

data/lib/pg_eventstore/queries/event_queries.rb CHANGED
@@ -15,17 +15,36 @@ module PgEventstore
  @deserializer = deserializer
  end

- # @param id [String, nil]
+ # @param event [PgEventstore::Event]
  # @return [Boolean]
- def event_exists?(id)
- return false if id.nil?
+ def event_exists?(event)
+ return false if event.id.nil? || event.stream.nil?

- sql_builder = SQLBuilder.new.select('1 as exists').from('events').where('id = ?', id).limit(1)
+ sql_builder = SQLBuilder.new.select('1 as exists').from('events').where('id = ?', event.id).limit(1)
+ sql_builder.where(
+ 'context = ? and stream_name = ? and type = ?', event.stream.context, event.stream.stream_name, event.type
+ )
  connection.with do |conn|
  conn.exec_params(*sql_builder.to_exec_params)
  end.to_a.dig(0, 'exists') == 1
  end

+ # Takes an array of potentially persisted events and loads their ids from db. Those ids can be later used to check
+ # whether events are actually existing events.
+ # @param events [Array<PgEventstore::Event>]
+ # @return [Array<Integer>]
+ def ids_from_db(events)
+ sql_builder = SQLBuilder.new.from('events').select('id')
+ partition_attrs = events.map { |event| [event.stream&.context, event.stream&.stream_name, event.type] }.uniq
+ partition_attrs.each do |context, stream_name, event_type|
+ sql_builder.where_or('context = ? and stream_name = ? and type = ?', context, stream_name, event_type)
+ end
+ sql_builder.where('id = ANY(?::uuid[])', events.map(&:id))
+ PgEventstore.connection.with do |conn|
+ conn.exec_params(*sql_builder.to_exec_params)
+ end.to_a.map { |attrs| attrs['id'] }
+ end
+
  # @param stream [PgEventstore::Stream]
  # @return [Integer, nil]
  def stream_revision(stream)
@@ -47,6 +66,7 @@ module PgEventstore
  raw_events = connection.with do |conn|
  conn.exec_params(*exec_params)
  end.to_a
+ raw_events = links_resolver.resolve(raw_events) if options[:resolve_link_tos]
  deserializer.deserialize_many(raw_events)
  end

@@ -55,7 +75,7 @@ module PgEventstore
  # @return [PgEventstore::Event]
  def insert(stream, events)
  sql_rows_for_insert, values = prepared_statements(stream, events)
- columns = %w[id data metadata stream_revision link_id type context stream_name stream_id]
+ columns = %w[id data metadata stream_revision link_id link_partition_id type context stream_name stream_id]

  sql = <<~SQL
  INSERT INTO events (#{columns.join(', ')})
@@ -80,7 +100,9 @@ module PgEventstore
  values = []
  sql_rows_for_insert = events.map do |event|
  event = serializer.serialize(event)
- attributes = event.options_hash.slice(:id, :data, :metadata, :stream_revision, :link_id, :type)
+ attributes = event.options_hash.slice(
+ :id, :data, :metadata, :stream_revision, :link_id, :link_partition_id, :type
+ )

  attributes = attributes.merge(stream.to_hash)
  prepared = attributes.values.map do |value|
@@ -106,5 +128,10 @@ module PgEventstore

  QueryBuilders::EventsFiltering.specific_stream_filtering(stream, options)
  end
+
+ # @return [PgEventstore::LinksResolver]
+ def links_resolver
+ LinksResolver.new(connection)
+ end
  end
  end
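The new `options[:resolve_link_tos]` branch above is what the changelog's performance note refers to. A minimal read sketch follows, assuming the client read API from docs/reading_events.md; the stream attributes are illustrative.

```ruby
# A sketch only: link events in the result are swapped for the original events
# they point to, now loaded via LinksResolver in one query per partition.
require 'pg_eventstore'

projection_stream = PgEventstore::Stream.new(
  context: 'SomeCtx', stream_name: 'OrdersProjection', stream_id: '1'
)
events = PgEventstore.client.read(projection_stream, options: { resolve_link_tos: true })
events.each do |event|
  # #link is the new attribute: it carries the link event when one was resolved.
  puts "#{event.type} resolved_from_link=#{!event.link.nil?}"
end
```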
data/lib/pg_eventstore/queries/links_resolver.rb ADDED
@@ -0,0 +1,53 @@
+ # frozen_string_literal: true
+
+ module PgEventstore
+ # @!visibility private
+ class LinksResolver
+ attr_reader :connection
+ private :connection
+
+ # @param connection [PgEventstore::Connection]
+ def initialize(connection)
+ @connection = connection
+ end
+
+ # Takes an array of events, look for link events in there and replaces link events with original events
+ # @param raw_events [Array<Hash>]
+ # @return [Array<Hash>]
+ def resolve(raw_events)
+ link_events = raw_events.select { _1['link_partition_id'] }.group_by { _1['link_partition_id'] }
+ return raw_events if link_events.empty?
+
+ original_events = load_original_events(link_events).to_h { |attrs| [attrs['id'], attrs] }
+ raw_events.map do |attrs|
+ original_event = original_events[attrs['link_id']]
+ next attrs unless original_event
+
+ original_event.merge('link' => attrs).merge(attrs.except(*original_event.keys))
+ end
+ end
+
+ private
+
+ # @param link_events [Hash{Integer => Array<Hash>}] partition id to link events association
+ # @return [Array<Hash>] original events
+ def load_original_events(link_events)
+ partitions = partition_queries.find_by_ids(link_events.keys)
+ sql_builders = partitions.map do |partition|
+ sql_builder = SQLBuilder.new.select('*').from(partition['table_name'])
+ sql_builder.where('id = ANY(?::uuid[])', link_events[partition['id']].map { _1['link_id'] })
+ end
+ sql_builder = sql_builders[1..].each_with_object(sql_builders.first) do |builder, top_builder|
+ top_builder.union(builder)
+ end
+
+ connection.with do |conn|
+ conn.exec_params(*sql_builder.to_exec_params)
+ end.to_a
+ end
+
+ def partition_queries
+ PartitionQueries.new(connection)
+ end
+ end
+ end
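For readers who think in SQL, `#load_original_events` above ends up issuing a single statement shaped roughly like the sketch below. The partition table names and UUIDs are placeholders, and whether the builder emits `UNION` or `UNION ALL` is an implementation detail of `SQLBuilder#union`.

```sql
-- Rough shape only; event_types_abc123 / event_types_def456 stand in for the
-- partition tables the link events point to.
SELECT * FROM event_types_abc123
WHERE id = ANY('{11111111-1111-1111-1111-111111111111}'::uuid[])
UNION
SELECT * FROM event_types_def456
WHERE id = ANY('{22222222-2222-2222-2222-222222222222}'::uuid[])
```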
data/lib/pg_eventstore/queries/partition_queries.rb CHANGED
@@ -165,6 +165,14 @@ module PgEventstore
  end.to_a.dig(0, 'exists') == 1
  end

+ # @param ids [Array<Integer>]
+ # @return [Array<Hash>]
+ def find_by_ids(ids)
+ connection.with do |conn|
+ conn.exec_params('select * from partitions where id = ANY($1::bigint[])', [ids])
+ end.to_a
+ end
+
  # @param stream [PgEventstore::Stream]
  # @return [String]
  def context_partition_name(stream)
data/lib/pg_eventstore/queries/subscription_command_queries.rb CHANGED
@@ -11,15 +11,27 @@ module PgEventstore
  @connection = connection
  end

+ # @see #find_by or #create for available arguments
+ # @return [Hash]
+ def find_or_create_by(...)
+ transaction_queries.transaction do
+ find_by(...) || create(...)
+ end
+ end
+
  # @param subscription_id [Integer]
+ # @param subscriptions_set_id [Integer]
  # @param command_name [String]
  # @return [Hash, nil]
- def find_by(subscription_id:, command_name:)
+ def find_by(subscription_id:, subscriptions_set_id:, command_name:)
  sql_builder =
  SQLBuilder.new.
  select('*').
  from('subscription_commands').
- where('subscription_id = ? AND name = ?', subscription_id, command_name)
+ where(
+ 'subscription_id = ? AND subscriptions_set_id = ? AND name = ?',
+ subscription_id, subscriptions_set_id, command_name
+ )
  pg_result = connection.with do |conn|
  conn.exec_params(*sql_builder.to_exec_params)
  end
@@ -29,23 +41,25 @@ module PgEventstore
  end
  end
  # @param subscription_id [Integer]
+ # @param subscriptions_set_id [Integer]
  # @param command_name [String]
  # @return [Hash]
- def create_by(subscription_id:, command_name:)
+ def create(subscription_id:, subscriptions_set_id:, command_name:)
  sql = <<~SQL
- INSERT INTO subscription_commands (name, subscription_id)
- VALUES ($1, $2)
+ INSERT INTO subscription_commands (name, subscription_id, subscriptions_set_id)
+ VALUES ($1, $2, $3)
  RETURNING *
  SQL
  pg_result = connection.with do |conn|
- conn.exec_params(sql, [command_name, subscription_id])
+ conn.exec_params(sql, [command_name, subscription_id, subscriptions_set_id])
  end
  deserialize(pg_result.to_a.first)
  end

  # @param subscription_ids [Array<Integer>]
+ # @param subscriptions_set_id [Integer]
  # @return [Array<Hash>]
- def find_commands(subscription_ids)
+ def find_commands(subscription_ids, subscriptions_set_id:)
  return [] if subscription_ids.empty?

  sql = subscription_ids.size.times.map do
@@ -55,6 +69,7 @@ module PgEventstore
  SQLBuilder.new.select('*').
  from('subscription_commands').
  where("subscription_id IN (#{sql})", *subscription_ids).
+ where("subscriptions_set_id = ?", subscriptions_set_id).
  order('id ASC')
  pg_result = connection.with do |conn|
  conn.exec_params(*sql_builder.to_exec_params)
@@ -72,6 +87,11 @@ module PgEventstore

  private

+ # @return [PgEventstore::TransactionQueries]
+ def transaction_queries
+ TransactionQueries.new(connection)
+ end
+
  # @param hash [Hash]
  # @return [Hash]
  def deserialize(hash)