pg_eventstore 1.13.4 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. checksums.yaml +4 -4
  2. data/.rubocop.yml +1 -1
  3. data/CHANGELOG.md +13 -0
  4. data/Dockerfile +3 -0
  5. data/README.md +20 -7
  6. data/db/migrations/10_setup_pg_cron.rb +23 -0
  7. data/db/migrations/11_add_events_link_global_position.sql +1 -0
  8. data/db/migrations/12_migrate_legacy_links.rb +83 -0
  9. data/db/migrations/13_remove_events_link_id.sql +6 -0
  10. data/db/migrations/14_remove_ids_events_id_index.sql +1 -0
  11. data/db/migrations/9_create_events_horizon.sql +21 -0
  12. data/docs/appending_events.md +1 -1
  13. data/docs/events_and_streams.md +1 -1
  14. data/docs/multiple_commands.md +16 -1
  15. data/lib/pg_eventstore/callbacks.rb +7 -5
  16. data/lib/pg_eventstore/cli/try_to_delete_subscriptions_set.rb +2 -2
  17. data/lib/pg_eventstore/client.rb +7 -5
  18. data/lib/pg_eventstore/commands/all_stream_read_grouped.rb +3 -3
  19. data/lib/pg_eventstore/commands/append.rb +3 -3
  20. data/lib/pg_eventstore/commands/event_modifiers/prepare_link_event.rb +5 -2
  21. data/lib/pg_eventstore/commands/event_modifiers/prepare_regular_event.rb +1 -1
  22. data/lib/pg_eventstore/commands/link_to.rb +6 -6
  23. data/lib/pg_eventstore/commands/multiple.rb +2 -2
  24. data/lib/pg_eventstore/commands/regular_stream_read_grouped.rb +1 -1
  25. data/lib/pg_eventstore/commands/regular_stream_read_paginated.rb +1 -1
  26. data/lib/pg_eventstore/commands/system_stream_read_paginated.rb +1 -1
  27. data/lib/pg_eventstore/connection.rb +1 -35
  28. data/lib/pg_eventstore/errors.rb +1 -1
  29. data/lib/pg_eventstore/event.rb +5 -5
  30. data/lib/pg_eventstore/extensions/options_extension.rb +40 -11
  31. data/lib/pg_eventstore/maintenance.rb +1 -1
  32. data/lib/pg_eventstore/queries/event_queries.rb +7 -7
  33. data/lib/pg_eventstore/queries/links_resolver.rb +6 -3
  34. data/lib/pg_eventstore/queries/partition_queries.rb +1 -1
  35. data/lib/pg_eventstore/queries/transaction_queries.rb +10 -4
  36. data/lib/pg_eventstore/query_builders/events_filtering.rb +2 -2
  37. data/lib/pg_eventstore/query_builders/partitions_filtering.rb +2 -2
  38. data/lib/pg_eventstore/stream.rb +1 -1
  39. data/lib/pg_eventstore/subscriptions/basic_runner.rb +4 -4
  40. data/lib/pg_eventstore/subscriptions/callback_handlers/subscription_feeder_handlers.rb +1 -1
  41. data/lib/pg_eventstore/subscriptions/callback_handlers/subscription_runner_handlers.rb +2 -11
  42. data/lib/pg_eventstore/subscriptions/events_processor.rb +1 -1
  43. data/lib/pg_eventstore/subscriptions/queries/subscription_command_queries.rb +5 -5
  44. data/lib/pg_eventstore/subscriptions/queries/subscription_queries.rb +2 -2
  45. data/lib/pg_eventstore/subscriptions/queries/subscription_service_queries.rb +78 -0
  46. data/lib/pg_eventstore/subscriptions/queries/subscriptions_set_command_queries.rb +2 -2
  47. data/lib/pg_eventstore/subscriptions/queries/subscriptions_set_queries.rb +1 -1
  48. data/lib/pg_eventstore/subscriptions/subscription.rb +7 -6
  49. data/lib/pg_eventstore/subscriptions/subscription_feeder.rb +2 -2
  50. data/lib/pg_eventstore/subscriptions/subscription_handler_performance.rb +1 -3
  51. data/lib/pg_eventstore/subscriptions/subscription_runner.rb +3 -16
  52. data/lib/pg_eventstore/subscriptions/subscription_runners_feeder.rb +10 -2
  53. data/lib/pg_eventstore/subscriptions/subscriptions_manager.rb +11 -16
  54. data/lib/pg_eventstore/tasks/setup.rake +30 -31
  55. data/lib/pg_eventstore/utils.rb +8 -0
  56. data/lib/pg_eventstore/version.rb +1 -1
  57. data/lib/pg_eventstore/web/application.rb +5 -5
  58. data/lib/pg_eventstore/web/paginator/events_collection.rb +3 -3
  59. data/lib/pg_eventstore/web/paginator/helpers.rb +3 -3
  60. data/lib/pg_eventstore/web/subscriptions/helpers.rb +2 -2
  61. data/lib/pg_eventstore.rb +4 -4
  62. data/pg_eventstore.gemspec +1 -1
  63. data/sig/pg_eventstore/client.rbs +1 -1
  64. data/sig/pg_eventstore/commands/multiple.rbs +1 -1
  65. data/sig/pg_eventstore/event.rbs +4 -4
  66. data/sig/pg_eventstore/extensions/options_extension.rbs +9 -1
  67. data/sig/pg_eventstore/queries/event_queries.rbs +11 -11
  68. data/sig/pg_eventstore/queries/links_resolver.rbs +5 -5
  69. data/sig/pg_eventstore/queries/transaction_queries.rbs +2 -2
  70. data/sig/pg_eventstore/subscriptions/callback_handlers/subscription_runner_handlers.rbs +0 -3
  71. data/sig/pg_eventstore/subscriptions/queries/subscription_queries.rbs +13 -13
  72. data/sig/pg_eventstore/subscriptions/queries/subscription_service_queries.rbs +19 -0
  73. data/sig/pg_eventstore/subscriptions/subscription_runner.rbs +0 -3
  74. data/sig/pg_eventstore/subscriptions/subscription_runners_feeder.rbs +10 -3
  75. data/sig/pg_eventstore/utils.rbs +10 -2
  76. metadata +11 -6
  77. data/lib/pg_eventstore/subscriptions/queries/service_queries.rb +0 -73
  78. data/lib/pg_eventstore/subscriptions/subscription_position_evaluation.rb +0 -195
  79. data/sig/pg_eventstore/subscriptions/queries/service_queries.rbs +0 -15
  80. data/sig/pg_eventstore/subscriptions/subscription_position_evaluation.rbs +0 -53
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: 94443dc3aa36701d5e126573a5babdd4585932d803b766b06b94c861f6b4ef5a
-   data.tar.gz: 86fcca21a3a2d3da35f3c760542b23cc97ae93ce97bae7b9baf011a957a9ffea
+   metadata.gz: a4799e200314277c53e9991483c429267213a9243a93c0ea85e7a23fa8c9b1ad
+   data.tar.gz: 38c68fd540c903d19ac316aca849007f3b4c27ab94dc09cfb2e71a9b9acdd734
  SHA512:
-   metadata.gz: 4fb5ed37edce4c557082ba60cfa18f7209f795fa29b98dff9f848bb83b0b1cec889f4a7ca7f77bef5c0d9c6b4065d53765dcfcdf50dffe2628c3e3ffb57d26ad
-   data.tar.gz: 0bc33722f13c57c11888ac205873ac93a3610ef5218e8dccf7c3e64d671852757e9d755ffe17ea8176b238dbe1722df47db522b5252ddceed52e14534c164c45
+   metadata.gz: 347354eb5327bf7a38df0682710f9b5a676488910ed3d0e7e967c9bcccd1d4e4b784c87e178dc6f4b5fcea2c8f681bfa0209618cf58f76dd5b36db1577d656f0
+   data.tar.gz: a567b9bfee1112e16d4f8b96b130e0f01c12b1f5349429c1ac873f4f3b4daa36628239b041eeecdc0f69a9807210d36bb30352dcb05b4100ef14eda37f1c6133
data/.rubocop.yml CHANGED
@@ -2,7 +2,7 @@ plugins:
    - rubocop-rspec
 
  AllCops:
-   TargetRubyVersion: '3.0'
+   TargetRubyVersion: '3.2'
    NewCops: enable
    Exclude:
      - 'benchmark/*'
data/CHANGELOG.md CHANGED
@@ -1,5 +1,18 @@
  ## [Unreleased]
 
+ ## [2.0.0]
+
+ - **Breaking change**: `pg_eventstore` now requires the [pg_cron](https://github.com/citusdata/pg_cron) extension
+ - **Breaking change**: `pg_eventstore` now requires PostgreSQL v16+
+ - Greatly decreased the number of connections used by `pg_eventstore` subscriptions
+ - **Breaking change**: drop support of Ruby v3.0 and v3.1. The gem now requires Ruby v3.2+
+ - **Breaking change**: the `PgEventstore::Extensions::OptionsExtension::Options` class is no longer a child of the `Set` class - it now has an independent implementation
+ - Add support of Ruby v4.0
+ - The `Client#multiple` method now accepts a `read_only` keyword argument. When it is set to true, the transaction is run in read-only mode
+ - **Breaking change**: rework links implementation. This change boosts performance, but affects the database structure, so your previous database dumps become incompatible with this change. `PgEventstore::Event#link_id` was replaced by `PgEventstore::Event#link_global_position`
+
+ The changes above require you to run migrations - `bundle exec rake pg_eventstore:migrate`. One of the migrations also migrates existing data using several concurrent workers (threads). You can adjust the number of workers using the `CONCURRENCY` environment variable. The default number of concurrent workers is `10`. **Migrations require downtime - no reads/writes should be performed while the migrations run, so plan your maintenance window accordingly.**
+
  ## [1.13.4]
 
  - Fix subscriptions potentially skipping events when multiple events are appended in concurrent transactions
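As a minimal sketch of the upgrade step called out in the 2.0.0 entry above (the worker count below is an arbitrary illustration; the documented default is 10):

```bash
# Sketch only: run the 2.0.0 migrations with an explicit number of data-migration
# workers. Schedule downtime first - no reads/writes should hit the eventstore
# while the migrations run.
CONCURRENCY=20 bundle exec rake pg_eventstore:migrate
```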
data/Dockerfile ADDED
@@ -0,0 +1,3 @@
+ FROM postgres:18
+
+ RUN apt-get update && apt-get -y install postgresql-18-cron
data/README.md CHANGED
@@ -4,27 +4,30 @@ Implements database and API to store and read events in event sourced systems.
 
  ## Requirements
 
- - `pg_eventstore` requires a PostgreSQL database with jsonb data type support (which means you need to have v9.2+). However it is recommended to use a non [EOL](https://www.postgresql.org/support/versioning/) PostgreSQL version, because the development of this gem is targeted at current PostgreSQL versions.
- - It is recommended you to have the default value set for `default_transaction_isolation` PostgreSQL config setting(`"read committed"`) as the implementation relies on it. All other transaction isolation levels(`"repeatable read"` and `"serializable"`) may cause unexpected serialization errors.
+ - `pg_eventstore` requires PostgreSQL v16+ with the [pg_cron](https://github.com/citusdata/pg_cron) extension installed.
+ - `pg_eventstore` requires a separate database. However, it is recommended that you spin it up on a separate PostgreSQL instance in a production environment.
+ - `pg_eventstore` requires the `default_transaction_isolation` server config option to be set to `'read committed'` (the default behavior). Setting this value to a stricter isolation level may result in unexpected behavior.
  - It is recommended to use a connection pooler (for example [PgBouncer](https://www.pgbouncer.org/)) in `transaction` pool mode to lower the load on a database.
  - `pg_eventstore` requires ruby v3+. The development of this gem is targeted at [current](https://endoflife.date/ruby) ruby versions.
 
  ## Installation
 
  Install the gem and add to the application's Gemfile by executing:
-
-     $ bundle add pg_eventstore
+ ```bash
+ bundle add pg_eventstore
+ ```
 
  If bundler is not being used to manage dependencies, install the gem by executing:
-
-     $ gem install pg_eventstore
+ ```bash
+ gem install pg_eventstore
+ ```
 
  ## Usage
 
  Before start using the gem - you have to create the database. Please include this line into your `Rakefile`:
 
  ```ruby
- load "pg_eventstore/tasks/setup.rake"
+ load 'pg_eventstore/tasks/setup.rake'
  ```
 
  This will include necessary rake tasks. You can now run
@@ -54,6 +57,16 @@ Documentation chapters:
 
  The gem is shipped with its own CLI. Use `pg-eventstore --help` to find out its capabilities.
 
+ ## Maintenance
+
+ You may want to back up your eventstore database. It is important to mention that you don't want to dump/restore records of the `events_horizon` table. The `events_horizon` table supports the subscriptions functionality and contains temporary data which is scoped to the PostgreSQL cluster it was created in. **Thus, it may even be harmful to restore records from this table into a new PostgreSQL cluster. Simply exclude that table's data when performing backups.** Example:
+
+ ```bash
+ pg_dump --exclude-table-data=events_horizon eventstore -U postgres > eventstore.sql
+ ```
+
+ Also, it is important that you create and migrate a new database via the provided rake commands - they include the necessary setup of `pg_cron` jobs as well. **Even if you would like to restore your db backup on a clean PostgreSQL instance - please initialize pg_eventstore via the built-in tools first.**
+
  ## RSpec
 
  ### Clean up test db
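A minimal sketch of the restore flow described in the Maintenance section above, assuming the database and user names from the `pg_dump` example and a target database that was already initialized via the gem's rake tasks:

```bash
# Sketch only: restore a dump taken with --exclude-table-data=events_horizon.
# Initialize pg_eventstore (schema, pg_cron jobs, events_horizon table) with the
# built-in rake tasks before loading the dump.
psql -U postgres eventstore < eventstore.sql
```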
data/db/migrations/10_setup_pg_cron.rb ADDED
@@ -0,0 +1,23 @@
+ # frozen_string_literal: true
+
+ PgEventstore.connection(:_postgres_db_connection).with do |conn|
+   conn.exec(<<~SQL)
+     CREATE EXTENSION IF NOT EXISTS pg_cron
+   SQL
+   conn.exec_params(<<~SQL, ["prune_#{PgEventstore::MigrationHelpers.db_name}_events_horizon", PgEventstore::MigrationHelpers.db_name])
+     SELECT cron.schedule_in_database(
+       $1,
+       '*/10 * * * *',
+       $$DELETE FROM events_horizon WHERE xact_id <= (SELECT xact_id FROM events_horizon ORDER BY xact_id DESC OFFSET 100 LIMIT 1)$$,
+       $2
+     )
+   SQL
+   # Store information about finished cron jobs for 1 day
+   conn.exec(<<~SQL)
+     SELECT cron.schedule(
+       'delete-job-run-details',
+       '0 12 * * *',
+       $$DELETE FROM cron.job_run_details WHERE end_time < now() - interval '1 day'$$
+     );
+   SQL
+ end
data/db/migrations/11_add_events_link_global_position.sql ADDED
@@ -0,0 +1 @@
+ ALTER TABLE public.events ADD COLUMN link_global_position bigint;
data/db/migrations/12_migrate_legacy_links.rb ADDED
@@ -0,0 +1,83 @@
+ # frozen_string_literal: true
+
+ CONCURRENCY = ENV['CONCURRENCY']&.to_i || 10
+
+ PgEventstore.configure(name: :_eventstore_db_connection) do |config|
+   config.connection_pool_size = CONCURRENCY
+ end
+
+ partitions = PgEventstore.connection(:_eventstore_db_connection).with do |conn|
+   conn.exec('select id, table_name from partitions')
+ end
+ partitions = partitions.to_h { |attrs| [attrs['id'], attrs['table_name']] }
+
+ total_links = PgEventstore.connection(:_eventstore_db_connection).with do |conn|
+   conn.exec_params(
+     'select count(*) all_count from events where events.type = $1 and link_global_position is null',
+     [PgEventstore::Event::LINK_TYPE]
+   )
+ end.first['all_count']
+
+ puts "Migrating legacy links. Links to process: #{total_links}. Concurrency is #{CONCURRENCY} concurrent writers."
+ processed = 0
+ processed_was = 0
+ time = Time.now
+ lock = Thread::Mutex.new
+ threads = CONCURRENCY.times.map do |t|
+   Thread.new do
+     loop do
+       link_events = PgEventstore.connection(:_eventstore_db_connection).with do |conn|
+         conn.exec_params(<<~SQL, [PgEventstore::Event::LINK_TYPE, CONCURRENCY, t])
+           select * from events
+           where events.type = $1 and events.link_global_position is null and global_position % $2 = $3
+           limit 1_000
+         SQL
+       end.to_a
+       break if link_events.empty?
+
+       lock.synchronize { processed += link_events.size }
+       link_events = link_events.to_h { [_1['global_position'], _1] }
+       builders = link_events.values.map do |event|
+         builder = PgEventstore::SQLBuilder.new
+         builder.select("global_position, #{event['global_position']} as link_event_global_position")
+         builder.from(partitions[event['link_partition_id']]).where('id = ?', event['link_id'])
+       end
+       final_builder = PgEventstore::SQLBuilder.union_builders(builders)
+
+       positions_map = PgEventstore.connection(:_eventstore_db_connection).with do |conn|
+         conn.exec_params(*final_builder.to_exec_params)
+       end.to_a
+
+       update_queries = positions_map.map do |attrs|
+         <<~SQL
+           UPDATE events SET link_global_position = #{attrs['global_position']}
+           WHERE global_position = #{attrs['link_event_global_position']};
+         SQL
+       end
+
+       PgEventstore.connection(:_eventstore_db_connection).with do |conn|
+         conn.exec(update_queries.join("\n"))
+       end
+
+       # Only log from the first thread to prevent messages spam
+       next unless t == 0
+
+       lock.synchronize do
+         time_was = time
+         time = Time.now
+
+         performance_info = <<~TEXT.strip
+           Processed: #{processed}. Left: #{total_links - processed}. \
+           Performance: #{((processed - processed_was) / (time - time_was)).round(2)} events/second.
+         TEXT
+         processed_was = processed
+         print "#{performance_info} \r"
+       end
+     end
+   end
+ end
+ threads.each(&:join)
+
+ PgEventstore.connection(:_eventstore_db_connection).with do |conn|
+   conn.exec('VACUUM (ANALYZE) events;')
+ end
data/db/migrations/13_remove_events_link_id.sql ADDED
@@ -0,0 +1,6 @@
+ -- We need to drop view which uses link_id and re-create it after we remove link_id column
+ DROP VIEW "$streams";
+
+ ALTER TABLE public.events DROP COLUMN link_id;
+
+ CREATE VIEW "$streams" AS SELECT * FROM events WHERE stream_revision = 0;
data/db/migrations/14_remove_ids_events_id_index.sql ADDED
@@ -0,0 +1 @@
+ DROP INDEX idx_events_id;
data/db/migrations/9_create_events_horizon.sql ADDED
@@ -0,0 +1,21 @@
+ CREATE UNLOGGED TABLE events_horizon
+ (
+     global_position bigint not null,
+     xact_id         xid8   not null default pg_current_xact_id()
+ );
+ CREATE INDEX idx_xact_id_and_created_at_and_global_position ON events_horizon USING btree(xact_id, global_position);
+ COMMENT ON TABLE events_horizon IS 'Internal use only. Data is limited to the PostgreSQL cluster in which it was created. DO NOT INCLUDE ITS DATA INTO YOUR DUMP.';
+
+ CREATE OR REPLACE FUNCTION log_events_horizon()
+     RETURNS TRIGGER AS $$
+ BEGIN
+     INSERT INTO events_horizon(global_position)
+     VALUES (NEW.global_position);
+     RETURN NEW;
+ END;
+ $$ LANGUAGE plpgsql;
+
+ CREATE TRIGGER log_events_horizon
+     AFTER INSERT ON events
+     FOR EACH ROW
+ EXECUTE FUNCTION log_events_horizon();
data/docs/appending_events.md CHANGED
@@ -13,7 +13,7 @@ end
  event = SomethingHappened.new(data: { user_id: SecureRandom.uuid, title: "Something happened" })
  stream = PgEventstore::Stream.new(context: 'MyAwesomeContext', stream_name: 'SomeStream', stream_id: 'f37b82f2-4152-424d-ab6b-0cc6f0a53aae')
  PgEventstore.client.append_to_stream(stream, event)
- # => #<SomethingHappened:0x0 @context="MyAwesomeContext", @created_at=2023-11-30 14:47:31.296229 UTC, @data={"title"=>"Something happened", "user_id"=>"be52a81c-ad5b-4cfd-a039-0b7276974e6b"}, @global_position=7, @id="0b01137b-bdd8-4f0d-8ccf-f8c959e3a324", @link_id=nil, @metadata={}, @stream_id="f37b82f2-4152-424d-ab6b-0cc6f0a53aae", @stream_name="SomeStream", @stream_revision=0, @type="SomethingHappened">
+ # => #<SomethingHappened:0x0 @context="MyAwesomeContext", @created_at=2023-11-30 14:47:31.296229 UTC, @data={"title"=>"Something happened", "user_id"=>"be52a81c-ad5b-4cfd-a039-0b7276974e6b"}, @global_position=7, @id="0b01137b-bdd8-4f0d-8ccf-f8c959e3a324", @link_global_position=nil, @metadata={}, @stream_id="f37b82f2-4152-424d-ab6b-0cc6f0a53aae", @stream_name="SomeStream", @stream_revision=0, @type="SomethingHappened">
  ```
 
  ## Appending multiple events
data/docs/events_and_streams.md CHANGED
@@ -16,7 +16,7 @@
  - `stream_revision` - Integer(optional, read only). A revision of an event inside its stream.
  - `data` - Hash(optional). Event's payload data. For example, if you have a `DescriptionChanged` event class, then you may want to have a description value in the event payload data. Example: `DescriptionChanged.new(data: { 'description' => 'Description of something', 'post_id' => SecureRandom.uuid })`
  - `metadata` - Hash(optional). Event metadata. Event meta information which is not part of an events data payload. Example: `{ published_by: publishing_user.id }`
- - `link_id` - String(UUIDv4, optional, read only). If an event is a link event (link events are pointers to other events), this attribute contains an `id` of the original event. Manually assigning this attribute has no effect. It is internally set when appending an event to the given stream or when reading events from the database.
+ - `link_global_position` - Integer(optional, read only). If an event is a link event (link events are pointers to other events), this attribute contains a `global_position` of the original event. Manually assigning this attribute has no effect. It is internally set when linking an event to the given stream or when reading events from the database.
  - `link_partition_id` - Integer(optional, read only). If an event is a link event - this attribute contains a partition `id` of original event. Manually assigning this attribute has no effect. It is internally set when appending an event to the given stream or when reading events from the database.
  - `link` - PgEventstore::Event(optional, read only). When reading from a stream using `resolve_link_tos: true`, if an event is resolved from a link - this attribute contains a `PgEventstore::Event` object which corresponds to that link. Manually assigning this attribute has no effect. It is internally set when reading events from the database.
  - `created_at` - Time(optional, read only). Database's timestamp when an event was appended to a stream. You may want to put your own timestamp into a `metadata` attribute - it may be useful when migrating between different databases. Manually assigning this attribute has no effect. It is internally set when appending an event to the given stream or when reading events from the database.
data/docs/multiple_commands.md CHANGED
@@ -1,6 +1,6 @@
  # Multiple commands
 
- `pg_eventstore` implements the `#multiple` method to allow you to make several different commands atomic. Example:
+ `pg_eventstore` implements the `#multiple` method to allow you to make several different commands atomic. Internally it executes the given block within a PostgreSQL transaction. Example:
 
  ```ruby
  PgEventstore.client.multiple do
@@ -11,6 +11,21 @@ PgEventstore.client.multiple do
  end
  ```
 
+ Optionally, you can provide the `read_only: true` argument to run the transaction in read-only mode. This, however, will raise a `PG::ReadOnlySqlTransaction` exception if any mutating query is executed within the block. Example:
+
+ ```ruby
+ # Good
+ PgEventstore.client.multiple(read_only: true) do
+   PgEventstore.client.read(stream1)
+   PgEventstore.client.read(stream2)
+ end
+ # Bad. Will raise error
+ PgEventstore.client.multiple(read_only: true) do
+   PgEventstore.client.append_to_stream(stream, event)
+ end
+ ```
+
+
  All commands inside a `multiple` block either all succeed or all fail. This allows you to easily implement complex business rules. However, it comes with a price of performance. The more you put in a single block, the higher the chance it will have conflicts with other commands run in parallel, increasing overall time to complete. **Because of this performance implications, do not put more events than needed in a `multple` block.** You may still want to use it though as it could simplify your implementation.
 
  **Please take into account that due to concurrency of parallel commands, a block of code may be re-run several times before succeeding.** So, if you put any piece of code besides `pg_evenstore`'s commands - make sure it is ready for re-runs. A good and a bad examples:
data/lib/pg_eventstore/callbacks.rb CHANGED
@@ -86,14 +86,16 @@ module PgEventstore
 
    # @param action [Object] an action to run
    # @return [Object] the result of passed block
-   def run_callbacks(action, *args, **kwargs, &blk)
+   # rubocop:disable Style/ArgumentsForwarding
+   def run_callbacks(action, *, **, &)
      return (yield if block_given?) unless @callbacks[action]
 
-     run_before_callbacks(action, *args, **kwargs)
-     result = run_around_callbacks(action, *args, **kwargs, &blk)
-     run_after_callbacks(action, *args, **kwargs)
+     run_before_callbacks(action, *, **)
+     result = run_around_callbacks(action, *, **, &)
+     run_after_callbacks(action, *, **)
      result
    end
+   # rubocop:enable Style/ArgumentsForwarding
 
    # @param action [Object]
    # @param filter [Symbol]
@@ -121,7 +123,7 @@ module PgEventstore
    end
 
    # @return [Object] the result of the passed block
-   def run_around_callbacks(action, *args, **kwargs, &_blk)
+   def run_around_callbacks(action, *args, **kwargs, &)
      result = nil
      stack = [proc { result = yield if block_given? }]
      @callbacks[action][:around]&.reverse_each&.with_index do |callback, index|
data/lib/pg_eventstore/cli/try_to_delete_subscriptions_set.rb CHANGED
@@ -26,14 +26,14 @@ module PgEventstore
        )
        cmd_name = SubscriptionFeederCommands.command_class('Ping').new.name
        subscriptions_set_commands_queries.find_or_create_by(
-         subscriptions_set_id: subscriptions_set_id, command_name: cmd_name, data: {}
+         subscriptions_set_id:, command_name: cmd_name, data: {}
        )
        # Potentially CommandsHandler can be dead exactly at the same moment we expect it to process "Ping" command.
        # Wait for potential recover plus run interval and plus another second to allow potential processing of
        # "Ping" command. "Ping" command comes in prio, so it is guaranteed it will be processed as a first command.
        sleep RunnerRecoveryStrategies::RestoreConnection::TIME_BETWEEN_RETRIES + CommandsHandler::PULL_INTERVAL + 1
        existing_cmd = subscriptions_set_commands_queries.find_by(
-         subscriptions_set_id: subscriptions_set_id, command_name: cmd_name
+         subscriptions_set_id:, command_name: cmd_name
        )
        if existing_cmd
          # "Ping" command wasn't consumed. Related process must be dead.
data/lib/pg_eventstore/client.rb CHANGED
@@ -33,7 +33,7 @@ module PgEventstore
          events: event_queries(middlewares(middlewares)),
          transactions: transaction_queries
        )
-     ).call(stream, *events_or_event, options: options)
+     ).call(stream, *events_or_event, options:)
      events_or_event.is_a?(Array) ? result : result.first
    end
 
@@ -47,9 +47,11 @@ module PgEventstore
    #     PgEventstore.client.append_to_stream(...)
    #   end
    #
+   # @param read_only [Boolean] whether the transaction is read-only. Running mutating queries within a read-only
+   #   transaction will result in an exception
    # @return the result of the given block
-   def multiple(&blk)
-     Commands::Multiple.new(Queries.new(transactions: transaction_queries)).call(&blk)
+   def multiple(read_only: false, &)
+     Commands::Multiple.new(Queries.new(transactions: transaction_queries)).call(read_only:, &)
    end
 
    # Read events from the specific stream or from "all" stream.
@@ -138,7 +140,7 @@ module PgEventstore
      cmd_class = stream.all_stream? ? Commands::AllStreamReadGrouped : Commands::RegularStreamReadGrouped
      cmd_class.
        new(Queries.new(partitions: partition_queries, events: event_queries(middlewares(middlewares)))).
-       call(stream, options: options)
+       call(stream, options:)
    end
 
    # Links event from one stream into another stream. You can later access it by providing :resolve_link_tos option
@@ -160,7 +162,7 @@ module PgEventstore
          events: event_queries(middlewares(middlewares)),
          transactions: transaction_queries
        )
-     ).call(stream, *events_or_event, options: options)
+     ).call(stream, *events_or_event, options:)
      events_or_event.is_a?(Array) ? result : result.first
    end
 
data/lib/pg_eventstore/commands/all_stream_read_grouped.rb CHANGED
@@ -47,10 +47,10 @@ module PgEventstore
    def build_filter_options_for_streams(partition, stream_ids, options)
      stream_ids.map do |stream_id|
        filter = {
-         streams: [{ context: partition.context, stream_name: partition.stream_name, stream_id: stream_id }],
+         streams: [{ context: partition.context, stream_name: partition.stream_name, stream_id: }],
          event_types: [partition.event_type],
        }
-       options.merge(filter: filter, max_count: 1)
+       options.merge(filter:, max_count: 1)
      end
    end
 
@@ -62,7 +62,7 @@ module PgEventstore
          streams: [{ context: partition.context, stream_name: partition.stream_name }],
          event_types: [partition.event_type],
        }
-       options.merge(filter: filter, max_count: 1)
+       options.merge(filter:, max_count: 1)
      end
    end
  end
data/lib/pg_eventstore/commands/append.rb CHANGED
@@ -50,19 +50,19 @@ module PgEventstore
    in [Integer, Integer]
      unless revision == expected_revision
        raise WrongExpectedRevisionError.new(
-         revision: revision, expected_revision: expected_revision, stream: stream
+         revision:, expected_revision:, stream:
        )
      end
 
    in [Integer, Symbol]
      if revision == Stream::NON_EXISTING_STREAM_REVISION && expected_revision == :stream_exists
        raise WrongExpectedRevisionError.new(
-         revision: revision, expected_revision: expected_revision, stream: stream
+         revision:, expected_revision:, stream:
        )
      end
      if revision > Stream::NON_EXISTING_STREAM_REVISION && expected_revision == :no_stream
        raise WrongExpectedRevisionError.new(
-         revision: revision, expected_revision: expected_revision, stream: stream
+         revision:, expected_revision:, stream:
        )
      end
    end
data/lib/pg_eventstore/commands/event_modifiers/prepare_link_event.rb CHANGED
@@ -19,9 +19,12 @@ module PgEventstore
    # @return [PgEventstore::Event]
    def call(event, revision)
      Event.new(
-       link_id: event.id, link_partition_id: partition_id(event), type: Event::LINK_TYPE, stream_revision: revision
+       link_global_position: event.global_position,
+       link_partition_id: partition_id(event),
+       type: Event::LINK_TYPE,
+       stream_revision: revision
      ).tap do |e|
-       %i[link_id link_partition_id type stream_revision].each { |attr| e.readonly!(attr) }
+       %i[link_global_position link_partition_id type stream_revision].each { |attr| e.readonly!(attr) }
      end
    end
 
data/lib/pg_eventstore/commands/event_modifiers/prepare_regular_event.rb CHANGED
@@ -13,7 +13,7 @@ module PgEventstore
      event.class.new(
        id: event.id, data: event.data, metadata: event.metadata, type: event.type, stream_revision: revision
      ).tap do |e|
-       %i[link_id link_partition_id stream_revision].each { |attr| e.readonly!(attr) }
+       %i[link_global_position link_partition_id stream_revision].each { |attr| e.readonly!(attr) }
      end
    end
  end
data/lib/pg_eventstore/commands/link_to.rb CHANGED
@@ -16,22 +16,22 @@ module PgEventstore
      check_events_presence(events)
      append_cmd = Append.new(queries)
      append_cmd.call(
-       stream, *events, options: options, event_modifier: EventModifiers::PrepareLinkEvent.new(queries.partitions)
+       stream, *events, options:, event_modifier: EventModifiers::PrepareLinkEvent.new(queries.partitions)
      )
    end
 
    private
 
-   # Checks if the given events are persisted events. This is needed to prevent potentially non-existing id valuess
-   # from appearing in #link_id column.
+   # Checks if the given events are persisted events. This is needed to prevent potentially non-existing
+   # global_position values from appearing in #link_global_position column.
    # @param events [Array<PgEventstore::Event>]
    # @return [void]
    def check_events_presence(events)
-     ids_from_db = queries.events.ids_from_db(events)
-     missing_ids = events.map(&:id) - ids_from_db
+     global_positions_from_db = queries.events.global_positions_from_db(events)
+     missing_ids = events.map(&:global_position) - global_positions_from_db
      return if missing_ids.empty?
 
-     missing_event = events.find { |event| event.id == missing_ids.first }
+     missing_event = events.find { |event| event.global_position == missing_ids.first }
      raise NotPersistedEventError, missing_event
    end
  end
data/lib/pg_eventstore/commands/multiple.rb CHANGED
@@ -4,8 +4,8 @@ module PgEventstore
  module Commands
    # @!visibility private
    class Multiple < AbstractCommand
-     def call(&blk)
-       queries.transactions.transaction(&blk)
+     def call(read_only:, &)
+       queries.transactions.transaction(read_only:, &)
      end
    end
  end
data/lib/pg_eventstore/commands/regular_stream_read_grouped.rb CHANGED
@@ -22,7 +22,7 @@ module PgEventstore
      )
      options_by_event_type = queries.partitions.partitions(stream_filters, event_types).map do |partition|
        filter = { event_types: [partition.event_type] }
-       options.merge(filter: filter, max_count: 1)
+       options.merge(filter:, max_count: 1)
      end
      queries.events.grouped_events(stream, options_by_event_type, **options.slice(:resolve_link_tos))
    end
data/lib/pg_eventstore/commands/regular_stream_read_paginated.rb CHANGED
@@ -10,7 +10,7 @@ module PgEventstore
      next_revision = nil
      loop do
        options = options.merge(from_revision: next_revision) if next_revision
-       events = read_cmd.call(stream, options: options)
+       events = read_cmd.call(stream, options:)
        yielder << events if events.any?
        if end_reached?(events, options[:max_count] || QueryBuilders::EventsFiltering::DEFAULT_LIMIT)
          raise StopIteration
data/lib/pg_eventstore/commands/system_stream_read_paginated.rb CHANGED
@@ -10,7 +10,7 @@ module PgEventstore
      next_position = nil
      loop do
        options = options.merge(from_position: next_position) if next_position
-       events = read_cmd.call(stream, options: options)
+       events = read_cmd.call(stream, options:)
        yielder << events if events.any?
        if end_reached?(events, options[:max_count] || QueryBuilders::EventsFiltering::DEFAULT_LIMIT)
          raise StopIteration
data/lib/pg_eventstore/connection.rb CHANGED
@@ -8,36 +8,6 @@ require_relative 'pg_connection'
 
  module PgEventstore
    class Connection
-     # Starting from ruby v3.1 ConnectionPool closes connections after forking by default. For ruby v3 we need this patch
-     # to correctly reload the ConnectionPool. Otherwise the same connection will leak into another process which will
-     # result in disaster.
-     # @!visibility private
-     module Ruby30Patch
-       def initialize(**)
-         @current_pid = Process.pid
-         @mutext = Mutex.new
-         super
-       end
-
-       def with(&blk)
-         reload_after_fork
-         super
-       end
-
-       private
-
-       def reload_after_fork
-         return if @current_pid == Process.pid
-
-         @mutext.synchronize do
-           return if @current_pid == Process.pid
-
-           @pool.reload(&:close)
-           @current_pid = Process.pid
-         end
-       end
-     end
-
      # @!attribute uri
      #   @return [String]
      attr_reader :uri
@@ -63,7 +33,7 @@
      # A shorthand from ConnectionPool#with.
      # @yieldparam connection [PG::Connection] PostgreSQL connection instance
      # @return [Object] a value of a given block
-     def with(&_blk)
+     def with(&)
        should_retry = true
        @pool.with do |conn|
          yield conn
@@ -107,7 +77,3 @@
      end
    end
  end
-
- if Gem::Version.new(RUBY_VERSION) < Gem::Version.new('3.1')
-   PgEventstore::Connection.prepend(PgEventstore::Connection::Ruby30Patch)
- end
data/lib/pg_eventstore/errors.rb CHANGED
@@ -203,7 +203,7 @@ module PgEventstore
 
    # @return [String]
    def user_friendly_message
-     "Event with #id #{event.id.inspect} must be present, but it could not be found."
+     "Event #{event.inspect} is not a persisted event."
    end
  end
 
data/lib/pg_eventstore/event.rb CHANGED
@@ -30,10 +30,10 @@ module PgEventstore
    # @!attribute metadata
    #   @return [Hash] event's metadata
    attribute(:metadata) { {} }
-   # @!attribute link_id
-   #   @return [String, nil] UUIDv4 of an event the current event points to. If it is not nil, then the current
-   #     event is a link
-   attribute(:link_id)
+   # @!attribute link_global_position
+   #   @return [Integer, nil] global_position of an event the current event points to. If it is not nil, then the
+   #     current event is a link
+   attribute(:link_global_position)
    # @!attribute link_partition_id
    #   @return [Integer, nil] a partition id of an event the link event points to. It is used to load original event
    #     when resolve_link_tos: true option is provided when reading events.
@@ -58,7 +58,7 @@
    # Detect whether an event is a link event
    # @return [Boolean]
    def link?
-     !link_id.nil?
+     !link_global_position.nil?
    end
 
    # Detect whether an event is a system event