sequent 7.1.1 → 8.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/bin/sequent +6 -107
- data/db/sequent_8_migration.sql +120 -0
- data/db/sequent_pgsql.sql +416 -0
- data/db/sequent_schema.rb +11 -57
- data/db/sequent_schema_indexes.sql +37 -0
- data/db/sequent_schema_partitions.sql +34 -0
- data/db/sequent_schema_tables.sql +74 -0
- data/lib/sequent/cli/app.rb +132 -0
- data/lib/sequent/cli/sequent_8_migration.rb +180 -0
- data/lib/sequent/configuration.rb +11 -8
- data/lib/sequent/core/aggregate_repository.rb +2 -2
- data/lib/sequent/core/aggregate_root.rb +32 -9
- data/lib/sequent/core/aggregate_snapshotter.rb +8 -6
- data/lib/sequent/core/command_record.rb +27 -18
- data/lib/sequent/core/command_service.rb +2 -2
- data/lib/sequent/core/event_publisher.rb +1 -1
- data/lib/sequent/core/event_record.rb +37 -17
- data/lib/sequent/core/event_store.rb +101 -119
- data/lib/sequent/core/helpers/array_with_type.rb +1 -1
- data/lib/sequent/core/helpers/association_validator.rb +2 -2
- data/lib/sequent/core/helpers/attribute_support.rb +8 -8
- data/lib/sequent/core/helpers/equal_support.rb +3 -3
- data/lib/sequent/core/helpers/message_matchers/has_attrs.rb +2 -0
- data/lib/sequent/core/helpers/message_router.rb +2 -2
- data/lib/sequent/core/helpers/param_support.rb +1 -3
- data/lib/sequent/core/helpers/pgsql_helpers.rb +32 -0
- data/lib/sequent/core/helpers/string_support.rb +1 -1
- data/lib/sequent/core/helpers/string_to_value_parsers.rb +1 -1
- data/lib/sequent/core/persistors/active_record_persistor.rb +1 -1
- data/lib/sequent/core/persistors/replay_optimized_postgres_persistor.rb +3 -4
- data/lib/sequent/core/projector.rb +1 -1
- data/lib/sequent/core/snapshot_record.rb +44 -0
- data/lib/sequent/core/snapshot_store.rb +105 -0
- data/lib/sequent/core/stream_record.rb +10 -15
- data/lib/sequent/dry_run/read_only_replay_optimized_postgres_persistor.rb +1 -1
- data/lib/sequent/dry_run/view_schema.rb +2 -3
- data/lib/sequent/generator/project.rb +5 -7
- data/lib/sequent/generator/template_aggregate/template_aggregate/commands.rb +2 -0
- data/lib/sequent/generator/template_aggregate/template_aggregate/events.rb +2 -0
- data/lib/sequent/generator/template_aggregate/template_aggregate/template_aggregate.rb +2 -0
- data/lib/sequent/generator/template_aggregate/template_aggregate/template_aggregate_command_handler.rb +2 -0
- data/lib/sequent/generator/template_aggregate/template_aggregate.rb +2 -0
- data/lib/sequent/generator/template_project/Gemfile +7 -5
- data/lib/sequent/generator/template_project/Rakefile +4 -2
- data/lib/sequent/generator/template_project/app/projectors/post_projector.rb +2 -0
- data/lib/sequent/generator/template_project/app/records/post_record.rb +2 -0
- data/lib/sequent/generator/template_project/config/initializers/sequent.rb +3 -8
- data/lib/sequent/generator/template_project/db/migrations.rb +3 -3
- data/lib/sequent/generator/template_project/lib/post/commands.rb +2 -0
- data/lib/sequent/generator/template_project/lib/post/events.rb +2 -0
- data/lib/sequent/generator/template_project/lib/post/post.rb +2 -0
- data/lib/sequent/generator/template_project/lib/post/post_command_handler.rb +2 -0
- data/lib/sequent/generator/template_project/lib/post.rb +2 -0
- data/lib/sequent/generator/template_project/my_app.rb +2 -1
- data/lib/sequent/generator/template_project/spec/app/projectors/post_projector_spec.rb +2 -0
- data/lib/sequent/generator/template_project/spec/lib/post/post_command_handler_spec.rb +9 -2
- data/lib/sequent/generator/template_project/spec/spec_helper.rb +4 -7
- data/lib/sequent/generator.rb +1 -1
- data/lib/sequent/internal/aggregate_type.rb +12 -0
- data/lib/sequent/internal/command_type.rb +12 -0
- data/lib/sequent/internal/event_type.rb +12 -0
- data/lib/sequent/internal/internal.rb +14 -0
- data/lib/sequent/internal/partitioned_aggregate.rb +26 -0
- data/lib/sequent/internal/partitioned_command.rb +16 -0
- data/lib/sequent/internal/partitioned_event.rb +29 -0
- data/lib/sequent/migrations/grouper.rb +90 -0
- data/lib/sequent/migrations/sequent_schema.rb +2 -1
- data/lib/sequent/migrations/view_schema.rb +76 -77
- data/lib/sequent/rake/migration_tasks.rb +49 -24
- data/lib/sequent/sequent.rb +1 -0
- data/lib/sequent/support/database.rb +20 -16
- data/lib/sequent/test/time_comparison.rb +1 -1
- data/lib/sequent/util/timer.rb +1 -1
- data/lib/version.rb +1 -1
- metadata +102 -21
- data/lib/sequent/generator/template_project/db/sequent_schema.rb +0 -52
- data/lib/sequent/generator/template_project/ruby-version +0 -1
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: d275476616c98974e95ec5b59af16651bb57e5b2e3b88440de43c00687cd4217
+  data.tar.gz: bfdd1340cc4817a2ee6bcc4a3bfd44a1203093078d8dab08e71ade3f3c2f2fe2
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 80fd3dbef368b2835b8bc51f3bcce8a06d2860befaa7b6854f99662152ed186d370b8a55993266c242350a2bb6abbd48f4128dab76f8338d648156d9e7d27bb7
+  data.tar.gz: 288fcc0e3f674dabffb326b36eb8a685975415f62857120a0bd6b0820dff8c10c479fd5510cfc0b09b6f46e562f5d1b7d1f6dfa6753cc64131e81d3465529cba
data/bin/sequent
CHANGED
@@ -1,111 +1,10 @@
 #!/usr/bin/env ruby
 # frozen_string_literal: true
 
-
+require 'gli'
+require 'tty-prompt'
+require './lib/version'
+require './lib/sequent/cli/app'
 
-
-
-abort('Please specify a command. i.e. `sequent new myapp`') if ARGV[1..-1].empty?
-
-args = ARGV[1..-1].map(&:to_s).map(&:strip)
-
-def new_project(args)
-  arguments = args.dup
-  name = arguments.shift
-  abort('Please specify a directory name. i.e. `sequent new myapp`') if name.empty?
-
-  Sequent::Generator::Project.new(name).execute
-  puts <<~NEXTSTEPS
-
-    Success!
-
-    Your brand spanking new sequent app is waiting for you in:
-      #{File.expand_path(name, Dir.pwd)}
-
-    To finish setting up your app:
-      cd #{name}
-      bundle install
-      bundle exec rake sequent:db:create
-      bundle exec rake sequent:db:create_view_schema
-      bundle exec rake sequent:migrate:online
-      bundle exec rake sequent:migrate:offline
-
-    Run the example specs:
-      SEQUENT_ENV=test bundle exec rake sequent:db:create
-      bundle exec rspec spec
-
-    To generate new aggregates use:
-      sequent generate <aggregate_name>. e.g. sequent generate address
-
-    For more information see:
-      https://www.sequent.io
-
-    Happy coding!
-
-  NEXTSTEPS
-end
-
-def generate_aggregate(args)
-  arguments = args.dup
-  aggregate_name = arguments.shift
-  abort('Please specify an aggregate name. i.e. `sequent g aggregate user`') unless args_valid?(aggregate_name)
-
-  Sequent::Generator::Aggregate.new(aggregate_name).execute
-  puts "#{aggregate_name} aggregate has been generated"
-end
-
-def generate_command(args)
-  arguments = args.dup
-  aggregate_name = arguments.shift
-  command_name = arguments.shift
-  attrs = arguments
-
-  unless args_valid?(aggregate_name, command_name)
-    abort('Please specify an aggregate name and command name. i.e. `sequent g command user AddUser`')
-  end
-  Sequent::Generator::Command.new(aggregate_name, command_name, attrs).execute
-  puts "#{command_name} command has been added to #{aggregate_name}"
-end
-
-def generate_event(args)
-  arguments = args.dup
-  aggregate_name = arguments.shift
-  event_name = arguments.shift
-  attrs = arguments
-
-  abort('Please specify an aggregate name and event name. i.e. `sequent g event user AddUser`') unless args_valid?(
-    aggregate_name, event_name
-  )
-  Sequent::Generator::Event.new(aggregate_name, event_name, attrs).execute
-  puts "#{event_name} event has been added to #{aggregate_name}"
-end
-
-def generate(args)
-  arguments = args.dup
-  entity = arguments.shift
-  abort('Please specify a command. i.e. `sequent g aggregate user`') if entity.empty?
-
-  case entity
-  when 'aggregate'
-    generate_aggregate(arguments)
-  when 'command'
-    generate_command(arguments)
-  when 'event'
-    generate_event(arguments)
-  else
-    abort("Unknown argument #{entity} for `generate`. Try `sequent g aggregate user`")
-  end
-end
-
-def args_valid?(*args)
-  args.all?(&:present?)
-end
-
-case command
-when 'new'
-  new_project(args)
-when 'generate', 'g'
-  generate(args)
-else
-  abort("Unknown command #{command}. Try `sequent new myapp`")
-end
+exit_code = Sequent::Cli::App.run(ARGV)
+exit(exit_code)
data/db/sequent_8_migration.sql
ADDED
@@ -0,0 +1,120 @@
+-- This script migrates a pre-sequent 8 database to the sequent 8 schema while preserving the data.
+-- It runs in a single transaction and when completed you can COMMIT or ROLLBACK the results.
+--
+-- To adjust the partitioning setup you can modify `./sequent_schema_partitions.sql`. By default
+-- only a single partition is present for each partitioned table, which works well for smaller
+-- (e.g. less than 10 Gigabytes) databases.
+--
+-- Ensure you test this on a copy of your production system to verify everything works and to
+-- get an indication of the required downtime for your system.
+
+\set ECHO all
+\set ON_ERROR_STOP
+\timing on
+
+SELECT clock_timestamp() AS migration_started_at \gset
+
+\echo Migration started at :migration_started_at
+
+SET work_mem TO '8MB';
+SET max_parallel_workers = 8;
+SET max_parallel_workers_per_gather = 8;
+SET max_parallel_maintenance_workers = 8;
+
+BEGIN;
+
+SET temp_tablespaces = 'pg_default';
+SET search_path TO sequent_schema;
+
+ALTER SEQUENCE command_records_id_seq OWNED BY NONE;
+ALTER SEQUENCE command_records_id_seq RENAME TO commands_id_seq;
+
+\ir ./sequent_schema_tables.sql
+\ir ./sequent_schema_partitions.sql
+
+INSERT INTO aggregate_types (type)
+SELECT DISTINCT aggregate_type
+  FROM sequent_schema.stream_records
+ ORDER BY 1;
+
+INSERT INTO event_types (type)
+SELECT DISTINCT event_type
+  FROM sequent_schema.event_records
+ WHERE event_type <> 'Sequent::Core::SnapshotEvent'
+ ORDER BY 1;
+
+INSERT INTO command_types (type)
+SELECT DISTINCT command_type
+  FROM sequent_schema.command_records
+ ORDER BY 1;
+
+ANALYZE aggregate_types, event_types, command_types;
+
+INSERT INTO aggregates (aggregate_id, aggregate_type_id, snapshot_threshold, created_at)
+SELECT aggregate_id, (SELECT t.id FROM aggregate_types t WHERE aggregate_type = t.type), snapshot_threshold, created_at AT TIME ZONE 'Europe/Amsterdam'
+  FROM stream_records;
+
+WITH e AS MATERIALIZED (
+  SELECT aggregate_id,
+         sequence_number,
+         command_record_id,
+         t.id AS event_type_id,
+         event_json::jsonb - '{aggregate_id,sequence_number}'::text[] AS event_json
+    FROM sequent_schema.event_records e
+    JOIN event_types t ON e.event_type = t.type
+)
+INSERT INTO events (aggregate_id, sequence_number, created_at, command_id, event_type_id, event_json)
+SELECT aggregate_id,
+       sequence_number,
+       (event_json->>'created_at')::timestamptz AS created_at,
+       command_record_id,
+       event_type_id,
+       event_json - 'created_at'
+  FROM e;
+
+WITH command AS MATERIALIZED (
+  SELECT c.id, created_at,
+         t.id AS command_type_id,
+         command_json::jsonb AS json
+    FROM sequent_schema.command_records c
+    JOIN command_types t ON t.type = c.command_type
+)
+INSERT INTO commands (
+  id, created_at, user_id, aggregate_id, command_type_id, command_json,
+  event_aggregate_id, event_sequence_number
+)
+SELECT id,
+       COALESCE((json->>'created_at')::timestamptz, created_at AT TIME ZONE 'Europe/Amsterdam'),
+       (json->>'user_id')::uuid,
+       (json->>'aggregate_id')::uuid,
+       command_type_id,
+       json - '{created_at,user_id,aggregate_id,event_aggregate_id,event_sequence_number}'::text[],
+       (json->>'event_aggregate_id')::uuid,
+       (json->>'event_sequence_number')::integer
+  FROM command;
+
+INSERT INTO aggregates_that_need_snapshots (aggregate_id, snapshot_sequence_number_high_water_mark, snapshot_outdated_at)
+SELECT aggregate_id, MAX(sequence_number), NOW()
+  FROM event_records
+ WHERE event_type = 'Sequent::Core::SnapshotEvent'
+ GROUP BY 1
+ ORDER BY 1;
+
+ALTER TABLE command_records RENAME TO old_command_records;
+ALTER TABLE event_records RENAME TO old_event_records;
+ALTER TABLE stream_records RENAME TO old_stream_records;
+
+\ir ./sequent_schema_indexes.sql
+
+\set ECHO none
+
+\ir ./sequent_pgsql.sql
+
+\set ECHO all
+
+SELECT clock_timestamp() AS migration_completed_at,
+       clock_timestamp() - :'migration_started_at'::timestamptz AS migration_duration \gset
+
+\echo Migration complated in :migration_duration (started at :migration_started_at, completed at :migration_completed_at)
+
+\echo execute ROLLBACK to abort, COMMIT to commit followed by VACUUM VERBOSE ANALYZE to ensure good performance
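The header comments in the script above describe the intended workflow: everything runs inside one open transaction, so nothing is permanent until you commit. As a minimal sketch, assuming the script was loaded in an interactive psql session from the db directory with \i sequent_8_migration.sql and the echoed output looks correct, the closing step in that same session would be:

-- Accept the migration and reclaim/refresh statistics afterwards:
COMMIT;
VACUUM VERBOSE ANALYZE;

-- Or discard everything and keep the old schema untouched:
-- ROLLBACK;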
data/db/sequent_pgsql.sql
ADDED
@@ -0,0 +1,416 @@
+DROP TYPE IF EXISTS aggregate_event_type CASCADE;
+CREATE TYPE aggregate_event_type AS (
+  aggregate_type text,
+  aggregate_id uuid,
+  events_partition_key text,
+  event_type text,
+  event_json jsonb
+);
+
+CREATE OR REPLACE FUNCTION enrich_command_json(command commands) RETURNS jsonb
+LANGUAGE plpgsql AS $$
+BEGIN
+  RETURN jsonb_build_object(
+    'command_type', (SELECT type FROM command_types WHERE command_types.id = command.command_type_id),
+    'created_at', command.created_at,
+    'user_id', command.user_id,
+    'aggregate_id', command.aggregate_id,
+    'event_aggregate_id', command.event_aggregate_id,
+    'event_sequence_number', command.event_sequence_number
+  )
+  || command.command_json;
+END
+$$;
+
+CREATE OR REPLACE FUNCTION enrich_event_json(event events) RETURNS jsonb
+LANGUAGE plpgsql AS $$
+BEGIN
+  RETURN jsonb_build_object(
+    'aggregate_id', event.aggregate_id,
+    'sequence_number', event.sequence_number,
+    'created_at', event.created_at
+  )
+  || event.event_json;
+END
+$$;
+
+CREATE OR REPLACE FUNCTION load_event(
+  _aggregate_id uuid,
+  _sequence_number integer
+) RETURNS SETOF aggregate_event_type
+LANGUAGE plpgsql AS $$
+BEGIN
+  RETURN QUERY SELECT aggregate_types.type,
+    a.aggregate_id,
+    a.events_partition_key,
+    event_types.type,
+    enrich_event_json(e)
+  FROM aggregates a
+  INNER JOIN events e ON (a.events_partition_key, a.aggregate_id) = (e.partition_key, e.aggregate_id)
+  INNER JOIN aggregate_types ON a.aggregate_type_id = aggregate_types.id
+  INNER JOIN event_types ON e.event_type_id = event_types.id
+  WHERE a.aggregate_id = _aggregate_id
+    AND e.sequence_number = _sequence_number;
+END;
+$$;
+
+CREATE OR REPLACE FUNCTION load_events(
+  _aggregate_ids jsonb,
+  _use_snapshots boolean DEFAULT TRUE,
+  _until timestamptz DEFAULT NULL
+) RETURNS SETOF aggregate_event_type
+LANGUAGE plpgsql AS $$
+DECLARE
+  _aggregate_id aggregates.aggregate_id%TYPE;
+BEGIN
+  FOR _aggregate_id IN SELECT * FROM jsonb_array_elements_text(_aggregate_ids) LOOP
+    -- Use a single query to avoid race condition with UPDATEs to the events partition key
+    -- in case transaction isolation level is lower than repeatable read (the default of
+    -- PostgreSQL is read committed).
+    RETURN QUERY WITH
+      aggregate AS (
+        SELECT aggregate_types.type, aggregate_id, events_partition_key
+        FROM aggregates
+        JOIN aggregate_types ON aggregate_type_id = aggregate_types.id
+        WHERE aggregate_id = _aggregate_id
+      ),
+      snapshot AS (
+        SELECT *
+        FROM snapshot_records
+        WHERE _use_snapshots
+          AND aggregate_id = _aggregate_id
+          AND (_until IS NULL OR created_at < _until)
+        ORDER BY sequence_number DESC LIMIT 1
+      )
+      (SELECT a.*, s.snapshot_type, s.snapshot_json FROM aggregate a, snapshot s)
+      UNION ALL
+      (SELECT a.*, event_types.type, enrich_event_json(e)
+        FROM aggregate a
+        JOIN events e ON (a.events_partition_key, a.aggregate_id) = (e.partition_key, e.aggregate_id)
+        JOIN event_types ON e.event_type_id = event_types.id
+        WHERE e.sequence_number >= COALESCE((SELECT sequence_number FROM snapshot), 0)
+          AND (_until IS NULL OR e.created_at < _until)
+        ORDER BY e.sequence_number ASC);
+  END LOOP;
+END;
+$$;
+
+CREATE OR REPLACE FUNCTION store_command(_command jsonb) RETURNS bigint
+LANGUAGE plpgsql AS $$
+DECLARE
+  _id commands.id%TYPE;
+  _command_json jsonb = _command->'command_json';
+BEGIN
+  IF NOT EXISTS (SELECT 1 FROM command_types t WHERE t.type = _command->>'command_type') THEN
+    -- Only try inserting if it doesn't exist to avoid exhausting the id sequence
+    INSERT INTO command_types (type)
+    VALUES (_command->>'command_type')
+    ON CONFLICT DO NOTHING;
+  END IF;
+
+  INSERT INTO commands (
+    created_at, user_id, aggregate_id, command_type_id, command_json,
+    event_aggregate_id, event_sequence_number
+  ) VALUES (
+    (_command->>'created_at')::timestamptz,
+    (_command_json->>'user_id')::uuid,
+    (_command_json->>'aggregate_id')::uuid,
+    (SELECT id FROM command_types WHERE type = _command->>'command_type'),
+    (_command->'command_json') - '{command_type,created_at,organization_id,user_id,aggregate_id,event_aggregate_id,event_sequence_number}'::text[],
+    (_command_json->>'event_aggregate_id')::uuid,
+    NULLIF(_command_json->'event_sequence_number', 'null'::jsonb)::integer
+  ) RETURNING id INTO STRICT _id;
+  RETURN _id;
+END;
+$$;
+
+CREATE OR REPLACE PROCEDURE store_events(_command jsonb, _aggregates_with_events jsonb)
+LANGUAGE plpgsql AS $$
+DECLARE
+  _command_id commands.id%TYPE;
+  _aggregate jsonb;
+  _events jsonb;
+  _aggregate_id aggregates.aggregate_id%TYPE;
+  _aggregate_row aggregates%ROWTYPE;
+  _provided_events_partition_key aggregates.events_partition_key%TYPE;
+  _events_partition_key aggregates.events_partition_key%TYPE;
+  _snapshot_outdated_at aggregates_that_need_snapshots.snapshot_outdated_at%TYPE;
+BEGIN
+  _command_id = store_command(_command);
+
+  WITH types AS (
+    SELECT DISTINCT row->0->>'aggregate_type' AS type
+    FROM jsonb_array_elements(_aggregates_with_events) AS row
+  )
+  INSERT INTO aggregate_types (type)
+  SELECT type FROM types
+  WHERE type NOT IN (SELECT type FROM aggregate_types)
+  ORDER BY 1
+  ON CONFLICT DO NOTHING;
+
+  WITH types AS (
+    SELECT DISTINCT events->>'event_type' AS type
+    FROM jsonb_array_elements(_aggregates_with_events) AS row
+    CROSS JOIN LATERAL jsonb_array_elements(row->1) AS events
+  )
+  INSERT INTO event_types (type)
+  SELECT type FROM types
+  WHERE type NOT IN (SELECT type FROM event_types)
+  ORDER BY 1
+  ON CONFLICT DO NOTHING;
+
+  FOR _aggregate, _events IN SELECT row->0, row->1 FROM jsonb_array_elements(_aggregates_with_events) AS row
+    ORDER BY row->0->'aggregate_id', row->1->0->'event_json'->'sequence_number'
+  LOOP
+    _aggregate_id = _aggregate->>'aggregate_id';
+    _provided_events_partition_key = _aggregate->>'events_partition_key';
+    _snapshot_outdated_at = _aggregate->>'snapshot_outdated_at';
+
+    SELECT * INTO _aggregate_row FROM aggregates WHERE aggregate_id = _aggregate_id;
+    _events_partition_key = COALESCE(_provided_events_partition_key, _aggregate_row.events_partition_key, '');
+
+    INSERT INTO aggregates (aggregate_id, created_at, aggregate_type_id, events_partition_key)
+    VALUES (
+      _aggregate_id,
+      (_events->0->>'created_at')::timestamptz,
+      (SELECT id FROM aggregate_types WHERE type = _aggregate->>'aggregate_type'),
+      _events_partition_key
+    ) ON CONFLICT (aggregate_id)
+    DO UPDATE SET events_partition_key = EXCLUDED.events_partition_key
+    WHERE aggregates.events_partition_key IS DISTINCT FROM EXCLUDED.events_partition_key;
+
+    INSERT INTO events (partition_key, aggregate_id, sequence_number, created_at, command_id, event_type_id, event_json)
+    SELECT _events_partition_key,
+      _aggregate_id,
+      (event->'event_json'->'sequence_number')::integer,
+      (event->>'created_at')::timestamptz,
+      _command_id,
+      (SELECT id FROM event_types WHERE type = event->>'event_type'),
+      (event->'event_json') - '{aggregate_id,created_at,event_type,sequence_number}'::text[]
+    FROM jsonb_array_elements(_events) AS event;
+
+    IF _snapshot_outdated_at IS NOT NULL THEN
+      INSERT INTO aggregates_that_need_snapshots AS row (aggregate_id, snapshot_outdated_at)
+      VALUES (_aggregate_id, _snapshot_outdated_at)
+      ON CONFLICT (aggregate_id) DO UPDATE
+      SET snapshot_outdated_at = LEAST(row.snapshot_outdated_at, EXCLUDED.snapshot_outdated_at)
+      WHERE row.snapshot_outdated_at IS DISTINCT FROM EXCLUDED.snapshot_outdated_at;
+    END IF;
+  END LOOP;
+END;
+$$;
+
+CREATE OR REPLACE PROCEDURE store_snapshots(_snapshots jsonb)
+LANGUAGE plpgsql AS $$
+DECLARE
+  _aggregate_id uuid;
+  _snapshot jsonb;
+  _sequence_number snapshot_records.sequence_number%TYPE;
+BEGIN
+  FOR _snapshot IN SELECT * FROM jsonb_array_elements(_snapshots) LOOP
+    _aggregate_id = _snapshot->>'aggregate_id';
+    _sequence_number = _snapshot->'sequence_number';
+
+    INSERT INTO aggregates_that_need_snapshots AS row (aggregate_id, snapshot_sequence_number_high_water_mark)
+    VALUES (_aggregate_id, _sequence_number)
+    ON CONFLICT (aggregate_id) DO UPDATE
+    SET snapshot_sequence_number_high_water_mark =
+        GREATEST(row.snapshot_sequence_number_high_water_mark, EXCLUDED.snapshot_sequence_number_high_water_mark),
+      snapshot_outdated_at = NULL,
+      snapshot_scheduled_at = NULL;
+
+    INSERT INTO snapshot_records (aggregate_id, sequence_number, created_at, snapshot_type, snapshot_json)
+    VALUES (
+      _aggregate_id,
+      _sequence_number,
+      (_snapshot->>'created_at')::timestamptz,
+      _snapshot->>'snapshot_type',
+      _snapshot->'snapshot_json'
+    );
+  END LOOP;
+END;
+$$;
+
+CREATE OR REPLACE FUNCTION load_latest_snapshot(_aggregate_id uuid) RETURNS aggregate_event_type
+LANGUAGE SQL AS $$
+  SELECT (SELECT type FROM aggregate_types WHERE id = a.aggregate_type_id),
+    a.aggregate_id,
+    a.events_partition_key,
+    s.snapshot_type,
+    s.snapshot_json
+  FROM aggregates a JOIN snapshot_records s ON a.aggregate_id = s.aggregate_id
+  WHERE a.aggregate_id = _aggregate_id
+  ORDER BY s.sequence_number DESC
+  LIMIT 1;
+$$;
+
+CREATE OR REPLACE PROCEDURE delete_all_snapshots(_now timestamp with time zone DEFAULT NOW())
+LANGUAGE plpgsql AS $$
+BEGIN
+  UPDATE aggregates_that_need_snapshots
+  SET snapshot_outdated_at = _now
+  WHERE snapshot_outdated_at IS NULL;
+  DELETE FROM snapshot_records;
+END;
+$$;
+
+CREATE OR REPLACE PROCEDURE delete_snapshots_before(_aggregate_id uuid, _sequence_number integer, _now timestamp with time zone DEFAULT NOW())
+LANGUAGE plpgsql AS $$
+BEGIN
+  DELETE FROM snapshot_records
+  WHERE aggregate_id = _aggregate_id
+    AND sequence_number < _sequence_number;
+
+  UPDATE aggregates_that_need_snapshots
+  SET snapshot_outdated_at = _now
+  WHERE aggregate_id = _aggregate_id
+    AND snapshot_outdated_at IS NULL
+    AND NOT EXISTS (SELECT 1 FROM snapshot_records WHERE aggregate_id = _aggregate_id);
+END;
+$$;
+
+CREATE OR REPLACE FUNCTION aggregates_that_need_snapshots(_last_aggregate_id uuid, _limit integer)
+  RETURNS TABLE (aggregate_id uuid)
+LANGUAGE plpgsql AS $$
+BEGIN
+  RETURN QUERY SELECT a.aggregate_id
+  FROM aggregates_that_need_snapshots a
+  WHERE a.snapshot_outdated_at IS NOT NULL
+    AND (_last_aggregate_id IS NULL OR a.aggregate_id > _last_aggregate_id)
+  ORDER BY 1
+  LIMIT _limit;
+END;
+$$;
+
+CREATE OR REPLACE FUNCTION select_aggregates_for_snapshotting(_limit integer, _reschedule_snapshot_scheduled_before timestamp with time zone, _now timestamp with time zone DEFAULT NOW())
+  RETURNS TABLE (aggregate_id uuid)
+LANGUAGE plpgsql AS $$
+BEGIN
+  RETURN QUERY WITH scheduled AS MATERIALIZED (
+    SELECT a.aggregate_id
+    FROM aggregates_that_need_snapshots AS a
+    WHERE snapshot_outdated_at IS NOT NULL
+    ORDER BY snapshot_outdated_at ASC, snapshot_sequence_number_high_water_mark DESC, aggregate_id ASC
+    LIMIT _limit
+    FOR UPDATE
+  ) UPDATE aggregates_that_need_snapshots AS row
+  SET snapshot_scheduled_at = _now
+  FROM scheduled
+  WHERE row.aggregate_id = scheduled.aggregate_id
+    AND (row.snapshot_scheduled_at IS NULL OR row.snapshot_scheduled_at < _reschedule_snapshot_scheduled_before)
+  RETURNING row.aggregate_id;
+END;
+$$;
+
+CREATE OR REPLACE PROCEDURE permanently_delete_commands_without_events(_aggregate_id uuid, _organization_id uuid)
+LANGUAGE plpgsql AS $$
+BEGIN
+  IF _aggregate_id IS NULL AND _organization_id IS NULL THEN
+    RAISE EXCEPTION 'aggregate_id or organization_id must be specified to delete commands';
+  END IF;
+
+  DELETE FROM commands
+  WHERE (_aggregate_id IS NULL OR aggregate_id = _aggregate_id)
+    AND NOT EXISTS (SELECT 1 FROM events WHERE command_id = commands.id);
+END;
+$$;
+
+CREATE OR REPLACE PROCEDURE permanently_delete_event_streams(_aggregate_ids jsonb)
+LANGUAGE plpgsql AS $$
+BEGIN
+  DELETE FROM events
+  USING jsonb_array_elements_text(_aggregate_ids) AS ids (id)
+    JOIN aggregates ON ids.id::uuid = aggregates.aggregate_id
+  WHERE events.partition_key = aggregates.events_partition_key
+    AND events.aggregate_id = aggregates.aggregate_id;
+  DELETE FROM aggregates
+  USING jsonb_array_elements_text(_aggregate_ids) AS ids (id)
+  WHERE aggregates.aggregate_id = ids.id::uuid;
+END;
+$$;
+
+DROP VIEW IF EXISTS command_records;
+CREATE VIEW command_records (id, user_id, aggregate_id, command_type, command_json, created_at, event_aggregate_id, event_sequence_number) AS
+  SELECT id,
+    user_id,
+    aggregate_id,
+    (SELECT type FROM command_types WHERE command_types.id = command.command_type_id),
+    enrich_command_json(command),
+    created_at,
+    event_aggregate_id,
+    event_sequence_number
+  FROM commands command;
+
+DROP VIEW IF EXISTS event_records;
+CREATE VIEW event_records (aggregate_id, partition_key, sequence_number, created_at, event_type, event_json, command_record_id, xact_id) AS
+  SELECT aggregate.aggregate_id,
+    event.partition_key,
+    event.sequence_number,
+    event.created_at,
+    type.type,
+    enrich_event_json(event) AS event_json,
+    command_id,
+    event.xact_id
+  FROM events event
+  JOIN aggregates aggregate ON aggregate.aggregate_id = event.aggregate_id AND aggregate.events_partition_key = event.partition_key
+  JOIN event_types type ON event.event_type_id = type.id;
+
+DROP VIEW IF EXISTS stream_records;
+CREATE VIEW stream_records (aggregate_id, events_partition_key, aggregate_type, created_at) AS
+  SELECT aggregates.aggregate_id,
+    aggregates.events_partition_key,
+    aggregate_types.type,
+    aggregates.created_at
+  FROM aggregates JOIN aggregate_types ON aggregates.aggregate_type_id = aggregate_types.id;
+
+CREATE OR REPLACE FUNCTION save_events_on_delete_trigger() RETURNS TRIGGER AS $$
+BEGIN
+  INSERT INTO saved_event_records (operation, timestamp, "user", aggregate_id, partition_key, sequence_number, created_at, event_type, event_json, command_id, xact_id)
+  SELECT 'D',
+    statement_timestamp(),
+    user,
+    o.aggregate_id,
+    o.partition_key,
+    o.sequence_number,
+    o.created_at,
+    (SELECT type FROM event_types WHERE event_types.id = o.event_type_id),
+    o.event_json,
+    o.command_id,
+    o.xact_id
+  FROM old_table o;
+  RETURN NULL;
+END;
+$$ LANGUAGE plpgsql;
+
+CREATE OR REPLACE FUNCTION save_events_on_update_trigger() RETURNS TRIGGER AS $$
+BEGIN
+  INSERT INTO saved_event_records (operation, timestamp, "user", aggregate_id, partition_key, sequence_number, created_at, event_type, event_json, command_id, xact_id)
+  SELECT 'U',
+    statement_timestamp(),
+    user,
+    o.aggregate_id,
+    o.partition_key,
+    o.sequence_number,
+    o.created_at,
+    (SELECT type FROM event_types WHERE event_types.id = o.event_type_id),
+    o.event_json,
+    o.command_id,
+    o.xact_id
+  FROM old_table o LEFT JOIN new_table n ON o.aggregate_id = n.aggregate_id AND o.sequence_number = n.sequence_number
+  WHERE n IS NULL
+    -- Only save when event related information changes
+    OR o.created_at <> n.created_at
+    OR o.event_type_id <> n.event_type_id
+    OR o.event_json <> n.event_json;
+  RETURN NULL;
+END;
+$$ LANGUAGE plpgsql;
+
+CREATE OR REPLACE TRIGGER save_events_on_delete_trigger
+  AFTER DELETE ON events
+  REFERENCING OLD TABLE AS old_table
+  FOR EACH STATEMENT EXECUTE FUNCTION save_events_on_delete_trigger();
+CREATE OR REPLACE TRIGGER save_events_on_update_trigger
+  AFTER UPDATE ON events
+  REFERENCING OLD TABLE AS old_table NEW TABLE AS new_table
+  FOR EACH STATEMENT EXECUTE FUNCTION save_events_on_update_trigger();
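For orientation, the sketch below shows how the routines defined above could be exercised directly from psql. The UUID is a made-up placeholder, and Sequent normally calls these through its Ruby event store rather than by hand, so treat this purely as an illustration of the signatures:

-- Load the snapshot (if any) and subsequent events for one aggregate.
SELECT aggregate_type, event_type, event_json
  FROM load_events('["b6a7f4e0-0000-4000-8000-000000000000"]'::jsonb, TRUE, NULL);

-- Claim up to 10 aggregates with an outdated snapshot and mark them as scheduled.
SELECT * FROM select_aggregates_for_snapshotting(10, NOW() - INTERVAL '1 hour');

-- Drop every stored snapshot and flag all aggregates for re-snapshotting.
CALL delete_all_snapshots();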