pg_easy_replicate 0.2.7 → 0.3.0
- checksums.yaml +4 -4
- data/Gemfile.lock +11 -2
- data/README.md +54 -2
- data/docker-compose.yml +2 -0
- data/lib/pg_easy_replicate/cli.rb +79 -13
- data/lib/pg_easy_replicate/ddl_audit.rb +256 -0
- data/lib/pg_easy_replicate/ddl_manager.rb +56 -0
- data/lib/pg_easy_replicate/orchestrate.rb +33 -38
- data/lib/pg_easy_replicate/version.rb +1 -1
- data/lib/pg_easy_replicate.rb +11 -0
- data/scripts/e2e-start.sh +10 -2
- metadata +18 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 8b12c7664573414ae5b4421e16c2a4d603a1550abd52cd677fa97c0a3f4187d3
+  data.tar.gz: 03baf5fa2d60d841475024d8e6a4e822546a5f5180ddcdead5ee33ed89bc472b
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c18024373cdf4313723599eaee6f57d84b99ff52afffbbab3a393bae9e453ba43d300f8ec3424396af12fcbf40de46c92846110777a7a6d4ebb6fe9adae47f5f
+  data.tar.gz: 8f4e8dc80d25b072b6e44a4e17fc6d367fe27368a9643e4e0cc4a39dd846cd1abf247aedbc6e887b1285293939f967131e0d5254ae53751f584af2e13a72c783
data/Gemfile.lock
CHANGED
@@ -1,9 +1,10 @@
 PATH
   remote: .
   specs:
-    pg_easy_replicate (0.2.7)
+    pg_easy_replicate (0.3.0)
       ougai (~> 2.0.0)
       pg (~> 1.5.3)
+      pg_query (~> 5.1.0)
       sequel (>= 5.69, < 5.83)
       thor (>= 1.2.2, < 1.4.0)
 
@@ -14,6 +15,12 @@ GEM
     bigdecimal (3.1.8)
     coderay (1.1.3)
     diff-lcs (1.5.1)
+    google-protobuf (4.28.0-arm64-darwin)
+      bigdecimal
+      rake (>= 13)
+    google-protobuf (4.28.0-x86_64-linux)
+      bigdecimal
+      rake (>= 13)
     haml (6.1.1)
       temple (>= 0.8.2)
       thor
@@ -29,6 +36,8 @@ GEM
       ast (~> 2.4.1)
       racc
     pg (1.5.7)
+    pg_query (5.1.0)
+      google-protobuf (>= 3.22.3)
     prettier_print (1.2.1)
     pry (0.14.2)
       coderay (~> 1.1)
@@ -38,7 +47,7 @@ GEM
     rake (13.2.1)
     rbs (3.1.0)
     regexp_parser (2.9.2)
-    rexml (3.3.
+    rexml (3.3.6)
       strscan
     rspec (3.13.0)
       rspec-core (~> 3.13.0)
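The lockfile shows `pg_query` (and its `google-protobuf` dependency) arriving as a new runtime dependency. For applications that consume the gem via Bundler, a rough way to pick up 0.3.0 and confirm the new dependencies resolved (a sketch, assuming the gem is already in your Gemfile):

```bash
# Update to pg_easy_replicate 0.3.0, then confirm pg_query and google-protobuf
# landed in your own Gemfile.lock.
bundle update pg_easy_replicate
bundle list | grep -E "pg_query|google-protobuf"
```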
data/README.md
CHANGED
@@ -69,11 +69,13 @@ All [Logical Replication Restrictions](https://www.postgresql.org/docs/current/l
 
 ## Usage
 
-Ensure `SOURCE_DB_URL` and `TARGET_DB_URL` are present as environment variables in the runtime environment.
+Ensure `SOURCE_DB_URL` and `TARGET_DB_URL` are present as environment variables in the runtime environment.
+
 - `SOURCE_DB_URL` = The database that you want to replicate FROM.
 - `TARGET_DB_URL` = The database that you want to replicate TO.
 
 The URL should be in postgres connection string format. Example:
+
 ```bash
 $ export SOURCE_DB_URL="postgres://USERNAME:PASSWORD@localhost:5432/DATABASE_NAME"
 $ export TARGET_DB_URL="postgres://USERNAME:PASSWORD@localhost:5433/DATABASE_NAME"
@@ -174,12 +176,62 @@ Once the bootstrap is complete, you can start the sync. Starting the sync sets u
 
 **NOTE**: Start sync by default will drop all indices in the target database for performance reasons. And will automatically re-add the indices during `switchover`. It is turned on by default and you can opt out of this with `--no-recreate-indices-post-copy`
 
 ```bash
-$ pg_easy_replicate start_sync --group-name database-cluster-1
+$ pg_easy_replicate start_sync --group-name database-cluster-1 [-d <track-ddl>]
 
 {"name":"pg_easy_replicate","hostname":"PKHXQVK6DW","pid":22113,"level":30,"time":"2023-06-19T15:54:54.874-04:00","v":0,"msg":"Setting up publication","publication_name":"pger_publication_database_cluster_1","version":"0.1.0"}
 ...
 ```
 
+### DDL Changes Management
+
+`pg_easy_replicate` now supports tracking and applying DDL (Data Definition Language) changes between the source and target databases. To track DDLs, pass `--track-ddl` (or `-d`) to `start_sync`.
+
+This feature ensures that most schema changes made to the replicated source tables during the replication process are tracked, so that you can apply them at will before or after switchover.
+
+#### Listing DDL Changes
+
+To view the DDL changes that have been tracked:
+
+```bash
+$ pg_easy_replicate list_ddl_changes -g <group-name> [-l <limit>]
+```
+
+This command displays the tracked DDL changes in JSON format:
+
+```
+[
+  {
+    "id": 1,
+    "group_name": "cluster-1",
+    "event_type": "ddl_command_end",
+    "object_type": "table",
+    "object_identity": "public.pgbench_accounts",
+    "ddl_command": "ALTER TABLE public.pgbench_accounts ADD COLUMN test_column VARCHAR(255)",
+    "created_at": "2024-08-31 15:42:33 UTC"
+  }
+]
+```
+
+#### Applying DDL Changes
+
+`pg_easy_replicate` won't automatically apply the changes for you. To apply the tracked DDL changes to the target database:
+
+```bash
+$ pg_easy_replicate apply_ddl_change -g <group-name> [-i <change-id>]
+```
+
+If you specify a change ID with the `-i` option, only that specific change will be applied. If you don't specify an ID, you'll be prompted to apply all pending changes.
+
+```bash
+$ pg_easy_replicate apply_ddl_change -g cluster-1
+The following DDL changes will be applied:
+ID: 1, Type: table, Command: ALTER TABLE public.pgbench_accounts ADD COLUMN test_column VARCHAR(255)...
+
+Do you want to apply all these changes? (y/n): y
+...
+All pending DDL changes applied successfully.
+```
+
 ### Stats
 
 You can inspect or watch stats any time during the sync process. The stats give you an idea of when the sync started, current flush/write lag, how many tables are in `replicating`, `copying` or other stages, and more.
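Taken together, the new README section describes a workflow along these lines; a minimal sketch, assuming a group named `database-cluster-1` and the `pgbench_accounts` table used elsewhere in this release:

```bash
# Start the sync with DDL tracking enabled, make a schema change on the source,
# then review and apply the captured DDL on the target.
pg_easy_replicate start_sync --group-name database-cluster-1 --track-ddl
psql "$SOURCE_DB_URL" -c "ALTER TABLE public.pgbench_accounts ADD COLUMN test_column VARCHAR(255)"
pg_easy_replicate list_ddl_changes -g database-cluster-1 -l 10
pg_easy_replicate apply_ddl_change -g database-cluster-1   # prompts before applying all pending changes
```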
data/docker-compose.yml
CHANGED
@@ -9,6 +9,7 @@ services:
       POSTGRES_PASSWORD: james-bond123@7!'3aaR
       POSTGRES_DB: postgres-db
     command: >
+      -c max_connections=200
       -c wal_level=logical
       -c ssl=on
       -c ssl_cert_file=/etc/ssl/certs/ssl-cert-snakeoil.pem
@@ -25,6 +26,7 @@ services:
       POSTGRES_PASSWORD: james-bond123@7!'3aaR
       POSTGRES_DB: postgres-db
     command: >
+      -c max_connections=200
       -c wal_level=logical
       -c ssl=on
       -c ssl_cert_file=/etc/ssl/certs/ssl-cert-snakeoil.pem
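Both containers now start with `max_connections=200` alongside the existing `wal_level=logical`. A quick way to confirm the settings took effect, assuming the connection URLs exported in `scripts/e2e-start.sh`:

```bash
# Verify the bumped connection limit and the logical WAL level on the source container.
psql "$SOURCE_DB_URL" -c "SHOW max_connections;"   # expected: 200
psql "$SOURCE_DB_URL" -c "SHOW wal_level;"         # expected: logical
```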
data/lib/pg_easy_replicate/cli.rb
CHANGED
@@ -54,6 +54,11 @@ module PgEasyReplicate
       aliases: "-c",
       boolean: true,
       desc: "Copy schema to the new database"
+    method_option :track_ddl,
+      aliases: "-d",
+      type: :boolean,
+      default: false,
+      desc: "Enable DDL tracking for the group"
     desc "bootstrap",
          "Sets up temporary tables for information required during runtime"
     def bootstrap
@@ -83,11 +88,6 @@ module PgEasyReplicate
       aliases: "-g",
       required: true,
       desc: "Name of the group to provision"
-    method_option :group_name,
-      aliases: "-g",
-      required: true,
-      desc:
-        "Name of the grouping for this collection of source and target DB"
     method_option :schema_name,
       aliases: "-s",
       desc:
@@ -104,10 +104,14 @@ module PgEasyReplicate
         "Comma separated list of table names to exclude. Default: None"
     method_option :recreate_indices_post_copy,
       type: :boolean,
-      default:
+      default: false,
       aliases: "-r",
       desc:
         "Drop all non-primary indices before copy and recreate them post-copy"
+    method_option :track_ddl,
+      type: :boolean,
+      default: false,
+      desc: "Enable DDL tracking for the group"
     def start_sync
       PgEasyReplicate::Orchestrate.start_sync(options)
     end
@@ -122,7 +126,7 @@ module PgEasyReplicate
       PgEasyReplicate::Orchestrate.stop_sync(group_name: options[:group_name])
     end
 
-    desc "switchover
+    desc "switchover",
          "Puts the source database in read only mode after all the data is flushed and written"
     method_option :group_name,
       aliases: "-g",
@@ -130,21 +134,18 @@ module PgEasyReplicate
       desc: "Name of the group previously provisioned"
     method_option :lag_delta_size,
       aliases: "-l",
-      desc:
+      desc:
+        "The size of the lag to watch for before switchover. Default 200KB."
     method_option :skip_vacuum_analyze,
       type: :boolean,
       default: false,
       aliases: "-s",
       desc: "Skip vacuum analyzing tables before switchover."
-    # method_option :bi_directional,
-    #   aliases: "-b",
-    #   desc:
-    #     "Setup replication from target database to source database"
     def switchover
       PgEasyReplicate::Orchestrate.switchover(
         group_name: options[:group_name],
         lag_delta_size: options[:lag_delta_size],
-        skip_vacuum_analyze: options[:skip_vacuum_analyze]
+        skip_vacuum_analyze: options[:skip_vacuum_analyze],
       )
     end
 
@@ -162,6 +163,71 @@ module PgEasyReplicate
       end
     end
 
+    desc "list_ddl_changes", "Lists recent DDL changes in the source database"
+    method_option :group_name,
+      aliases: "-g",
+      required: true,
+      desc: "Name of the group"
+    method_option :limit,
+      aliases: "-l",
+      type: :numeric,
+      default: 100,
+      desc: "Limit the number of DDL changes to display"
+    def list_ddl_changes
+      changes =
+        PgEasyReplicate::DDLManager.list_ddl_changes(
+          group_name: options[:group_name],
+          limit: options[:limit],
+        )
+      puts JSON.pretty_generate(changes)
+    end
+
+    desc "apply_ddl_change", "Applies DDL changes to the target database"
+    method_option :group_name,
+      aliases: "-g",
+      required: true,
+      desc: "Name of the group"
+    method_option :id,
+      aliases: "-i",
+      type: :numeric,
+      desc:
+        "ID of the specific DDL change to apply. If not provided, all changes will be applied."
+    def apply_ddl_change
+      if options[:id]
+        PgEasyReplicate::DDLManager.apply_ddl_change(
+          group_name: options[:group_name],
+          id: options[:id],
+        )
+        puts "DDL change with ID #{options[:id]} applied successfully."
+      else
+        changes =
+          PgEasyReplicate::DDLManager.list_ddl_changes(
+            group_name: options[:group_name],
+          )
+        if changes.empty?
+          puts "No pending DDL changes to apply."
+          return
+        end
+
+        puts "The following DDL changes will be applied:"
+        changes.each do |change|
+          puts "ID: #{change[:id]}, Type: #{change[:object_type]}, Command: #{change[:ddl_command]}"
+        end
+        puts ""
+        print("Do you want to apply all these changes? (y/n): ")
+        confirmation = $stdin.gets.chomp.downcase
+
+        if confirmation == "y"
+          PgEasyReplicate::DDLManager.apply_all_ddl_changes(
+            group_name: options[:group_name],
+          )
+          puts "All pending DDL changes applied successfully."
+        else
+          puts "Operation cancelled."
+        end
+      end
+    end
+
     desc "version", "Prints the version"
     def version
       puts PgEasyReplicate::VERSION
data/lib/pg_easy_replicate/ddl_audit.rb
ADDED
@@ -0,0 +1,256 @@
+# frozen_string_literal: true
+
+require "pg_query"
+
+module PgEasyReplicate
+  class DDLAudit
+    extend Helper
+
+    class << self
+      def setup(group_name)
+        conn = connect_to_internal_schema
+        return if conn.table_exists?(table_name)
+
+        begin
+          conn.create_table(table_name) do
+            primary_key(:id)
+            String(:group_name, null: false)
+            String(:event_type, null: false)
+            String(:object_type)
+            String(:object_identity)
+            String(:ddl_command, text: true)
+            DateTime(:created_at, default: Sequel::CURRENT_TIMESTAMP)
+          end
+
+          create_trigger_function(conn, group_name)
+          create_event_triggers(conn, group_name)
+        rescue => e
+          abort_with("Failed to set up DDL audit: #{e.message}")
+        ensure
+          conn&.disconnect
+        end
+      end
+
+      def create(
+        group_name,
+        event_type,
+        object_type,
+        object_identity,
+        ddl_command
+      )
+        conn = connect_to_internal_schema
+        begin
+          conn[table_name].insert(
+            group_name: group_name,
+            event_type: event_type,
+            object_type: object_type,
+            object_identity: object_identity,
+            ddl_command: ddl_command,
+            created_at: Time.now.utc,
+          )
+        rescue => e
+          abort_with("Adding DDL audit entry failed: #{e.message}")
+        ensure
+          conn&.disconnect
+        end
+      end
+
+      def list_changes(group_name, limit: 100)
+        conn = connect_to_internal_schema
+        begin
+          conn[table_name]
+            .where(group_name: group_name)
+            .order(Sequel.desc(:id))
+            .limit(limit)
+            .all
+        rescue => e
+          abort_with("Listing DDL changes failed: #{e.message}")
+        ensure
+          conn&.disconnect
+        end
+      end
+
+      def apply_change(source_conn_string, target_conn_string, group_name, id)
+        ddl_queries = fetch_ddl_query(source_conn_string, group_name, id: id)
+        apply_ddl_changes(target_conn_string, ddl_queries)
+      end
+
+      def apply_all_changes(source_conn_string, target_conn_string, group_name)
+        ddl_queries = fetch_ddl_query(source_conn_string, group_name)
+        apply_ddl_changes(target_conn_string, ddl_queries)
+      end
+
+      def drop(group_name)
+        conn = connect_to_internal_schema
+        begin
+          drop_event_triggers(conn, group_name)
+          drop_trigger_function(conn, group_name)
+          conn[table_name].where(group_name: group_name).delete
+        rescue => e
+          abort_with("Dropping DDL audit failed: #{e.message}")
+        ensure
+          conn&.disconnect
+        end
+      end
+
+      private
+
+      def table_name
+        :pger_ddl_audits
+      end
+
+      def connect_to_internal_schema(conn_string = nil)
+        Query.connect(
+          connection_url: conn_string || source_db_url,
+          schema: internal_schema_name,
+        )
+      end
+
+      def create_trigger_function(conn, group_name)
+        group = PgEasyReplicate::Group.find(group_name)
+        tables = group[:table_names].split(",").map(&:strip)
+        schema_name = group[:schema_name]
+        sanitized_group_name = sanitize_identifier(group_name)
+
+        full_table_names = tables.map { |table| "#{schema_name}.#{table}" }
+        puts "full_table_names: #{full_table_names}"
+        conn.run(<<~SQL)
+          CREATE OR REPLACE FUNCTION #{internal_schema_name}.pger_ddl_trigger_#{sanitized_group_name}() RETURNS event_trigger AS $$
+          DECLARE
+            obj record;
+            ddl_command text;
+            affected_table text;
+          BEGIN
+            SELECT current_query() INTO ddl_command;
+
+            IF TG_EVENT = 'ddl_command_end' THEN
+              FOR obj IN SELECT * FROM pg_event_trigger_ddl_commands()
+              LOOP
+                IF obj.object_type = 'table' AND obj.object_identity = ANY(ARRAY['#{full_table_names.join("','")}']) THEN
+                  INSERT INTO #{internal_schema_name}.#{table_name} (group_name, event_type, object_type, object_identity, ddl_command)
+                  VALUES ('#{group_name}', TG_EVENT, obj.object_type, obj.object_identity, ddl_command);
+                ELSIF obj.object_type = 'index' THEN
+                  SELECT (regexp_match(ddl_command, 'ON\\s+(\\S+)'))[1] INTO affected_table;
+                  IF affected_table = ANY(ARRAY['#{full_table_names.join("','")}']) THEN
+                    INSERT INTO #{internal_schema_name}.#{table_name} (group_name, event_type, object_type, object_identity, ddl_command)
+                    VALUES ('#{group_name}', TG_EVENT, obj.object_type, obj.object_identity, ddl_command);
+                  END IF;
+                END IF;
+              END LOOP;
+            ELSIF TG_EVENT = 'sql_drop' THEN
+              FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects()
+              LOOP
+                IF obj.object_type IN ('table', 'index') AND
+                   (obj.object_identity = ANY(ARRAY['#{full_table_names.join("','")}']) OR
+                    obj.object_identity ~ ('^' || '#{schema_name}' || '\\.(.*?)_.*$'))
+                THEN
+                  INSERT INTO #{internal_schema_name}.#{table_name} (group_name, event_type, object_type, object_identity, ddl_command)
+                  VALUES ('#{group_name}', TG_EVENT, obj.object_type, obj.object_identity, ddl_command);
+                END IF;
+              END LOOP;
+            ELSIF TG_EVENT = 'table_rewrite' THEN
+              FOR obj IN SELECT * FROM pg_event_trigger_table_rewrite_oid()
+              LOOP
+                IF obj.oid::regclass::text = ANY(ARRAY['#{full_table_names.join("','")}']) THEN
+                  INSERT INTO #{internal_schema_name}.#{table_name} (group_name, event_type, object_type, object_identity, ddl_command)
+                  VALUES ('#{group_name}', TG_EVENT, 'table', obj.oid::regclass::text, ddl_command);
+                END IF;
+              END LOOP;
+            END IF;
+          END;
+          $$ LANGUAGE plpgsql;
+        SQL
+      rescue => e
+        abort_with("Creating DDL trigger function failed: #{e.message}")
+      end
+
+      def create_event_triggers(conn, group_name)
+        sanitized_group_name = sanitize_identifier(group_name)
+        conn.run(<<~SQL)
+          DROP EVENT TRIGGER IF EXISTS pger_ddl_trigger_#{sanitized_group_name};
+          CREATE EVENT TRIGGER pger_ddl_trigger_#{sanitized_group_name} ON ddl_command_end
+          EXECUTE FUNCTION #{internal_schema_name}.pger_ddl_trigger_#{sanitized_group_name}();
+
+          DROP EVENT TRIGGER IF EXISTS pger_drop_trigger_#{sanitized_group_name};
+          CREATE EVENT TRIGGER pger_drop_trigger_#{sanitized_group_name} ON sql_drop
+          EXECUTE FUNCTION #{internal_schema_name}.pger_ddl_trigger_#{sanitized_group_name}();
+
+          DROP EVENT TRIGGER IF EXISTS pger_table_rewrite_trigger_#{sanitized_group_name};
+          CREATE EVENT TRIGGER pger_table_rewrite_trigger_#{sanitized_group_name} ON table_rewrite
+          EXECUTE FUNCTION #{internal_schema_name}.pger_ddl_trigger_#{sanitized_group_name}();
+        SQL
+      rescue => e
+        abort_with("Creating event triggers failed: #{e.message}")
+      end
+
+      def drop_event_triggers(conn, group_name)
+        sanitized_group_name = sanitize_identifier(group_name)
+        conn.run(<<~SQL)
+          DROP EVENT TRIGGER IF EXISTS pger_ddl_trigger_#{sanitized_group_name};
+          DROP EVENT TRIGGER IF EXISTS pger_drop_trigger_#{sanitized_group_name};
+          DROP EVENT TRIGGER IF EXISTS pger_table_rewrite_trigger_#{sanitized_group_name};
+        SQL
+      rescue => e
+        abort_with("Dropping event triggers failed: #{e.message}")
+      end
+
+      def drop_trigger_function(conn, group_name)
+        sanitized_group_name = sanitize_identifier(group_name)
+        conn.run(
+          "DROP FUNCTION IF EXISTS #{internal_schema_name}.pger_ddl_trigger_#{sanitized_group_name}();",
+        )
+      rescue => e
+        abort_with("Dropping trigger function failed: #{e.message}")
+      end
+
+      def self.extract_table_info(sql)
+        parsed = PgQuery.parse(sql)
+        stmt = parsed.tree.stmts.first.stmt
+
+        case stmt
+        when PgQuery::CreateStmt, PgQuery::IndexStmt, PgQuery::AlterTableStmt
+          schema_name = stmt.relation.schemaname || "public"
+          table_name = stmt.relation.relname
+          "#{schema_name}.#{table_name}"
+        end
+      rescue PgQuery::ParseError
+        nil
+      end
+
+      def sanitize_identifier(identifier)
+        identifier.gsub(/[^a-zA-Z0-9_]/, "_")
+      end
+
+      def fetch_ddl_query(source_conn_string, group_name, id: nil)
+        source_conn = connect_to_internal_schema(source_conn_string)
+        begin
+          query = source_conn[table_name].where(group_name: group_name)
+          query = query.where(id: id) if id
+          result = query.order(:id).select_map(:ddl_command)
+          result.uniq
+        rescue => e
+          abort_with("Fetching DDL queries failed: #{e.message}")
+        ensure
+          source_conn&.disconnect
+        end
+      end
+
+      def apply_ddl_changes(target_conn_string, ddl_queries)
+        target_conn = Query.connect(connection_url: target_conn_string)
+        begin
+          ddl_queries.each do |query|
+            target_conn.run(query)
+          rescue => e
+            abort_with(
+              "Error executing DDL command: #{query}. Error: #{e.message}",
+            )
+          end
+        rescue => e
+          abort_with("Applying DDL changes failed: #{e.message}")
+        ensure
+          target_conn&.disconnect
+        end
+      end
+    end
+  end
+end
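The event triggers above fire on `ddl_command_end`, `sql_drop`, and `table_rewrite` and record statements touching the group's tables into the audit table. A hedged smoke test of that capture path (group, table, index, and column names below are illustrative, reusing the pgbench fixtures from the e2e script):

```bash
# Run DDL against a tracked table on the source, then list what the audit
# triggers recorded for the group.
psql "$SOURCE_DB_URL" -c "CREATE INDEX pgbench_accounts_bid_idx ON public.pgbench_accounts (bid)"
psql "$SOURCE_DB_URL" -c "DROP INDEX public.pgbench_accounts_bid_idx"
bundle exec bin/pg_easy_replicate list_ddl_changes -g cluster-1 -l 5
```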
data/lib/pg_easy_replicate/ddl_manager.rb
ADDED
@@ -0,0 +1,56 @@
+# frozen_string_literal: true
+
+module PgEasyReplicate
+  module DDLManager
+    extend Helper
+
+    class << self
+      def setup_ddl_tracking(
+        group_name:, conn_string: source_db_url,
+        schema: "public"
+      )
+        DDLAudit.setup(group_name)
+      end
+
+      def cleanup_ddl_tracking(
+        group_name:, conn_string: source_db_url,
+        schema: "public"
+      )
+        DDLAudit.drop(group_name)
+      end
+
+      def list_ddl_changes(
+        group_name:, conn_string: source_db_url,
+        schema: "public",
+        limit: 100
+      )
+        DDLAudit.list_changes(group_name, limit: limit)
+      end
+
+      def apply_ddl_change(
+        group_name:, id:, source_conn_string: source_db_url,
+        target_conn_string: target_db_url,
+        schema: "public"
+      )
+        DDLAudit.apply_change(
+          source_conn_string,
+          target_conn_string,
+          group_name,
+          id,
+        )
+      end
+
+      def apply_all_ddl_changes(
+        group_name:, source_conn_string: source_db_url,
+        target_conn_string: target_db_url,
+        schema: "public"
+      )
+        DDLAudit.apply_all_changes(
+          source_conn_string,
+          target_conn_string,
+          group_name,
+        )
+      end
+    end
+  end
+end
data/lib/pg_easy_replicate/orchestrate.rb
CHANGED
@@ -15,7 +15,7 @@ module PgEasyReplicate
         schema: schema_name,
         conn_string: source_db_url,
         list: options[:tables],
-        exclude_list: options[:exclude_tables]
+        exclude_list: options[:exclude_tables],
       )
 
       if options[:recreate_indices_post_copy]
@@ -52,25 +52,19 @@ module PgEasyReplicate
         started_at: Time.now.utc,
         recreate_indices_post_copy: options[:recreate_indices_post_copy],
       )
-    rescue => e
-      stop_sync(
-        group_name: options[:group_name],
-        source_conn_string: source_db_url,
-        target_conn_string: target_db_url,
-      )
 
-      if
-
-
-
-
-        table_names: tables.join(","),
-        schema_name: schema_name,
-        started_at: Time.now.utc,
-        failed_at: Time.now.utc,
+      if options[:track_ddl]
+        DDLManager.setup_ddl_tracking(
+          conn_string: source_db_url,
+          group_name: options[:group_name],
+          schema: schema_name,
         )
       end
-
+    rescue => e
+      stop_sync(group_name: options[:group_name])
+      if Group.find(options[:group_name])
+        Group.update(name: options[:group_name], failed_at: Time.now.utc)
+      end
       abort_with("Starting sync failed: #{e.message}")
     end
 
@@ -180,8 +174,8 @@ module PgEasyReplicate
 
     def stop_sync(
       group_name:,
-      source_conn_string:
-      target_conn_string:
+      source_conn_string: nil,
+      target_conn_string: nil
     )
       logger.info(
         "Stopping sync",
@@ -192,29 +186,34 @@ module PgEasyReplicate
       )
       drop_publication(
         group_name: group_name,
-        conn_string: source_conn_string,
+        conn_string: source_conn_string || source_db_url,
       )
       drop_subscription(
         group_name: group_name,
-        target_conn_string: target_conn_string,
+        target_conn_string: target_conn_string || target_db_url,
      )
     rescue => e
-
+      abort_with("Unable to stop sync: #{e.message}")
     end
 
     def switchover(
       group_name:,
-      source_conn_string: source_db_url,
-      target_conn_string: target_db_url,
       lag_delta_size: nil,
-      skip_vacuum_analyze: false
+      skip_vacuum_analyze: false,
+      source_conn_string: nil,
+      target_conn_string: nil
     )
       group = Group.find(group_name)
+      abort_with("Group not found: #{group_name}") unless group
+
       tables_list = group[:table_names].split(",")
 
+      source_conn = source_conn_string || source_db_url
+      target_conn = target_conn_string || target_db_url
+
       unless skip_vacuum_analyze
         run_vacuum_analyze(
-          conn_string:
+          conn_string: target_conn,
           tables: tables_list,
           schema: group[:schema_name],
         )
@@ -225,39 +224,35 @@ module PgEasyReplicate
       if group[:recreate_indices_post_copy]
         IndexManager.wait_for_replication_completion(group_name: group_name)
         IndexManager.recreate_indices(
-          source_conn_string:
-          target_conn_string:
+          source_conn_string: source_conn,
+          target_conn_string: target_conn,
           tables: tables_list,
           schema: group[:schema_name],
         )
       end
 
-      # Watch for lag again, because it could've grown during index recreation
       watch_lag(group_name: group_name, lag: lag_delta_size || DEFAULT_LAG)
 
       revoke_connections_on_source_db(group_name)
       wait_for_remaining_catchup(group_name)
-      refresh_sequences(
-        conn_string: target_conn_string,
-        schema: group[:schema_name],
-      )
+      refresh_sequences(conn_string: target_conn, schema: group[:schema_name])
       mark_switchover_complete(group_name)
 
       unless skip_vacuum_analyze
         run_vacuum_analyze(
-          conn_string:
+          conn_string: target_conn,
           tables: tables_list,
          schema: group[:schema_name],
         )
       end
+
       drop_subscription(
         group_name: group_name,
-        target_conn_string:
+        target_conn_string: target_conn,
       )
     rescue => e
       restore_connections_on_source_db(group_name)
-
-      abort_with("Switchover sync failed: #{e.message}")
+      abort_with("Switchover failed: #{e.message}")
     end
 
     def watch_lag(group_name:, wait_time: DEFAULT_WAIT, lag: DEFAULT_LAG)
data/lib/pg_easy_replicate.rb
CHANGED
@@ -6,6 +6,7 @@ require "pg"
 require "sequel"
 require "open3"
 require "English"
+require "pg_query"
 
 require "pg_easy_replicate/helper"
 require "pg_easy_replicate/version"
@@ -15,6 +16,8 @@ require "pg_easy_replicate/orchestrate"
 require "pg_easy_replicate/stats"
 require "pg_easy_replicate/group"
 require "pg_easy_replicate/cli"
+require "pg_easy_replicate/ddl_audit"
+require "pg_easy_replicate/ddl_manager"
 
 Sequel.default_timezone = :utc
 module PgEasyReplicate
@@ -200,6 +203,14 @@ module PgEasyReplicate
       logger.info("Dropping replication user on target database")
       drop_user(conn_string: target_db_url)
     end
+    -> do
+      if options[:everything]
+        PgEasyReplicate::DDLManager.cleanup_ddl_tracking(
+          conn_string: source_db_url,
+          group_name: options[:group_name],
+        )
+      end
+    end
   end,
 ]
 
data/scripts/e2e-start.sh
CHANGED
@@ -10,9 +10,17 @@ export SOURCE_DB_URL="postgres://james-bond:james-bond123%407%21%273aaR@localhos
 export TARGET_DB_URL="postgres://james-bond:james-bond123%407%21%273aaR@localhost:5433/postgres-db"
 export PGPASSWORD='james-bond123@7!'"'"''"'"'3aaR'
 
-# Config check, Bootstrap and cleanup
 echo "===== Performing Bootstrap and cleanup"
 bundle exec bin/pg_easy_replicate bootstrap -g cluster-1 --copy-schema
-bundle exec bin/pg_easy_replicate start_sync -g cluster-1 -s public --recreate-indices-post-copy
+bundle exec bin/pg_easy_replicate start_sync -g cluster-1 -s public --recreate-indices-post-copy --track-ddl
 bundle exec bin/pg_easy_replicate stats -g cluster-1
+
+echo "===== Applying DDL change"
+psql $SOURCE_DB_URL -c "ALTER TABLE public.pgbench_accounts ADD COLUMN test_column VARCHAR(255)"
+
+echo "===== Applying DDL changes"
+echo "Y" | bundle exec bin/pg_easy_replicate apply_ddl_change -g cluster-1
+
+# Switchover
+echo "===== Performing switchover"
 bundle exec bin/pg_easy_replicate switchover -g cluster-1
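One possible follow-up check for the e2e run: after `apply_ddl_change` and `switchover`, the column added on the source should exist on the target as well (table and column names come from the script above):

```bash
# Confirm the tracked ALTER TABLE made it to the target database.
psql "$TARGET_DB_URL" -c "\d public.pgbench_accounts"   # test_column should be listed
```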
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: pg_easy_replicate
 version: !ruby/object:Gem::Version
-  version: 0.2.7
+  version: 0.3.0
 platform: ruby
 authors:
 - Shayon Mukherjee
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2024-08-
+date: 2024-08-31 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: ougai
@@ -38,6 +38,20 @@ dependencies:
     - - "~>"
       - !ruby/object:Gem::Version
         version: 1.5.3
+- !ruby/object:Gem::Dependency
+  name: pg_query
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 5.1.0
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 5.1.0
 - !ruby/object:Gem::Dependency
   name: sequel
   requirement: !ruby/object:Gem::Requirement
@@ -274,6 +288,8 @@ files:
 - docker-compose.yml
 - lib/pg_easy_replicate.rb
 - lib/pg_easy_replicate/cli.rb
+- lib/pg_easy_replicate/ddl_audit.rb
+- lib/pg_easy_replicate/ddl_manager.rb
 - lib/pg_easy_replicate/group.rb
 - lib/pg_easy_replicate/helper.rb
 - lib/pg_easy_replicate/index_manager.rb