@powersync/service-module-postgres 0.0.0-dev-20240918092408

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. package/CHANGELOG.md +18 -0
  2. package/LICENSE +67 -0
  3. package/README.md +3 -0
  4. package/dist/api/PostgresRouteAPIAdapter.d.ts +22 -0
  5. package/dist/api/PostgresRouteAPIAdapter.js +273 -0
  6. package/dist/api/PostgresRouteAPIAdapter.js.map +1 -0
  7. package/dist/auth/SupabaseKeyCollector.d.ts +22 -0
  8. package/dist/auth/SupabaseKeyCollector.js +64 -0
  9. package/dist/auth/SupabaseKeyCollector.js.map +1 -0
  10. package/dist/index.d.ts +3 -0
  11. package/dist/index.js +4 -0
  12. package/dist/index.js.map +1 -0
  13. package/dist/module/PostgresModule.d.ts +14 -0
  14. package/dist/module/PostgresModule.js +108 -0
  15. package/dist/module/PostgresModule.js.map +1 -0
  16. package/dist/replication/ConnectionManagerFactory.d.ts +10 -0
  17. package/dist/replication/ConnectionManagerFactory.js +21 -0
  18. package/dist/replication/ConnectionManagerFactory.js.map +1 -0
  19. package/dist/replication/PgManager.d.ts +25 -0
  20. package/dist/replication/PgManager.js +60 -0
  21. package/dist/replication/PgManager.js.map +1 -0
  22. package/dist/replication/PgRelation.d.ts +6 -0
  23. package/dist/replication/PgRelation.js +27 -0
  24. package/dist/replication/PgRelation.js.map +1 -0
  25. package/dist/replication/PostgresErrorRateLimiter.d.ts +11 -0
  26. package/dist/replication/PostgresErrorRateLimiter.js +43 -0
  27. package/dist/replication/PostgresErrorRateLimiter.js.map +1 -0
  28. package/dist/replication/WalStream.d.ts +53 -0
  29. package/dist/replication/WalStream.js +536 -0
  30. package/dist/replication/WalStream.js.map +1 -0
  31. package/dist/replication/WalStreamReplicationJob.d.ts +27 -0
  32. package/dist/replication/WalStreamReplicationJob.js +131 -0
  33. package/dist/replication/WalStreamReplicationJob.js.map +1 -0
  34. package/dist/replication/WalStreamReplicator.d.ts +13 -0
  35. package/dist/replication/WalStreamReplicator.js +36 -0
  36. package/dist/replication/WalStreamReplicator.js.map +1 -0
  37. package/dist/replication/replication-index.d.ts +5 -0
  38. package/dist/replication/replication-index.js +6 -0
  39. package/dist/replication/replication-index.js.map +1 -0
  40. package/dist/replication/replication-utils.d.ts +32 -0
  41. package/dist/replication/replication-utils.js +272 -0
  42. package/dist/replication/replication-utils.js.map +1 -0
  43. package/dist/types/types.d.ts +76 -0
  44. package/dist/types/types.js +110 -0
  45. package/dist/types/types.js.map +1 -0
  46. package/dist/utils/migration_lib.d.ts +11 -0
  47. package/dist/utils/migration_lib.js +64 -0
  48. package/dist/utils/migration_lib.js.map +1 -0
  49. package/dist/utils/pgwire_utils.d.ts +16 -0
  50. package/dist/utils/pgwire_utils.js +70 -0
  51. package/dist/utils/pgwire_utils.js.map +1 -0
  52. package/dist/utils/populate_test_data.d.ts +8 -0
  53. package/dist/utils/populate_test_data.js +65 -0
  54. package/dist/utils/populate_test_data.js.map +1 -0
  55. package/package.json +49 -0
  56. package/src/api/PostgresRouteAPIAdapter.ts +307 -0
  57. package/src/auth/SupabaseKeyCollector.ts +70 -0
  58. package/src/index.ts +5 -0
  59. package/src/module/PostgresModule.ts +122 -0
  60. package/src/replication/ConnectionManagerFactory.ts +28 -0
  61. package/src/replication/PgManager.ts +70 -0
  62. package/src/replication/PgRelation.ts +31 -0
  63. package/src/replication/PostgresErrorRateLimiter.ts +44 -0
  64. package/src/replication/WalStream.ts +639 -0
  65. package/src/replication/WalStreamReplicationJob.ts +142 -0
  66. package/src/replication/WalStreamReplicator.ts +45 -0
  67. package/src/replication/replication-index.ts +5 -0
  68. package/src/replication/replication-utils.ts +329 -0
  69. package/src/types/types.ts +159 -0
  70. package/src/utils/migration_lib.ts +79 -0
  71. package/src/utils/pgwire_utils.ts +73 -0
  72. package/src/utils/populate_test_data.ts +77 -0
  73. package/test/src/__snapshots__/pg_test.test.ts.snap +256 -0
  74. package/test/src/env.ts +7 -0
  75. package/test/src/large_batch.test.ts +195 -0
  76. package/test/src/pg_test.test.ts +450 -0
  77. package/test/src/schema_changes.test.ts +543 -0
  78. package/test/src/setup.ts +7 -0
  79. package/test/src/slow_tests.test.ts +335 -0
  80. package/test/src/util.ts +105 -0
  81. package/test/src/validation.test.ts +64 -0
  82. package/test/src/wal_stream.test.ts +319 -0
  83. package/test/src/wal_stream_utils.ts +121 -0
  84. package/test/tsconfig.json +28 -0
  85. package/tsconfig.json +31 -0
  86. package/tsconfig.tsbuildinfo +1 -0
  87. package/vitest.config.ts +9 -0
package/src/replication/WalStreamReplicationJob.ts
@@ -0,0 +1,142 @@
+ import { MissingReplicationSlotError, WalStream } from './WalStream.js';
+ import { container } from '@powersync/lib-services-framework';
+ import { PgManager } from './PgManager.js';
+
+ import { replication } from '@powersync/service-core';
+ import { ConnectionManagerFactory } from './ConnectionManagerFactory.js';
+
+ export interface WalStreamReplicationJobOptions extends replication.AbstractReplicationJobOptions {
+   connectionFactory: ConnectionManagerFactory;
+ }
+
+ export class WalStreamReplicationJob extends replication.AbstractReplicationJob {
+   private connectionFactory: ConnectionManagerFactory;
+   private readonly connectionManager: PgManager;
+
+   constructor(options: WalStreamReplicationJobOptions) {
+     super(options);
+     this.connectionFactory = options.connectionFactory;
+     this.connectionManager = this.connectionFactory.create({
+       // Pool connections are only used intermittently.
+       idleTimeout: 30_000,
+       maxSize: 2
+     });
+   }
+
+   /**
+    * Postgres on RDS performs a WAL checkpoint every 5 minutes by default, which creates a new 64MB file.
+    *
+    * Old WAL files are only deleted once no replication slot still references them.
+    *
+    * Unfortunately, when there are no changes to the db, the database creates new WAL files without the replication slot
+    * advancing**.
+    *
+    * As a workaround, we write a new message every couple of minutes, to make sure that the replication slot advances.
+    *
+    * **This may be a bug in pgwire or how we're using it.
+    */
+   async keepAlive() {
+     try {
+       await this.connectionManager.pool.query(`SELECT * FROM pg_logical_emit_message(false, 'powersync', 'ping')`);
+     } catch (e) {
+       this.logger.warn(`KeepAlive failed, unable to post to WAL`, e);
+     }
+   }
+
+   get slotName() {
+     return this.options.storage.slot_name;
+   }
+
+   async replicate() {
+     try {
+       await this.replicateLoop();
+     } catch (e) {
+       // Fatal exception
+       container.reporter.captureException(e, {
+         metadata: {
+           replication_slot: this.slotName
+         }
+       });
+       this.logger.error(`Replication failed on ${this.slotName}`, e);
+
+       if (e instanceof MissingReplicationSlotError) {
+         // This stops replication on this slot, and creates a new slot
+         await this.options.storage.factory.slotRemoved(this.slotName);
+       }
+     } finally {
+       this.abortController.abort();
+     }
+   }
+
+   async replicateLoop() {
+     while (!this.isStopped) {
+       await this.replicateOnce();
+
+       if (!this.isStopped) {
+         await new Promise((resolve) => setTimeout(resolve, 5000));
+       }
+     }
+   }
+
+   async replicateOnce() {
+     // New connections on every iteration (every error with retry),
+     // otherwise we risk repeating errors related to the connection,
+     // such as caused by cached PG schemas.
+     const connectionManager = this.connectionFactory.create({
+       // Pool connections are only used intermittently.
+       idleTimeout: 30_000,
+       maxSize: 2
+     });
+     try {
+       await this.rateLimiter?.waitUntilAllowed({ signal: this.abortController.signal });
+       if (this.isStopped) {
+         return;
+       }
+       const stream = new WalStream({
+         abort_signal: this.abortController.signal,
+         storage: this.options.storage,
+         connections: connectionManager
+       });
+       await stream.replicate();
+     } catch (e) {
+       this.logger.error(`Replication error`, e);
+       if (e.cause != null) {
+         // Example:
+         // PgError.conn_ended: Unable to do postgres query on ended connection
+         //   at PgConnection.stream (file:///.../powersync/node_modules/.pnpm/github.com+kagis+pgwire@f1cb95f9a0f42a612bb5a6b67bb2eb793fc5fc87/node_modules/pgwire/mod.js:315:13)
+         //   at stream.next (<anonymous>)
+         //   at PgResult.fromStream (file:///.../powersync/node_modules/.pnpm/github.com+kagis+pgwire@f1cb95f9a0f42a612bb5a6b67bb2eb793fc5fc87/node_modules/pgwire/mod.js:1174:22)
+         //   at PgConnection.query (file:///.../powersync/node_modules/.pnpm/github.com+kagis+pgwire@f1cb95f9a0f42a612bb5a6b67bb2eb793fc5fc87/node_modules/pgwire/mod.js:311:21)
+         //   at WalStream.startInitialReplication (file:///.../powersync/powersync-service/lib/replication/WalStream.js:266:22)
+         //   ...
+         //   cause: TypeError: match is not iterable
+         //     at timestamptzToSqlite (file:///.../powersync/packages/jpgwire/dist/util.js:140:50)
+         //     at PgType.decode (file:///.../powersync/packages/jpgwire/dist/pgwire_types.js:25:24)
+         //     at PgConnection._recvDataRow (file:///.../powersync/packages/jpgwire/dist/util.js:88:22)
+         //     at PgConnection._recvMessages (file:///.../powersync/node_modules/.pnpm/github.com+kagis+pgwire@f1cb95f9a0f42a612bb5a6b67bb2eb793fc5fc87/node_modules/pgwire/mod.js:656:30)
+         //     at PgConnection._ioloopAttempt (file:///.../powersync/node_modules/.pnpm/github.com+kagis+pgwire@f1cb95f9a0f42a612bb5a6b67bb2eb793fc5fc87/node_modules/pgwire/mod.js:563:20)
+         //     at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
+         //     at async PgConnection._ioloop (file:///.../powersync/node_modules/.pnpm/github.com+kagis+pgwire@f1cb95f9a0f42a612bb5a6b67bb2eb793fc5fc87/node_modules/pgwire/mod.js:517:14),
+         //   [Symbol(pg.ErrorCode)]: 'conn_ended',
+         //   [Symbol(pg.ErrorResponse)]: undefined
+         // }
+         // Without this additional log, the cause would not be visible in the logs.
+         this.logger.error(`cause`, e.cause);
+       }
+       if (e instanceof MissingReplicationSlotError) {
+         throw e;
+       } else {
+         // Report the error if relevant, before retrying
+         container.reporter.captureException(e, {
+           metadata: {
+             replication_slot: this.slotName
+           }
+         });
+         // This sets the retry delay
+         this.rateLimiter?.reportError(e);
+       }
+     } finally {
+       await connectionManager.end();
+     }
+   }
+ }
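
The keep-alive workaround documented above amounts to periodically emitting a tiny logical message so that an otherwise idle database still gives the replication slot something to advance past. A minimal sketch of the same idea, assuming the standard node-postgres (`pg`) client rather than this package's pgwire pool; the function name is illustrative only:

```ts
import { Client } from 'pg';

// Emits a small, non-transactional logical message to the WAL. On an idle
// database this lets the replication slot advance past old WAL segments.
async function emitKeepAlive(client: Client): Promise<void> {
  await client.query(`SELECT pg_logical_emit_message(false, 'powersync', 'ping')`);
}

// Hypothetical usage: run every couple of minutes from a timer.
// setInterval(() => emitKeepAlive(client).catch(console.error), 2 * 60_000);
```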
package/src/replication/WalStreamReplicator.ts
@@ -0,0 +1,45 @@
+ import { storage, replication } from '@powersync/service-core';
+ import { WalStreamReplicationJob } from './WalStreamReplicationJob.js';
+ import { ConnectionManagerFactory } from './ConnectionManagerFactory.js';
+ import { cleanUpReplicationSlot } from './replication-utils.js';
+
+ export interface WalStreamReplicatorOptions extends replication.AbstractReplicatorOptions {
+   connectionFactory: ConnectionManagerFactory;
+ }
+
+ export class WalStreamReplicator extends replication.AbstractReplicator<WalStreamReplicationJob> {
+   private readonly connectionFactory: ConnectionManagerFactory;
+
+   constructor(options: WalStreamReplicatorOptions) {
+     super(options);
+     this.connectionFactory = options.connectionFactory;
+   }
+
+   createJob(options: replication.CreateJobOptions): WalStreamReplicationJob {
+     return new WalStreamReplicationJob({
+       id: this.createJobId(options.storage.group_id),
+       storage: options.storage,
+       connectionFactory: this.connectionFactory,
+       lock: options.lock,
+       rateLimiter: this.rateLimiter
+     });
+   }
+
+   async cleanUp(syncRulesStorage: storage.SyncRulesBucketStorage): Promise<void> {
+     const connectionManager = this.connectionFactory.create({
+       idleTimeout: 30_000,
+       maxSize: 1
+     });
+     try {
+       // TODO: Slot_name will likely have to come from a different source in the future
+       await cleanUpReplicationSlot(syncRulesStorage.slot_name, connectionManager.pool);
+     } finally {
+       await connectionManager.end();
+     }
+   }
+
+   async stop(): Promise<void> {
+     await super.stop();
+     await this.connectionFactory.shutdown();
+   }
+ }
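
The `cleanUp()` hook above drops the replication slot of a removed sync rules instance via `cleanUpReplicationSlot` (defined in replication-utils.ts below). A minimal sketch of the underlying SQL pattern, assuming the standard node-postgres (`pg`) client; the function name is illustrative:

```ts
import { Client } from 'pg';

// Dropping via a SELECT over pg_replication_slots keeps the call idempotent:
// pg_drop_replication_slot() raises an error when called for a slot that
// does not exist, so filtering first avoids that failure mode.
async function dropReplicationSlotIfExists(client: Client, slotName: string): Promise<void> {
  await client.query(
    'SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots WHERE slot_name = $1',
    [slotName]
  );
}
```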
package/src/replication/replication-index.ts
@@ -0,0 +1,5 @@
+ export * from './PgRelation.js';
+ export * from './replication-utils.js';
+ export * from './WalStream.js';
+ export * from './WalStreamReplicator.js';
+ export * from './WalStreamReplicationJob.js';
package/src/replication/replication-utils.ts
@@ -0,0 +1,329 @@
+ import * as pgwire from '@powersync/service-jpgwire';
+
+ import { PatternResult, storage } from '@powersync/service-core';
+ import * as pgwire_utils from '../utils/pgwire_utils.js';
+ import { ReplicationIdentity } from './PgRelation.js';
+ import * as sync_rules from '@powersync/service-sync-rules';
+ import * as service_types from '@powersync/service-types';
+ import * as pg_utils from '../utils/pgwire_utils.js';
+ import * as util from '../utils/pgwire_utils.js';
+ import { logger } from '@powersync/lib-services-framework';
+
+ export interface ReplicaIdentityResult {
+   replicationColumns: storage.ColumnDescriptor[];
+   replicationIdentity: ReplicationIdentity;
+ }
+
+ export async function getPrimaryKeyColumns(
+   db: pgwire.PgClient,
+   relationId: number,
+   mode: 'primary' | 'replident'
+ ): Promise<storage.ColumnDescriptor[]> {
+   const indexFlag = mode == 'primary' ? `i.indisprimary` : `i.indisreplident`;
+   const attrRows = await pgwire_utils.retriedQuery(db, {
+     statement: `SELECT a.attname as name, a.atttypid as typeid, t.typname as type, a.attnum as attnum
+       FROM pg_index i
+       JOIN pg_attribute a ON a.attrelid = i.indrelid AND a.attnum = ANY (i.indkey)
+       JOIN pg_type t ON a.atttypid = t.oid
+       WHERE i.indrelid = $1::oid
+         AND ${indexFlag}
+         AND a.attnum > 0
+       ORDER BY a.attnum`,
+     params: [{ value: relationId, type: 'int4' }]
+   });
+
+   return attrRows.rows.map((row) => {
+     return {
+       name: row[0] as string,
+       typeId: row[1] as number
+     } satisfies storage.ColumnDescriptor;
+   });
+ }
+
+ export async function getAllColumns(db: pgwire.PgClient, relationId: number): Promise<storage.ColumnDescriptor[]> {
+   const attrRows = await pgwire_utils.retriedQuery(db, {
+     statement: `SELECT a.attname as name, a.atttypid as typeid, t.typname as type, a.attnum as attnum
+       FROM pg_attribute a
+       JOIN pg_type t ON a.atttypid = t.oid
+       WHERE a.attrelid = $1::oid
+         AND attnum > 0
+       ORDER BY a.attnum`,
+     params: [{ type: 'varchar', value: relationId }]
+   });
+   return attrRows.rows.map((row) => {
+     return {
+       name: row[0] as string,
+       typeId: row[1] as number
+     } satisfies storage.ColumnDescriptor;
+   });
+ }
+
+ export async function getReplicationIdentityColumns(
+   db: pgwire.PgClient,
+   relationId: number
+ ): Promise<ReplicaIdentityResult> {
+   const rows = await pgwire_utils.retriedQuery(db, {
+     statement: `SELECT CASE relreplident
+         WHEN 'd' THEN 'default'
+         WHEN 'n' THEN 'nothing'
+         WHEN 'f' THEN 'full'
+         WHEN 'i' THEN 'index'
+       END AS replica_identity
+       FROM pg_class
+       WHERE oid = $1::oid LIMIT 1`,
+     params: [{ type: 'int8', value: relationId }]
+   });
+   const idType: string = rows.rows[0]?.[0];
+   if (idType == 'nothing' || idType == null) {
+     return { replicationIdentity: 'nothing', replicationColumns: [] };
+   } else if (idType == 'full') {
+     return { replicationIdentity: 'full', replicationColumns: await getAllColumns(db, relationId) };
+   } else if (idType == 'default') {
+     return {
+       replicationIdentity: 'default',
+       replicationColumns: await getPrimaryKeyColumns(db, relationId, 'primary')
+     };
+   } else if (idType == 'index') {
+     return {
+       replicationIdentity: 'index',
+       replicationColumns: await getPrimaryKeyColumns(db, relationId, 'replident')
+     };
+   } else {
+     return { replicationIdentity: 'nothing', replicationColumns: [] };
+   }
+ }
+
+ export async function checkSourceConfiguration(db: pgwire.PgClient, publicationName: string) {
+   // Check basic config
+   await pgwire_utils.retriedQuery(
+     db,
+     `DO $$
+       BEGIN
+         if current_setting('wal_level') is distinct from 'logical' then
+           raise exception 'wal_level must be set to ''logical'', your database has it set to ''%''. Please edit your config file and restart PostgreSQL.', current_setting('wal_level');
+         end if;
+         if (current_setting('max_replication_slots')::int >= 1) is not true then
+           raise exception 'Your max_replication_slots setting is too low, it must be greater than 1. Please edit your config file and restart PostgreSQL.';
+         end if;
+         if (current_setting('max_wal_senders')::int >= 1) is not true then
+           raise exception 'Your max_wal_senders setting is too low, it must be greater than 1. Please edit your config file and restart PostgreSQL.';
+         end if;
+       end;
+       $$ LANGUAGE plpgsql;`
+   );
+
+   // Check that publication exists
+   const rs = await pgwire_utils.retriedQuery(db, {
+     statement: `SELECT * FROM pg_publication WHERE pubname = $1`,
+     params: [{ type: 'varchar', value: publicationName }]
+   });
+   const row = pgwire.pgwireRows(rs)[0];
+   if (row == null) {
+     throw new Error(
+       `Publication '${publicationName}' does not exist. Run: \`CREATE PUBLICATION ${publicationName} FOR ALL TABLES\`, or read the documentation for details.`
+     );
+   }
+   if (row.pubinsert == false || row.pubupdate == false || row.pubdelete == false || row.pubtruncate == false) {
+     throw new Error(
+       `Publication '${publicationName}' does not publish all changes. Create a publication using \`WITH (publish = "insert, update, delete, truncate")\` (the default).`
+     );
+   }
+   if (row.pubviaroot) {
+     throw new Error(`'${publicationName}' uses publish_via_partition_root, which is not supported.`);
+   }
+ }
+
+ export interface GetDebugTablesInfoOptions {
+   db: pgwire.PgClient;
+   publicationName: string;
+   connectionTag: string;
+   tablePatterns: sync_rules.TablePattern[];
+   syncRules: sync_rules.SqlSyncRules;
+ }
+
+ export async function getDebugTablesInfo(options: GetDebugTablesInfoOptions): Promise<PatternResult[]> {
+   const { db, publicationName, connectionTag, tablePatterns, syncRules } = options;
+   let result: PatternResult[] = [];
+
+   for (let tablePattern of tablePatterns) {
+     const schema = tablePattern.schema;
+
+     let patternResult: PatternResult = {
+       schema: schema,
+       pattern: tablePattern.tablePattern,
+       wildcard: tablePattern.isWildcard
+     };
+     result.push(patternResult);
+
+     if (tablePattern.isWildcard) {
+       patternResult.tables = [];
+       const prefix = tablePattern.tablePrefix;
+       const results = await util.retriedQuery(db, {
+         statement: `SELECT c.oid AS relid, c.relname AS table_name
+           FROM pg_class c
+           JOIN pg_namespace n ON n.oid = c.relnamespace
+           WHERE n.nspname = $1
+             AND c.relkind = 'r'
+             AND c.relname LIKE $2`,
+         params: [
+           { type: 'varchar', value: schema },
+           { type: 'varchar', value: tablePattern.tablePattern }
+         ]
+       });
+
+       for (let row of pgwire.pgwireRows(results)) {
+         const name = row.table_name as string;
+         const relationId = row.relid as number;
+         if (!name.startsWith(prefix)) {
+           continue;
+         }
+         const details = await getDebugTableInfo({
+           db,
+           name,
+           publicationName,
+           connectionTag,
+           tablePattern,
+           relationId,
+           syncRules: syncRules
+         });
+         patternResult.tables.push(details);
+       }
+     } else {
+       const results = await util.retriedQuery(db, {
+         statement: `SELECT c.oid AS relid, c.relname AS table_name
+           FROM pg_class c
+           JOIN pg_namespace n ON n.oid = c.relnamespace
+           WHERE n.nspname = $1
+             AND c.relkind = 'r'
+             AND c.relname = $2`,
+         params: [
+           { type: 'varchar', value: schema },
+           { type: 'varchar', value: tablePattern.tablePattern }
+         ]
+       });
+       if (results.rows.length == 0) {
+         // Table not found
+         patternResult.table = await getDebugTableInfo({
+           db,
+           name: tablePattern.name,
+           publicationName,
+           connectionTag,
+           tablePattern,
+           relationId: null,
+           syncRules: syncRules
+         });
+       } else {
+         const row = pgwire.pgwireRows(results)[0];
+         const name = row.table_name as string;
+         const relationId = row.relid as number;
+         patternResult.table = await getDebugTableInfo({
+           db,
+           name,
+           publicationName,
+           connectionTag,
+           tablePattern,
+           relationId,
+           syncRules: syncRules
+         });
+       }
+     }
+   }
+   return result;
+ }
+
+ export interface GetDebugTableInfoOptions {
+   db: pgwire.PgClient;
+   name: string;
+   publicationName: string;
+   connectionTag: string;
+   tablePattern: sync_rules.TablePattern;
+   relationId: number | null;
+   syncRules: sync_rules.SqlSyncRules;
+ }
+
+ export async function getDebugTableInfo(options: GetDebugTableInfoOptions): Promise<service_types.TableInfo> {
+   const { db, name, publicationName, connectionTag, tablePattern, relationId, syncRules } = options;
+   const schema = tablePattern.schema;
+   let id_columns_result: ReplicaIdentityResult | undefined = undefined;
+   let id_columns_error = null;
+
+   if (relationId != null) {
+     try {
+       id_columns_result = await getReplicationIdentityColumns(db, relationId);
+     } catch (e) {
+       id_columns_error = { level: 'fatal', message: e.message };
+     }
+   }
+
+   const id_columns = id_columns_result?.replicationColumns ?? [];
+
+   const sourceTable = new storage.SourceTable(0, connectionTag, relationId ?? 0, schema, name, id_columns, true);
+
+   const syncData = syncRules.tableSyncsData(sourceTable);
+   const syncParameters = syncRules.tableSyncsParameters(sourceTable);
+
+   if (relationId == null) {
+     return {
+       schema: schema,
+       name: name,
+       pattern: tablePattern.isWildcard ? tablePattern.tablePattern : undefined,
+       replication_id: [],
+       data_queries: syncData,
+       parameter_queries: syncParameters,
+       // Also
+       errors: [{ level: 'warning', message: `Table ${sourceTable.qualifiedName} not found.` }]
+     };
+   }
+   if (id_columns.length == 0 && id_columns_error == null) {
+     let message = `No replication id found for ${sourceTable.qualifiedName}. Replica identity: ${id_columns_result?.replicationIdentity}.`;
+     if (id_columns_result?.replicationIdentity == 'default') {
+       message += ' Configure a primary key on the table.';
+     }
+     id_columns_error = { level: 'fatal', message };
+   }
+
+   let selectError = null;
+   try {
+     await pg_utils.retriedQuery(db, `SELECT * FROM ${sourceTable.escapedIdentifier} LIMIT 1`);
+   } catch (e) {
+     selectError = { level: 'fatal', message: e.message };
+   }
+
+   let replicateError = null;
+
+   const publications = await pg_utils.retriedQuery(db, {
+     statement: `SELECT tablename FROM pg_publication_tables WHERE pubname = $1 AND schemaname = $2 AND tablename = $3`,
+     params: [
+       { type: 'varchar', value: publicationName },
+       { type: 'varchar', value: tablePattern.schema },
+       { type: 'varchar', value: name }
+     ]
+   });
+   if (publications.rows.length == 0) {
+     replicateError = {
+       level: 'fatal',
+       message: `Table ${sourceTable.qualifiedName} is not part of publication '${publicationName}'. Run: \`ALTER PUBLICATION ${publicationName} ADD TABLE ${sourceTable.qualifiedName}\`.`
+     };
+   }
+
+   return {
+     schema: schema,
+     name: name,
+     pattern: tablePattern.isWildcard ? tablePattern.tablePattern : undefined,
+     replication_id: id_columns.map((c) => c.name),
+     data_queries: syncData,
+     parameter_queries: syncParameters,
+     errors: [id_columns_error, selectError, replicateError].filter(
+       (error) => error != null
+     ) as service_types.ReplicationError[]
+   };
+ }
+
+ export async function cleanUpReplicationSlot(slotName: string, db: pgwire.PgClient): Promise<void> {
+   logger.info(`Cleaning up Postgres replication slot: ${slotName}...`);
+
+   await db.query({
+     statement: 'SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots WHERE slot_name = $1',
+     params: [{ type: 'varchar', value: slotName }]
+   });
+ }
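
`getReplicationIdentityColumns` above decides which columns identify a row for replication, based on the table's REPLICA IDENTITY setting (`pg_class.relreplident`): DEFAULT uses the primary key, INDEX uses the replica-identity index, FULL uses all columns, and NOTHING leaves no usable identity. A minimal sketch of the same `relreplident` lookup, assuming the standard node-postgres (`pg`) client instead of pgwire; the function name is illustrative:

```ts
import { Client } from 'pg';

type ReplicaIdentity = 'default' | 'nothing' | 'full' | 'index';

// Maps pg_class.relreplident ('d'/'n'/'f'/'i') to a readable replica identity,
// mirroring the CASE expression used in the package code above.
async function getReplicaIdentity(client: Client, relationId: number): Promise<ReplicaIdentity> {
  const result = await client.query(
    `SELECT CASE relreplident
        WHEN 'd' THEN 'default'
        WHEN 'n' THEN 'nothing'
        WHEN 'f' THEN 'full'
        WHEN 'i' THEN 'index'
      END AS replica_identity
      FROM pg_class WHERE oid = $1::oid`,
    [relationId]
  );
  // A missing table behaves like REPLICA IDENTITY NOTHING: no usable replication id.
  return (result.rows[0]?.replica_identity ?? 'nothing') as ReplicaIdentity;
}
```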
package/src/types/types.ts
@@ -0,0 +1,159 @@
+ import * as service_types from '@powersync/service-types';
+ import * as t from 'ts-codec';
+ import * as urijs from 'uri-js';
+
+ export const POSTGRES_CONNECTION_TYPE = 'postgresql' as const;
+
+ export interface NormalizedPostgresConnectionConfig {
+   id: string;
+   tag: string;
+
+   hostname: string;
+   port: number;
+   database: string;
+
+   username: string;
+   password: string;
+
+   sslmode: 'verify-full' | 'verify-ca' | 'disable';
+   cacert: string | undefined;
+
+   client_certificate: string | undefined;
+   client_private_key: string | undefined;
+ }
+
+ export const PostgresConnectionConfig = service_types.configFile.dataSourceConfig.and(
+   t.object({
+     type: t.literal(POSTGRES_CONNECTION_TYPE),
+     /** Unique identifier for the connection - optional when a single connection is present. */
+     id: t.string.optional(),
+     /** Tag used as reference in sync rules. Defaults to "default". Does not have to be unique. */
+     tag: t.string.optional(),
+     uri: t.string.optional(),
+     hostname: t.string.optional(),
+     port: service_types.configFile.portCodec.optional(),
+     username: t.string.optional(),
+     password: t.string.optional(),
+     database: t.string.optional(),
+
+     /** Defaults to verify-full */
+     sslmode: t.literal('verify-full').or(t.literal('verify-ca')).or(t.literal('disable')).optional(),
+     /** Required for verify-ca, optional for verify-full */
+     cacert: t.string.optional(),
+
+     client_certificate: t.string.optional(),
+     client_private_key: t.string.optional(),
+
+     /** Expose database credentials */
+     demo_database: t.boolean.optional(),
+
+     /**
+      * Prefix for the slot name. Defaults to "powersync_"
+      */
+     slot_name_prefix: t.string.optional()
+   })
+ );
+
+ /**
+  * Config input specified when starting services
+  */
+ export type PostgresConnectionConfig = t.Decoded<typeof PostgresConnectionConfig>;
+
+ /**
+  * Resolved version of {@link PostgresConnectionConfig}
+  */
+ export type ResolvedConnectionConfig = PostgresConnectionConfig & NormalizedPostgresConnectionConfig;
+
+ /**
+  * Validate and normalize connection options.
+  *
+  * Returns destructured options.
+  */
+ export function normalizeConnectionConfig(options: PostgresConnectionConfig): NormalizedPostgresConnectionConfig {
+   let uri: urijs.URIComponents;
+   if (options.uri) {
+     uri = urijs.parse(options.uri);
+     if (uri.scheme != 'postgresql' && uri.scheme != 'postgres') {
+       throw new Error(`Invalid URI - protocol must be postgresql, got ${uri.scheme}`);
+     } else if (uri.scheme != 'postgresql') {
+       uri.scheme = 'postgresql';
+     }
+   } else {
+     uri = urijs.parse('postgresql:///');
+   }
+
+   const hostname = options.hostname ?? uri.host ?? '';
+   const port = validatePort(options.port ?? uri.port ?? 5432);
+
+   const database = options.database ?? uri.path?.substring(1) ?? '';
+
+   const [uri_username, uri_password] = (uri.userinfo ?? '').split(':');
+
+   const username = options.username ?? uri_username ?? '';
+   const password = options.password ?? uri_password ?? '';
+
+   const sslmode = options.sslmode ?? 'verify-full'; // Configuration not supported via URI
+   const cacert = options.cacert;
+
+   if (sslmode == 'verify-ca' && cacert == null) {
+     throw new Error('Explicit cacert is required for sslmode=verify-ca');
+   }
+
+   if (hostname == '') {
+     throw new Error(`hostname required`);
+   }
+
+   if (username == '') {
+     throw new Error(`username required`);
+   }
+
+   if (password == '') {
+     throw new Error(`password required`);
+   }
+
+   if (database == '') {
+     throw new Error(`database required`);
+   }
+
+   return {
+     id: options.id ?? 'default',
+     tag: options.tag ?? 'default',
+
+     hostname,
+     port,
+     database,
+
+     username,
+     password,
+     sslmode,
+     cacert,
+
+     client_certificate: options.client_certificate ?? undefined,
+     client_private_key: options.client_private_key ?? undefined
+   };
+ }
+
+ /**
+  * Check whether the port is in a "safe" range.
+  *
+  * We do not support connecting to "privileged" ports.
+  */
+ export function validatePort(port: string | number): number {
+   if (typeof port == 'string') {
+     port = parseInt(port);
+   }
+   if (port >= 1024 && port <= 49151) {
+     return port;
+   } else {
+     throw new Error(`Port ${port} not supported`);
+   }
+ }
+
+ /**
+  * Construct a postgres URI, without username, password or ssl options.
+  *
+  * Only contains hostname, port, database.
+  */
+ export function baseUri(options: NormalizedPostgresConnectionConfig) {
+   return `postgresql://${options.hostname}:${options.port}/${options.database}`;
+ }