@powersync/service-module-postgres 0.19.2 → 0.19.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/api/PostgresRouteAPIAdapter.d.ts +1 -1
- package/dist/api/PostgresRouteAPIAdapter.js +63 -72
- package/dist/api/PostgresRouteAPIAdapter.js.map +1 -1
- package/dist/module/PostgresModule.js.map +1 -1
- package/dist/replication/MissingReplicationSlotError.d.ts +41 -0
- package/dist/replication/MissingReplicationSlotError.js +33 -0
- package/dist/replication/MissingReplicationSlotError.js.map +1 -0
- package/dist/replication/PostgresErrorRateLimiter.js +1 -1
- package/dist/replication/PostgresErrorRateLimiter.js.map +1 -1
- package/dist/replication/SnapshotQuery.js +2 -2
- package/dist/replication/SnapshotQuery.js.map +1 -1
- package/dist/replication/WalStream.d.ts +37 -14
- package/dist/replication/WalStream.js +145 -41
- package/dist/replication/WalStream.js.map +1 -1
- package/dist/replication/WalStreamReplicationJob.d.ts +1 -1
- package/dist/replication/WalStreamReplicationJob.js +7 -4
- package/dist/replication/WalStreamReplicationJob.js.map +1 -1
- package/dist/replication/WalStreamReplicator.d.ts +0 -1
- package/dist/replication/WalStreamReplicator.js +0 -22
- package/dist/replication/WalStreamReplicator.js.map +1 -1
- package/dist/replication/replication-index.d.ts +3 -1
- package/dist/replication/replication-index.js +3 -1
- package/dist/replication/replication-index.js.map +1 -1
- package/dist/replication/replication-utils.d.ts +3 -11
- package/dist/replication/replication-utils.js +101 -164
- package/dist/replication/replication-utils.js.map +1 -1
- package/dist/replication/wal-budget-utils.d.ts +23 -0
- package/dist/replication/wal-budget-utils.js +57 -0
- package/dist/replication/wal-budget-utils.js.map +1 -0
- package/dist/types/registry.js +1 -1
- package/dist/types/registry.js.map +1 -1
- package/package.json +15 -11
- package/sql/check-source-configuration.plpgsql +13 -0
- package/sql/debug-tables-info-batched.plpgsql +230 -0
- package/CHANGELOG.md +0 -843
- package/src/api/PostgresRouteAPIAdapter.ts +0 -356
- package/src/index.ts +0 -1
- package/src/module/PostgresModule.ts +0 -122
- package/src/replication/ConnectionManagerFactory.ts +0 -33
- package/src/replication/PgManager.ts +0 -122
- package/src/replication/PgRelation.ts +0 -41
- package/src/replication/PostgresErrorRateLimiter.ts +0 -48
- package/src/replication/SnapshotQuery.ts +0 -213
- package/src/replication/WalStream.ts +0 -1157
- package/src/replication/WalStreamReplicationJob.ts +0 -138
- package/src/replication/WalStreamReplicator.ts +0 -79
- package/src/replication/replication-index.ts +0 -5
- package/src/replication/replication-utils.ts +0 -398
- package/src/types/registry.ts +0 -275
- package/src/types/resolver.ts +0 -227
- package/src/types/types.ts +0 -44
- package/src/utils/application-name.ts +0 -8
- package/src/utils/migration_lib.ts +0 -80
- package/src/utils/populate_test_data.ts +0 -37
- package/src/utils/populate_test_data_worker.ts +0 -53
- package/src/utils/postgres_version.ts +0 -8
- package/test/src/checkpoints.test.ts +0 -86
- package/test/src/chunked_snapshots.test.ts +0 -161
- package/test/src/env.ts +0 -11
- package/test/src/large_batch.test.ts +0 -241
- package/test/src/pg_test.test.ts +0 -729
- package/test/src/resuming_snapshots.test.ts +0 -160
- package/test/src/route_api_adapter.test.ts +0 -62
- package/test/src/schema_changes.test.ts +0 -655
- package/test/src/setup.ts +0 -12
- package/test/src/slow_tests.test.ts +0 -519
- package/test/src/storage_combination.test.ts +0 -35
- package/test/src/types/registry.test.ts +0 -149
- package/test/src/util.ts +0 -151
- package/test/src/validation.test.ts +0 -63
- package/test/src/wal_stream.test.ts +0 -607
- package/test/src/wal_stream_utils.ts +0 -284
- package/test/tsconfig.json +0 -27
- package/tsconfig.json +0 -34
- package/tsconfig.tsbuildinfo +0 -1
- package/vitest.config.ts +0 -3
|
@@ -1,138 +0,0 @@
|
|
|
1
|
-
import { container, logger } from '@powersync/lib-services-framework';
|
|
2
|
-
import { PgManager } from './PgManager.js';
|
|
3
|
-
import { MissingReplicationSlotError, sendKeepAlive, WalStream } from './WalStream.js';
|
|
4
|
-
|
|
5
|
-
import { replication } from '@powersync/service-core';
|
|
6
|
-
import { ConnectionManagerFactory } from './ConnectionManagerFactory.js';
|
|
7
|
-
import { getApplicationName } from '../utils/application-name.js';
|
|
8
|
-
|
|
9
|
-
/**
 * Options for constructing a {@link WalStreamReplicationJob}.
 */
export interface WalStreamReplicationJobOptions extends replication.AbstractReplicationJobOptions {
  // Factory used to create a fresh PgManager for every replication attempt.
  connectionFactory: ConnectionManagerFactory;
}
|
|
12
|
-
|
|
13
|
-
/**
 * A single replication job for one Postgres logical replication slot.
 *
 * Each call to {@link replicate} runs one attempt with fresh connections; on
 * failure the error is logged/reported and the job aborts itself, leaving
 * retry scheduling to the surrounding replicator and rate limiter.
 */
export class WalStreamReplicationJob extends replication.AbstractReplicationJob {
  // Creates a new PgManager per attempt (see replicateOnce()).
  private connectionFactory: ConnectionManagerFactory;
  // Active connection manager for the current attempt; null between attempts.
  private connectionManager: PgManager | null = null;
  // Most recent WalStream, kept after the attempt ends so lag can still be queried.
  private lastStream: WalStream | null = null;

  constructor(options: WalStreamReplicationJobOptions) {
    super(options);
    // Prefix all log lines with the slot name for easier correlation.
    this.logger = logger.child({ prefix: `[${this.slotName}] ` });
    this.connectionFactory = options.connectionFactory;
  }

  /**
   * Postgres on RDS performs a WAL checkpoint every 5 minutes by default, which creates a new 64MB file.
   *
   * The old WAL files are only deleted once no replication slot still references it.
   *
   * Unfortunately, when there are no changes to the db, the database creates new WAL files without the replication slot
   * advancing**.
   *
   * As a workaround, we write a new message every couple of minutes, to make sure that the replication slot advances.
   *
   * **This may be a bug in pgwire or how we're using it.
   */
  async keepAlive() {
    if (this.connectionManager) {
      try {
        await sendKeepAlive(this.connectionManager.pool);
      } catch (e) {
        // Best-effort: a failed keepalive only delays slot advancement, so warn and continue.
        this.logger.warn(`KeepAlive failed, unable to post to WAL`, e);
      }
    }
  }

  // Name of the logical replication slot this job streams from.
  get slotName() {
    return this.options.storage.slot_name;
  }

  /**
   * Run one replication attempt and handle any fatal error.
   *
   * Errors are logged and reported (unless the job was stopped), the rate
   * limiter is informed to set the retry delay, and a missing replication
   * slot triggers a full replication restart. Always aborts this job.
   */
  async replicate() {
    try {
      await this.replicateOnce();
    } catch (e) {
      // Fatal exception
      if (!this.isStopped) {
        // Ignore aborted errors
        this.logger.error(`Replication error`, e);
        if (e.cause != null) {
          // Example: PgError.conn_ended ("Unable to do postgres query on ended connection")
          // thrown from pgwire, with cause: TypeError: match is not iterable
          // (raised while decoding a row in jpgwire).
          // Without this additional log, the cause would not be visible in the logs.
          this.logger.error(`cause`, e.cause);
        }
        // Report the error if relevant, before retrying
        container.reporter.captureException(e, {
          metadata: {
            replication_slot: this.slotName
          }
        });
        // This sets the retry delay
        this.rateLimiter.reportError(e);
      }

      if (e instanceof MissingReplicationSlotError) {
        // This stops replication on this slot and restarts with a new slot
        // NOTE(review): `this.storage` appears to come from the base class — confirm it
        // refers to the same storage as `this.options.storage`.
        await this.options.storage.factory.restartReplication(this.storage.group_id);
      }

      // No need to rethrow - the error is already logged, and retry behavior is the same on error
    } finally {
      this.abortController.abort();
    }
  }

  /**
   * Execute a single replication attempt with a fresh connection manager.
   *
   * The manager is always ended in `finally`, and `this.connectionManager`
   * is cleared first so keepAlive() never uses a closing pool.
   */
  async replicateOnce() {
    // New connections on every iteration (every error with retry),
    // otherwise we risk repeating errors related to the connection,
    // such as caused by cached PG schemas.
    const connectionManager = this.connectionFactory.create({
      // Pool connections are only used intermittently.
      idleTimeout: 30_000,
      maxSize: 2,
      applicationName: getApplicationName()
    });
    this.connectionManager = connectionManager;
    try {
      // Respect the backoff from previous failures before starting.
      await this.rateLimiter?.waitUntilAllowed({ signal: this.abortController.signal });
      if (this.isStopped) {
        return;
      }
      const stream = new WalStream({
        logger: this.logger,
        abort_signal: this.abortController.signal,
        storage: this.options.storage,
        metrics: this.options.metrics,
        connections: connectionManager
      });
      this.lastStream = stream;
      await stream.replicate();
    } finally {
      this.connectionManager = null;
      await connectionManager.end();
    }
  }

  /**
   * Replication lag of the most recent stream, in milliseconds, or
   * undefined if no stream has run yet.
   */
  async getReplicationLagMillis(): Promise<number | undefined> {
    return this.lastStream?.getReplicationLagMillis();
  }
}
|
|
@@ -1,79 +0,0 @@
|
|
|
1
|
-
import { replication, storage } from '@powersync/service-core';
|
|
2
|
-
import { PostgresModule } from '../module/PostgresModule.js';
|
|
3
|
-
import { getApplicationName } from '../utils/application-name.js';
|
|
4
|
-
import { ConnectionManagerFactory } from './ConnectionManagerFactory.js';
|
|
5
|
-
import { cleanUpReplicationSlot } from './replication-utils.js';
|
|
6
|
-
import { WalStreamReplicationJob } from './WalStreamReplicationJob.js';
|
|
7
|
-
|
|
8
|
-
/**
 * Options for constructing a {@link WalStreamReplicator}.
 */
export interface WalStreamReplicatorOptions extends replication.AbstractReplicatorOptions {
  // Shared factory for database connection managers, passed to each job.
  connectionFactory: ConnectionManagerFactory;
}
|
|
11
|
-
|
|
12
|
-
export class WalStreamReplicator extends replication.AbstractReplicator<WalStreamReplicationJob> {
|
|
13
|
-
private readonly connectionFactory: ConnectionManagerFactory;
|
|
14
|
-
|
|
15
|
-
constructor(options: WalStreamReplicatorOptions) {
|
|
16
|
-
super(options);
|
|
17
|
-
this.connectionFactory = options.connectionFactory;
|
|
18
|
-
}
|
|
19
|
-
|
|
20
|
-
createJob(options: replication.CreateJobOptions): WalStreamReplicationJob {
|
|
21
|
-
return new WalStreamReplicationJob({
|
|
22
|
-
id: this.createJobId(options.storage.group_id),
|
|
23
|
-
storage: options.storage,
|
|
24
|
-
metrics: this.metrics,
|
|
25
|
-
connectionFactory: this.connectionFactory,
|
|
26
|
-
lock: options.lock,
|
|
27
|
-
rateLimiter: this.rateLimiter
|
|
28
|
-
});
|
|
29
|
-
}
|
|
30
|
-
|
|
31
|
-
async cleanUp(syncRulesStorage: storage.SyncRulesBucketStorage): Promise<void> {
|
|
32
|
-
const connectionManager = this.connectionFactory.create({
|
|
33
|
-
applicationName: getApplicationName(),
|
|
34
|
-
idleTimeout: 30_000,
|
|
35
|
-
maxSize: 1
|
|
36
|
-
});
|
|
37
|
-
try {
|
|
38
|
-
// TODO: Slot_name will likely have to come from a different source in the future
|
|
39
|
-
await cleanUpReplicationSlot(syncRulesStorage.slot_name, connectionManager.pool);
|
|
40
|
-
} finally {
|
|
41
|
-
await connectionManager.end();
|
|
42
|
-
}
|
|
43
|
-
}
|
|
44
|
-
|
|
45
|
-
async stop(): Promise<void> {
|
|
46
|
-
await super.stop();
|
|
47
|
-
await this.connectionFactory.shutdown();
|
|
48
|
-
}
|
|
49
|
-
|
|
50
|
-
async testConnection() {
|
|
51
|
-
return await PostgresModule.testConnection(this.connectionFactory.dbConnectionConfig);
|
|
52
|
-
}
|
|
53
|
-
|
|
54
|
-
async getReplicationLagMillis(): Promise<number | undefined> {
|
|
55
|
-
const lag = await super.getReplicationLagMillis();
|
|
56
|
-
if (lag != null) {
|
|
57
|
-
return lag;
|
|
58
|
-
}
|
|
59
|
-
|
|
60
|
-
// Booting or in an error loop. Check last active replication status.
|
|
61
|
-
// This includes sync rules in an ERROR state.
|
|
62
|
-
const content = await this.storage.getActiveSyncRulesContent();
|
|
63
|
-
if (content == null) {
|
|
64
|
-
return undefined;
|
|
65
|
-
}
|
|
66
|
-
// Measure the lag from the last commit or keepalive timestamp.
|
|
67
|
-
// This is not 100% accurate since it is the commit time in the storage db rather than
|
|
68
|
-
// the source db, but it's the best we have for postgres.
|
|
69
|
-
|
|
70
|
-
const checkpointTs = content.last_checkpoint_ts?.getTime() ?? 0;
|
|
71
|
-
const keepaliveTs = content.last_keepalive_ts?.getTime() ?? 0;
|
|
72
|
-
const latestTs = Math.max(checkpointTs, keepaliveTs);
|
|
73
|
-
if (latestTs != 0) {
|
|
74
|
-
return Date.now() - latestTs;
|
|
75
|
-
}
|
|
76
|
-
|
|
77
|
-
return undefined;
|
|
78
|
-
}
|
|
79
|
-
}
|
|
@@ -1,398 +0,0 @@
|
|
|
1
|
-
import * as pgwire from '@powersync/service-jpgwire';
|
|
2
|
-
|
|
3
|
-
import * as lib_postgres from '@powersync/lib-service-postgres';
|
|
4
|
-
import { ErrorCode, logger, ServiceAssertionError, ServiceError } from '@powersync/lib-services-framework';
|
|
5
|
-
import { PatternResult, storage } from '@powersync/service-core';
|
|
6
|
-
import * as sync_rules from '@powersync/service-sync-rules';
|
|
7
|
-
import * as service_types from '@powersync/service-types';
|
|
8
|
-
import { ReplicationIdentity } from './PgRelation.js';
|
|
9
|
-
|
|
10
|
-
export interface ReplicaIdentityResult {
|
|
11
|
-
replicationColumns: storage.ColumnDescriptor[];
|
|
12
|
-
replicationIdentity: ReplicationIdentity;
|
|
13
|
-
}
|
|
14
|
-
|
|
15
|
-
export async function getPrimaryKeyColumns(
|
|
16
|
-
db: pgwire.PgClient,
|
|
17
|
-
relationId: number,
|
|
18
|
-
mode: 'primary' | 'replident'
|
|
19
|
-
): Promise<storage.ColumnDescriptor[]> {
|
|
20
|
-
const indexFlag = mode == 'primary' ? `i.indisprimary` : `i.indisreplident`;
|
|
21
|
-
const attrRows = await lib_postgres.retriedQuery(db, {
|
|
22
|
-
statement: `SELECT a.attname as name, a.atttypid as typeid, t.typname as type, a.attnum as attnum
|
|
23
|
-
FROM pg_index i
|
|
24
|
-
JOIN pg_attribute a ON a.attrelid = i.indrelid AND a.attnum = ANY (i.indkey)
|
|
25
|
-
JOIN pg_type t ON a.atttypid = t.oid
|
|
26
|
-
WHERE i.indrelid = $1::oid
|
|
27
|
-
AND ${indexFlag}
|
|
28
|
-
AND a.attnum > 0
|
|
29
|
-
ORDER BY a.attnum`,
|
|
30
|
-
params: [{ value: relationId, type: 'int4' }]
|
|
31
|
-
});
|
|
32
|
-
|
|
33
|
-
return attrRows.rows.map((row) => {
|
|
34
|
-
return {
|
|
35
|
-
name: row.decodeWithoutCustomTypes(0) as string,
|
|
36
|
-
typeId: row.decodeWithoutCustomTypes(1) as number
|
|
37
|
-
} satisfies storage.ColumnDescriptor;
|
|
38
|
-
});
|
|
39
|
-
}
|
|
40
|
-
|
|
41
|
-
export async function getAllColumns(db: pgwire.PgClient, relationId: number): Promise<storage.ColumnDescriptor[]> {
|
|
42
|
-
const attrRows = await lib_postgres.retriedQuery(db, {
|
|
43
|
-
statement: `SELECT a.attname as name, a.atttypid as typeid, t.typname as type, a.attnum as attnum
|
|
44
|
-
FROM pg_attribute a
|
|
45
|
-
JOIN pg_type t ON a.atttypid = t.oid
|
|
46
|
-
WHERE a.attrelid = $1::oid
|
|
47
|
-
AND attnum > 0
|
|
48
|
-
ORDER BY a.attnum`,
|
|
49
|
-
params: [{ type: 'varchar', value: relationId }]
|
|
50
|
-
});
|
|
51
|
-
return attrRows.rows.map((row) => {
|
|
52
|
-
return {
|
|
53
|
-
name: row.decodeWithoutCustomTypes(0) as string,
|
|
54
|
-
typeId: row.decodeWithoutCustomTypes(1) as number
|
|
55
|
-
} satisfies storage.ColumnDescriptor;
|
|
56
|
-
});
|
|
57
|
-
}
|
|
58
|
-
|
|
59
|
-
export async function getReplicationIdentityColumns(
|
|
60
|
-
db: pgwire.PgClient,
|
|
61
|
-
relationId: number
|
|
62
|
-
): Promise<ReplicaIdentityResult> {
|
|
63
|
-
const rows = await lib_postgres.retriedQuery(db, {
|
|
64
|
-
statement: `SELECT CASE relreplident
|
|
65
|
-
WHEN 'd' THEN 'default'
|
|
66
|
-
WHEN 'n' THEN 'nothing'
|
|
67
|
-
WHEN 'f' THEN 'full'
|
|
68
|
-
WHEN 'i' THEN 'index'
|
|
69
|
-
END AS replica_identity
|
|
70
|
-
FROM pg_class
|
|
71
|
-
WHERE oid = $1::oid LIMIT 1`,
|
|
72
|
-
params: [{ type: 'int8', value: relationId }]
|
|
73
|
-
});
|
|
74
|
-
const idType: string = rows.rows[0]?.decodeWithoutCustomTypes(0);
|
|
75
|
-
if (idType == 'nothing' || idType == null) {
|
|
76
|
-
return { replicationIdentity: 'nothing', replicationColumns: [] };
|
|
77
|
-
} else if (idType == 'full') {
|
|
78
|
-
return { replicationIdentity: 'full', replicationColumns: await getAllColumns(db, relationId) };
|
|
79
|
-
} else if (idType == 'default') {
|
|
80
|
-
return {
|
|
81
|
-
replicationIdentity: 'default',
|
|
82
|
-
replicationColumns: await getPrimaryKeyColumns(db, relationId, 'primary')
|
|
83
|
-
};
|
|
84
|
-
} else if (idType == 'index') {
|
|
85
|
-
return {
|
|
86
|
-
replicationIdentity: 'index',
|
|
87
|
-
replicationColumns: await getPrimaryKeyColumns(db, relationId, 'replident')
|
|
88
|
-
};
|
|
89
|
-
} else {
|
|
90
|
-
return { replicationIdentity: 'nothing', replicationColumns: [] };
|
|
91
|
-
}
|
|
92
|
-
}
|
|
93
|
-
|
|
94
|
-
/**
 * Validate that the source database is configured for logical replication.
 *
 * Checks wal_level / max_replication_slots / max_wal_senders via a server-side
 * DO block (so errors surface as Postgres exceptions with clear messages), then
 * verifies that the named publication exists and publishes all change types.
 *
 * @throws ServiceError PSYNC_S1141 if the publication does not exist
 * @throws ServiceError PSYNC_S1142 if the publication filters change types
 * @throws ServiceError PSYNC_S1143 if publish_via_partition_root is enabled
 */
export async function checkSourceConfiguration(db: pgwire.PgClient, publicationName: string): Promise<void> {
  // Check basic config
  await lib_postgres.retriedQuery(
    db,
    `DO $$
BEGIN
if current_setting('wal_level') is distinct from 'logical' then
raise exception 'wal_level must be set to ''logical'', your database has it set to ''%''. Please edit your config file and restart PostgreSQL.', current_setting('wal_level');
end if;
if (current_setting('max_replication_slots')::int >= 1) is not true then
raise exception 'Your max_replication_slots setting is too low, it must be greater than 1. Please edit your config file and restart PostgreSQL.';
end if;
if (current_setting('max_wal_senders')::int >= 1) is not true then
raise exception 'Your max_wal_senders setting is too low, it must be greater than 1. Please edit your config file and restart PostgreSQL.';
end if;
end;
$$ LANGUAGE plpgsql;`
  );

  // Check that publication exists
  const rs = await lib_postgres.retriedQuery(db, {
    statement: `SELECT * FROM pg_publication WHERE pubname = $1`,
    params: [{ type: 'varchar', value: publicationName }]
  });
  const row = pgwire.pgwireRows(rs)[0];
  if (row == null) {
    throw new ServiceError(
      ErrorCode.PSYNC_S1141,
      `Publication '${publicationName}' does not exist. Run: \`CREATE PUBLICATION ${publicationName} FOR ALL TABLES\`, or read the documentation for details.`
    );
  }
  // All four operation types must be published for replication to stay consistent.
  if (row.pubinsert == false || row.pubupdate == false || row.pubdelete == false || row.pubtruncate == false) {
    throw new ServiceError(
      ErrorCode.PSYNC_S1142,
      `Publication '${publicationName}' does not publish all changes. Create a publication using \`WITH (publish = "insert, update, delete, truncate")\` (the default).`
    );
  }
  // Partition-root publishing would report changes against the parent table, which we don't support.
  if (row.pubviaroot) {
    throw new ServiceError(
      ErrorCode.PSYNC_S1143,
      `'${publicationName}' uses publish_via_partition_root, which is not supported.`
    );
  }
}
|
|
138
|
-
|
|
139
|
-
export async function checkTableRls(
|
|
140
|
-
db: pgwire.PgClient,
|
|
141
|
-
relationId: number
|
|
142
|
-
): Promise<{ canRead: boolean; message?: string }> {
|
|
143
|
-
const rs = await lib_postgres.retriedQuery(db, {
|
|
144
|
-
statement: `
|
|
145
|
-
WITH user_info AS (
|
|
146
|
-
SELECT
|
|
147
|
-
current_user as username,
|
|
148
|
-
r.rolsuper,
|
|
149
|
-
r.rolbypassrls
|
|
150
|
-
FROM pg_roles r
|
|
151
|
-
WHERE r.rolname = current_user
|
|
152
|
-
)
|
|
153
|
-
SELECT
|
|
154
|
-
c.relname as tablename,
|
|
155
|
-
c.relrowsecurity as rls_enabled,
|
|
156
|
-
u.username as username,
|
|
157
|
-
u.rolsuper as is_superuser,
|
|
158
|
-
u.rolbypassrls as bypasses_rls
|
|
159
|
-
FROM pg_class c
|
|
160
|
-
CROSS JOIN user_info u
|
|
161
|
-
WHERE c.oid = $1::oid;
|
|
162
|
-
`,
|
|
163
|
-
params: [{ type: 'int4', value: relationId }]
|
|
164
|
-
});
|
|
165
|
-
|
|
166
|
-
const rows = pgwire.pgwireRows<{
|
|
167
|
-
rls_enabled: boolean;
|
|
168
|
-
tablename: string;
|
|
169
|
-
username: string;
|
|
170
|
-
is_superuser: boolean;
|
|
171
|
-
bypasses_rls: boolean;
|
|
172
|
-
}>(rs);
|
|
173
|
-
if (rows.length == 0) {
|
|
174
|
-
// Not expected, since we already got the oid
|
|
175
|
-
throw new ServiceAssertionError(`Table with OID ${relationId} does not exist.`);
|
|
176
|
-
}
|
|
177
|
-
const row = rows[0];
|
|
178
|
-
if (row.is_superuser || row.bypasses_rls) {
|
|
179
|
-
// Bypasses RLS automatically.
|
|
180
|
-
return { canRead: true };
|
|
181
|
-
}
|
|
182
|
-
|
|
183
|
-
if (row.rls_enabled) {
|
|
184
|
-
// Don't skip, since we _may_ still be able to get results.
|
|
185
|
-
return {
|
|
186
|
-
canRead: false,
|
|
187
|
-
message: `[${ErrorCode.PSYNC_S1145}] Row Level Security is enabled on table "${row.tablename}". To make sure that ${row.username} can read the table, run: 'ALTER ROLE ${row.username} BYPASSRLS'.`
|
|
188
|
-
};
|
|
189
|
-
}
|
|
190
|
-
|
|
191
|
-
return { canRead: true };
|
|
192
|
-
}
|
|
193
|
-
|
|
194
|
-
/**
 * Options for {@link getDebugTablesInfo}.
 */
export interface GetDebugTablesInfoOptions {
  // Connection to the source database.
  db: pgwire.PgClient;
  // Publication the tables are expected to belong to.
  publicationName: string;
  // Connection tag used when constructing SourceTable instances.
  connectionTag: string;
  // Table patterns (exact names or wildcards) to resolve and inspect.
  tablePatterns: sync_rules.TablePattern[];
  // Sync rules used to determine which queries each table participates in.
  syncRules: sync_rules.SyncConfig;
}
|
|
201
|
-
|
|
202
|
-
/**
 * Resolve each table pattern against the source database and collect
 * diagnostic info for every matching table.
 *
 * Wildcard patterns produce a `tables` array; exact patterns produce a single
 * `table` entry (including a "not found" entry when no table matches).
 */
export async function getDebugTablesInfo(options: GetDebugTablesInfoOptions): Promise<PatternResult[]> {
  const { db, publicationName, connectionTag, tablePatterns, syncRules } = options;
  let result: PatternResult[] = [];

  for (let tablePattern of tablePatterns) {
    const schema = tablePattern.schema;

    let patternResult: PatternResult = {
      schema: schema,
      pattern: tablePattern.tablePattern,
      wildcard: tablePattern.isWildcard
    };
    result.push(patternResult);

    if (tablePattern.isWildcard) {
      patternResult.tables = [];
      const prefix = tablePattern.tablePrefix;
      // LIKE uses the pattern as-is; matches are re-checked against the
      // literal prefix below, since LIKE wildcards could over-match.
      const results = await lib_postgres.retriedQuery(db, {
        statement: `SELECT c.oid AS relid, c.relname AS table_name
        FROM pg_class c
        JOIN pg_namespace n ON n.oid = c.relnamespace
        WHERE n.nspname = $1
        AND c.relkind = 'r'
        AND c.relname LIKE $2`,
        params: [
          { type: 'varchar', value: schema },
          { type: 'varchar', value: tablePattern.tablePattern }
        ]
      });

      // Deliberately sequential: each table is inspected one at a time to
      // avoid flooding the connection pool.
      for (let row of pgwire.pgwireRows(results)) {
        const name = row.table_name as string;
        const relationId = row.relid as number;
        if (!name.startsWith(prefix)) {
          continue;
        }
        const details = await getDebugTableInfo({
          db,
          name,
          publicationName,
          connectionTag,
          tablePattern,
          relationId,
          syncRules: syncRules
        });
        patternResult.tables.push(details);
      }
    } else {
      const results = await lib_postgres.retriedQuery(db, {
        statement: `SELECT c.oid AS relid, c.relname AS table_name
        FROM pg_class c
        JOIN pg_namespace n ON n.oid = c.relnamespace
        WHERE n.nspname = $1
        AND c.relkind = 'r'
        AND c.relname = $2`,
        params: [
          { type: 'varchar', value: schema },
          { type: 'varchar', value: tablePattern.tablePattern }
        ]
      });
      if (results.rows.length == 0) {
        // Table not found
        // relationId: null makes getDebugTableInfo emit a "not found" warning entry.
        patternResult.table = await getDebugTableInfo({
          db,
          name: tablePattern.name,
          publicationName,
          connectionTag,
          tablePattern,
          relationId: null,
          syncRules: syncRules
        });
      } else {
        const row = pgwire.pgwireRows(results)[0];
        const name = row.table_name as string;
        const relationId = row.relid as number;
        patternResult.table = await getDebugTableInfo({
          db,
          name,
          publicationName,
          connectionTag,
          tablePattern,
          relationId,
          syncRules: syncRules
        });
      }
    }
  }
  return result;
}
|
|
291
|
-
|
|
292
|
-
/**
 * Options for {@link getDebugTableInfo}.
 */
export interface GetDebugTableInfoOptions {
  // Connection to the source database.
  db: pgwire.PgClient;
  // Resolved table name (may come from the pattern when the table was not found).
  name: string;
  // Publication the table is expected to belong to.
  publicationName: string;
  // Connection tag used when constructing the SourceTable.
  connectionTag: string;
  // The pattern this table was matched from.
  tablePattern: sync_rules.TablePattern;
  // pg_class oid of the table, or null when the table does not exist.
  relationId: number | null;
  // Sync rules used to determine which queries the table participates in.
  syncRules: sync_rules.SyncConfig;
}
|
|
301
|
-
|
|
302
|
-
/**
 * Collect diagnostic information for a single table: replica identity columns,
 * readability (SELECT + RLS), and publication membership, together with the
 * sync-rules queries it matches.
 *
 * All detected problems are accumulated into the returned `errors` array
 * rather than thrown, so the caller can present a full report.
 */
export async function getDebugTableInfo(options: GetDebugTableInfoOptions): Promise<service_types.TableInfo> {
  const { db, name, publicationName, connectionTag, tablePattern, relationId, syncRules } = options;
  const schema = tablePattern.schema;
  let id_columns_result: ReplicaIdentityResult | undefined = undefined;
  let id_columns_error = null;

  if (relationId != null) {
    try {
      id_columns_result = await getReplicationIdentityColumns(db, relationId);
    } catch (e) {
      // Keep the failure as a report entry instead of aborting the whole check.
      id_columns_error = { level: 'fatal', message: e.message };
    }
  }

  const id_columns = id_columns_result?.replicationColumns ?? [];

  const sourceTable = new storage.SourceTable({
    id: '', // not used
    connectionTag: connectionTag,
    objectId: relationId ?? 0,
    schema: schema,
    name: name,
    replicaIdColumns: id_columns,
    snapshotComplete: true
  });

  const syncData = syncRules.tableSyncsData(sourceTable);
  const syncParameters = syncRules.tableSyncsParameters(sourceTable);

  if (relationId == null) {
    // Table does not exist: report which queries would have used it, plus a warning.
    return {
      schema: schema,
      name: name,
      pattern: tablePattern.isWildcard ? tablePattern.tablePattern : undefined,
      replication_id: [],
      data_queries: syncData,
      parameter_queries: syncParameters,
      // Also report the missing table itself as a warning.
      errors: [{ level: 'warning', message: `Table ${sourceTable.qualifiedName} not found.` }]
    };
  }
  if (id_columns.length == 0 && id_columns_error == null) {
    let message = `No replication id found for ${sourceTable.qualifiedName}. Replica identity: ${id_columns_result?.replicationIdentity}.`;
    if (id_columns_result?.replicationIdentity == 'default') {
      message += ' Configure a primary key on the table.';
    }
    id_columns_error = { level: 'fatal', message };
  }

  // Verify the table is actually readable with the current role.
  let selectError = null;
  try {
    await lib_postgres.retriedQuery(db, `SELECT * FROM ${sourceTable.qualifiedName} LIMIT 1`);
  } catch (e) {
    selectError = { level: 'fatal', message: e.message };
  }

  let replicateError = null;

  const publications = await lib_postgres.retriedQuery(db, {
    statement: `SELECT tablename FROM pg_publication_tables WHERE pubname = $1 AND schemaname = $2 AND tablename = $3`,
    params: [
      { type: 'varchar', value: publicationName },
      { type: 'varchar', value: tablePattern.schema },
      { type: 'varchar', value: name }
    ]
  });
  if (publications.rows.length == 0) {
    replicateError = {
      level: 'fatal',
      message: `Table ${sourceTable.qualifiedName} is not part of publication '${publicationName}'. Run: \`ALTER PUBLICATION ${publicationName} ADD TABLE ${sourceTable.qualifiedName}\`.`
    };
  }

  // RLS problems are only warnings: policies may still permit full reads.
  const rlsCheck = await checkTableRls(db, relationId);
  const rlsError = rlsCheck.canRead ? null : { message: rlsCheck.message!, level: 'warning' };

  return {
    schema: schema,
    name: name,
    pattern: tablePattern.isWildcard ? tablePattern.tablePattern : undefined,
    replication_id: id_columns.map((c) => c.name),
    data_queries: syncData,
    parameter_queries: syncParameters,
    errors: [id_columns_error, selectError, replicateError, rlsError].filter(
      (error) => error != null
    ) as service_types.ReplicationError[]
  };
}
|
|
390
|
-
|
|
391
|
-
export async function cleanUpReplicationSlot(slotName: string, db: pgwire.PgClient): Promise<void> {
|
|
392
|
-
logger.info(`Cleaning up Postgres replication slot: ${slotName}...`);
|
|
393
|
-
|
|
394
|
-
await db.query({
|
|
395
|
-
statement: 'SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots WHERE slot_name = $1',
|
|
396
|
-
params: [{ type: 'varchar', value: slotName }]
|
|
397
|
-
});
|
|
398
|
-
}
|