@powersync/service-module-postgres 0.0.0-dev-20241007145127 → 0.0.0-dev-20241015210820
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +10 -6
- package/dist/api/PostgresRouteAPIAdapter.d.ts +2 -2
- package/dist/api/PostgresRouteAPIAdapter.js +4 -3
- package/dist/api/PostgresRouteAPIAdapter.js.map +1 -1
- package/dist/module/PostgresModule.d.ts +2 -2
- package/dist/module/PostgresModule.js +4 -4
- package/dist/module/PostgresModule.js.map +1 -1
- package/dist/replication/WalStream.d.ts +1 -1
- package/dist/replication/WalStream.js +14 -14
- package/dist/replication/WalStream.js.map +1 -1
- package/dist/replication/WalStreamReplicationJob.js +1 -1
- package/dist/replication/WalStreamReplicationJob.js.map +1 -1
- package/dist/replication/WalStreamReplicator.d.ts +2 -2
- package/dist/replication/WalStreamReplicator.js +1 -1
- package/dist/replication/WalStreamReplicator.js.map +1 -1
- package/dist/types/types.js +1 -1
- package/package.json +6 -6
- package/src/api/PostgresRouteAPIAdapter.ts +5 -4
- package/src/module/PostgresModule.ts +6 -13
- package/src/replication/WalStream.ts +16 -17
- package/src/replication/WalStreamReplicationJob.ts +1 -1
- package/src/replication/WalStreamReplicator.ts +2 -2
- package/src/types/types.ts +1 -1
- package/test/src/slow_tests.test.ts +5 -5
- package/test/src/util.ts +6 -4
- package/test/src/wal_stream_utils.ts +9 -12
- package/tsconfig.tsbuildinfo +1 -1
package/src/replication/WalStream.ts
CHANGED

@@ -1,11 +1,11 @@
-import * as pgwire from '@powersync/service-jpgwire';
-import * as util from '../utils/pgwire_utils.js';
 import { container, errors, logger } from '@powersync/lib-services-framework';
+import { getUuidReplicaIdentityBson, Metrics, SourceEntityDescriptor, storage } from '@powersync/service-core';
+import * as pgwire from '@powersync/service-jpgwire';
 import { DatabaseInputRow, SqliteRow, SqlSyncRules, TablePattern, toSyncRulesRow } from '@powersync/service-sync-rules';
+import * as pg_utils from '../utils/pgwire_utils.js';
+import { PgManager } from './PgManager.js';
 import { getPgOutputRelation, getRelId } from './PgRelation.js';
-import { getUuidReplicaIdentityBson, Metrics, SourceEntityDescriptor, storage } from '@powersync/service-core';
 import { checkSourceConfiguration, getReplicationIdentityColumns } from './replication-utils.js';
-import { PgManager } from './PgManager.js';

 export const ZERO_LSN = '00000000/00000000';
 export const PUBLICATION_NAME = 'powersync';
@@ -60,7 +60,7 @@ export class WalStream {
 // Ping to speed up cancellation of streaming replication
 // We're not using pg_snapshot here, since it could be in the middle of
 // an initial replication transaction.
-const promise =
+const promise = pg_utils.retriedQuery(
 this.connections.pool,
 `SELECT * FROM pg_logical_emit_message(false, 'powersync', 'ping')`
 );
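
The hunk above only shows the call site, which passes a connection pool and a SQL string to pg_utils.retriedQuery; the helper itself is not part of this diff. A minimal sketch of what such a retry wrapper could look like, where the retry count and logging are assumptions rather than the actual implementation in ../utils/pgwire_utils.js:

import { logger } from '@powersync/lib-services-framework';
import * as pgwire from '@powersync/service-jpgwire';

// Hypothetical sketch only: retry a simple statement a couple of times before giving up.
export async function retriedQuery(pool: pgwire.PgClient, sql: string, retries = 2) {
  for (;;) {
    try {
      return await pool.query(sql);
    } catch (e) {
      if (retries <= 0) {
        throw e;
      }
      retries -= 1;
      logger.warn(`Query failed, retrying: ${e}`);
    }
  }
}
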
@@ -347,7 +347,6 @@ WHERE oid = $1::regclass`,
 for (let table of tables) {
 await this.snapshotTable(batch, db, table);
 await batch.markSnapshotDone([table], lsn);
-
 await touch();
 }
 }
@@ -395,10 +394,10 @@ WHERE oid = $1::regclass`,
 throw new Error(`Aborted initial replication of ${this.slot_name}`);
 }

-for (
+for (const record of WalStream.getQueryData(rows)) {
 // This auto-flushes when the batch reaches its size limit
 await batch.save({
-tag:
+tag: storage.SaveOperationTag.INSERT,
 sourceTable: table,
 before: undefined,
 beforeReplicaId: undefined,
@@ -406,6 +405,7 @@ WHERE oid = $1::regclass`,
 afterReplicaId: getUuidReplicaIdentityBson(record, table.replicaIdColumns)
 });
 }
+
 at += rows.length;
 Metrics.getInstance().rows_replicated_total.add(rows.length);

@@ -495,9 +495,9 @@ WHERE oid = $1::regclass`,

 if (msg.tag == 'insert') {
 Metrics.getInstance().rows_replicated_total.add(1);
-const baseRecord =
+const baseRecord = pg_utils.constructAfterRecord(msg);
 return await batch.save({
-tag:
+tag: storage.SaveOperationTag.INSERT,
 sourceTable: table,
 before: undefined,
 beforeReplicaId: undefined,
@@ -508,10 +508,10 @@ WHERE oid = $1::regclass`,
 Metrics.getInstance().rows_replicated_total.add(1);
 // "before" may be null if the replica id columns are unchanged
 // It's fine to treat that the same as an insert.
-const before =
-const after =
+const before = pg_utils.constructBeforeRecord(msg);
+const after = pg_utils.constructAfterRecord(msg);
 return await batch.save({
-tag:
+tag: storage.SaveOperationTag.UPDATE,
 sourceTable: table,
 before: before,
 beforeReplicaId: before ? getUuidReplicaIdentityBson(before, table.replicaIdColumns) : undefined,
@@ -520,10 +520,10 @@ WHERE oid = $1::regclass`,
 });
 } else if (msg.tag == 'delete') {
 Metrics.getInstance().rows_replicated_total.add(1);
-const before =
+const before = pg_utils.constructBeforeRecord(msg)!;

 return await batch.save({
-tag:
+tag: storage.SaveOperationTag.DELETE,
 sourceTable: table,
 before: before,
 beforeReplicaId: getUuidReplicaIdentityBson(before, table.replicaIdColumns),
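
The three hunks above replace the previous tag values with the storage.SaveOperationTag enum from @powersync/service-core and route row decoding through the renamed pg_utils helpers. A condensed, hypothetical paraphrase of the resulting dispatch follows; batch, table and msg stand in for the real WalStream state, and the after/afterReplicaId fields plus the 'update' tag check are inferred from surrounding context rather than visible in the diff:

import { getUuidReplicaIdentityBson, storage } from '@powersync/service-core';
import * as pg_utils from '../utils/pgwire_utils.js';

// Illustrative paraphrase of the insert/update/delete handling shown in the diff.
async function saveChange(batch: any, table: any, msg: any) {
  if (msg.tag == 'insert') {
    const after = pg_utils.constructAfterRecord(msg);
    return await batch.save({
      tag: storage.SaveOperationTag.INSERT,
      sourceTable: table,
      before: undefined,
      beforeReplicaId: undefined,
      after: after,
      afterReplicaId: getUuidReplicaIdentityBson(after, table.replicaIdColumns)
    });
  } else if (msg.tag == 'update') {
    // "before" may be null if the replica id columns are unchanged.
    const before = pg_utils.constructBeforeRecord(msg);
    const after = pg_utils.constructAfterRecord(msg);
    return await batch.save({
      tag: storage.SaveOperationTag.UPDATE,
      sourceTable: table,
      before: before,
      beforeReplicaId: before ? getUuidReplicaIdentityBson(before, table.replicaIdColumns) : undefined,
      after: after,
      afterReplicaId: getUuidReplicaIdentityBson(after, table.replicaIdColumns)
    });
  } else if (msg.tag == 'delete') {
    const before = pg_utils.constructBeforeRecord(msg)!;
    return await batch.save({
      tag: storage.SaveOperationTag.DELETE,
      sourceTable: table,
      before: before,
      beforeReplicaId: getUuidReplicaIdentityBson(before, table.replicaIdColumns)
    });
  }
}
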
@@ -592,7 +592,6 @@ WHERE oid = $1::regclass`,
 // chunkLastLsn may come from normal messages in the chunk,
 // or from a PrimaryKeepalive message.
 const { messages, lastLsn: chunkLastLsn } = chunk;
-
 for (const msg of messages) {
 if (msg.tag == 'relation') {
 await this.handleRelation(batch, getPgOutputRelation(msg), true);
@@ -609,7 +608,7 @@ WHERE oid = $1::regclass`,
 }

 count += 1;
-
+await this.writeChange(batch, msg);
 }
 }


package/src/replication/WalStreamReplicationJob.ts
CHANGED

@@ -1,6 +1,6 @@
-import { MissingReplicationSlotError, WalStream } from './WalStream.js';
 import { container } from '@powersync/lib-services-framework';
 import { PgManager } from './PgManager.js';
+import { MissingReplicationSlotError, WalStream } from './WalStream.js';

 import { replication } from '@powersync/service-core';
 import { ConnectionManagerFactory } from './ConnectionManagerFactory.js';

package/src/replication/WalStreamReplicator.ts
CHANGED

@@ -1,7 +1,7 @@
-import {
-import { WalStreamReplicationJob } from './WalStreamReplicationJob.js';
+import { replication, storage } from '@powersync/service-core';
 import { ConnectionManagerFactory } from './ConnectionManagerFactory.js';
 import { cleanUpReplicationSlot } from './replication-utils.js';
+import { WalStreamReplicationJob } from './WalStreamReplicationJob.js';

 export interface WalStreamReplicatorOptions extends replication.AbstractReplicatorOptions {
 connectionFactory: ConnectionManagerFactory;

package/src/types/types.ts
CHANGED

@@ -22,7 +22,7 @@ export interface NormalizedPostgresConnectionConfig {
 client_private_key: string | undefined;
 }

-export const PostgresConnectionConfig = service_types.configFile.
+export const PostgresConnectionConfig = service_types.configFile.DataSourceConfig.and(
 t.object({
 type: t.literal(POSTGRES_CONNECTION_TYPE),
 /** Unique identifier for the connection - optional when a single connection is present. */
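
The change above composes the Postgres-specific fields onto the shared service_types.configFile.DataSourceConfig codec via .and(...). The codec library used by types.ts is not shown in this diff; as a rough analogy for the intersection pattern only, here is the same idea expressed with zod (an assumption for illustration, not the library actually used by this package):

import { z } from 'zod';

// A shared base schema, analogous in role to DataSourceConfig.
const BaseDataSourceConfig = z.object({
  id: z.string().optional()
});

// Intersect the base schema with connection-type specific fields.
const PostgresishConnectionConfig = BaseDataSourceConfig.and(
  z.object({
    type: z.literal('postgresql'),
    uri: z.string()
  })
);

// Parsing accepts objects that satisfy both halves of the intersection.
PostgresishConnectionConfig.parse({ type: 'postgresql', uri: 'postgresql://localhost:5432/db' });
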

package/test/src/slow_tests.test.ts
CHANGED

@@ -2,16 +2,16 @@ import * as bson from 'bson';
 import { afterEach, describe, expect, test } from 'vitest';
 import { WalStream, WalStreamOptions } from '../../src/replication/WalStream.js';
 import { env } from './env.js';
-import {
+import { clearTestDb, connectPgPool, getClientCheckpoint, TEST_CONNECTION_OPTIONS } from './util.js';

 import * as pgwire from '@powersync/service-jpgwire';
 import { SqliteRow } from '@powersync/service-sync-rules';

 import { mapOpEntry, MongoBucketStorage } from '@/storage/storage-index.js';
-import
+import { reduceBucket, validateCompactedBucket } from '@core-tests/bucket_validation.js';
 import { MONGO_STORAGE_FACTORY, StorageFactory } from '@core-tests/util.js';
 import { PgManager } from '@module/replication/PgManager.js';
-import
+import * as timers from 'node:timers/promises';

 describe('slow tests - mongodb', function () {
 // These are slow, inconsistent tests.
@@ -82,7 +82,7 @@ bucket_definitions:
 - SELECT * FROM "test_data"
 `;
 const syncRules = await f.updateSyncRules({ content: syncRuleContent });
-
+using storage = f.getInstance(syncRules);
 abortController = new AbortController();
 const options: WalStreamOptions = {
 abort_signal: abortController.signal,
@@ -234,7 +234,7 @@ bucket_definitions:
 - SELECT id, description FROM "test_data"
 `;
 const syncRules = await f.updateSyncRules({ content: syncRuleContent });
-
+using storage = f.getInstance(syncRules);

 // 1. Setup some base data that will be replicated in initial replication
 await pool.query(`CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)`);
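
Both test hunks above replace a plain assignment with a "using" declaration, so the storage instance returned by f.getInstance(syncRules) is disposed automatically when the enclosing scope exits. For readers unfamiliar with the syntax, a small self-contained illustration of TC39 explicit resource management (TypeScript 5.2+; unrelated to the PowerSync APIs):

// Requires TypeScript 5.2+ and a runtime or polyfill that provides Symbol.dispose.
class TempResource implements Disposable {
  constructor(private name: string) {}
  [Symbol.dispose]() {
    console.log(`disposed ${this.name}`);
  }
}

function demo() {
  using res = new TempResource('storage');
  console.log('working with', res);
  // res[Symbol.dispose]() runs automatically here, even if an exception was thrown.
}

demo();
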
package/test/src/util.ts
CHANGED

@@ -1,11 +1,11 @@
+import { connectMongo } from '@core-tests/util.js';
 import * as types from '@module/types/types.js';
 import * as pg_utils from '@module/utils/pgwire_utils.js';
+import { logger } from '@powersync/lib-services-framework';
 import { BucketStorageFactory, Metrics, MongoBucketStorage, OpId } from '@powersync/service-core';
 import * as pgwire from '@powersync/service-jpgwire';
-import { env } from './env.js';
 import { pgwireRows } from '@powersync/service-jpgwire';
-import {
-import { connectMongo } from '@core-tests/util.js';
+import { env } from './env.js';

 // The metrics need to be initialized before they can be used
 await Metrics.initialise({
@@ -35,7 +35,9 @@ export const INITIALIZED_MONGO_STORAGE_FACTORY: StorageFactory = async () => {

 await db.clear();

-return new MongoBucketStorage(db, {
+return new MongoBucketStorage(db, {
+slot_name_prefix: 'test_'
+});
 };

 export async function clearTestDb(db: pgwire.PgClient) {

package/test/src/wal_stream_utils.ts
CHANGED

@@ -1,9 +1,9 @@
-import { BucketStorageFactory, SyncRulesBucketStorage } from '@powersync/service-core';
-import * as pgwire from '@powersync/service-jpgwire';
-import { TEST_CONNECTION_OPTIONS, clearTestDb, getClientCheckpoint } from './util.js';
-import { WalStream, WalStreamOptions, PUBLICATION_NAME } from '@module/replication/WalStream.js';
 import { fromAsync } from '@core-tests/stream_utils.js';
 import { PgManager } from '@module/replication/PgManager.js';
+import { PUBLICATION_NAME, WalStream, WalStreamOptions } from '@module/replication/WalStream.js';
+import { BucketStorageFactory, SyncRulesBucketStorage } from '@powersync/service-core';
+import * as pgwire from '@powersync/service-jpgwire';
+import { clearTestDb, getClientCheckpoint, TEST_CONNECTION_OPTIONS } from './util.js';

 /**
 * Tests operating on the wal stream need to configure the stream and manage asynchronous
@@ -20,16 +20,12 @@ export function walStreamTest(
 const connectionManager = new PgManager(TEST_CONNECTION_OPTIONS, {});

 await clearTestDb(connectionManager.pool);
-
-
-await test(context);
-} finally {
-await context.dispose();
-}
+await using context = new WalStreamTestContext(f, connectionManager);
+await test(context);
 };
 }

-export class WalStreamTestContext {
+export class WalStreamTestContext implements AsyncDisposable {
 private _walStream?: WalStream;
 private abortController = new AbortController();
 private streamPromise?: Promise<void>;
@@ -41,10 +37,11 @@ export class WalStreamTestContext {
 public connectionManager: PgManager
 ) {}

-async
+async [Symbol.asyncDispose]() {
 this.abortController.abort();
 await this.streamPromise;
 await this.connectionManager.destroy();
+this.storage?.[Symbol.dispose]();
 }

 get pool() {
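
WalStreamTestContext now implements AsyncDisposable, and walStreamTest consumes it with an "await using" declaration, replacing the earlier try/finally plus dispose() pattern. A minimal standalone sketch of the same mechanism, assuming only standard TypeScript 5.2+ semantics (FakeContext and runTest are illustrative names, not PowerSync code):

// Requires TypeScript 5.2+ and a runtime or polyfill that provides Symbol.asyncDispose.
class FakeContext implements AsyncDisposable {
  async [Symbol.asyncDispose]() {
    // Plays the same role as the abort/await/destroy/dispose sequence above.
    console.log('async cleanup complete');
  }
}

async function runTest(test: (ctx: FakeContext) => Promise<void>) {
  await using ctx = new FakeContext();
  await test(ctx);
  // ctx[Symbol.asyncDispose]() is awaited automatically when this scope exits,
  // whether the callback resolves or throws.
}

await runTest(async (ctx) => {
  console.log('running test with', ctx);
});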