@powersync/service-module-postgres-storage 0.7.5 → 0.8.1
This diff compares the contents of two publicly released versions of the package, as published to their public registry, and is provided for informational purposes only.
- package/CHANGELOG.md +33 -0
- package/dist/.tsbuildinfo +1 -1
- package/dist/@types/migrations/scripts/1749024804042-snapshot-progress.d.ts +3 -0
- package/dist/@types/storage/PostgresSyncRulesStorage.d.ts +1 -2
- package/dist/@types/storage/PostgresTestStorageFactoryGenerator.d.ts +5 -1
- package/dist/@types/storage/batch/PostgresBucketBatch.d.ts +8 -2
- package/dist/@types/storage/checkpoints/PostgresWriteCheckpointAPI.d.ts +2 -3
- package/dist/@types/storage/sync-rules/PostgresPersistedSyncRulesContent.d.ts +1 -0
- package/dist/@types/types/models/SourceTable.d.ts +3 -0
- package/dist/@types/types/models/SyncRules.d.ts +4 -0
- package/dist/migrations/scripts/1684951997326-init.js.map +1 -1
- package/dist/migrations/scripts/1749024804042-snapshot-progress.js +110 -0
- package/dist/migrations/scripts/1749024804042-snapshot-progress.js.map +1 -0
- package/dist/storage/PostgresSyncRulesStorage.js +17 -8
- package/dist/storage/PostgresSyncRulesStorage.js.map +1 -1
- package/dist/storage/PostgresTestStorageFactoryGenerator.js +48 -37
- package/dist/storage/PostgresTestStorageFactoryGenerator.js.map +1 -1
- package/dist/storage/batch/PostgresBucketBatch.js +61 -12
- package/dist/storage/batch/PostgresBucketBatch.js.map +1 -1
- package/dist/storage/checkpoints/PostgresWriteCheckpointAPI.js +1 -5
- package/dist/storage/checkpoints/PostgresWriteCheckpointAPI.js.map +1 -1
- package/dist/storage/sync-rules/PostgresPersistedSyncRulesContent.js +2 -0
- package/dist/storage/sync-rules/PostgresPersistedSyncRulesContent.js.map +1 -1
- package/dist/types/models/SourceTable.js +5 -2
- package/dist/types/models/SourceTable.js.map +1 -1
- package/dist/types/models/SyncRules.js +4 -0
- package/dist/types/models/SyncRules.js.map +1 -1
- package/package.json +7 -7
- package/src/migrations/scripts/1684951997326-init.ts +0 -1
- package/src/migrations/scripts/1749024804042-snapshot-progress.ts +43 -0
- package/src/storage/PostgresSyncRulesStorage.ts +17 -11
- package/src/storage/PostgresTestStorageFactoryGenerator.ts +48 -36
- package/src/storage/batch/PostgresBucketBatch.ts +75 -15
- package/src/storage/checkpoints/PostgresWriteCheckpointAPI.ts +5 -10
- package/src/storage/sync-rules/PostgresPersistedSyncRulesContent.ts +2 -0
- package/src/types/models/SourceTable.ts +5 -2
- package/src/types/models/SyncRules.ts +4 -0
- package/test/src/migrations.test.ts +10 -2
- package/test/src/util.ts +7 -2
package/src/storage/PostgresTestStorageFactoryGenerator.ts
CHANGED
@@ -12,50 +12,62 @@ export type PostgresTestStorageOptions = {
   migrationAgent?: (config: PostgresStorageConfigDecoded) => PostgresMigrationAgent;
 };
 
-export const …
-  … (old lines 16–36 were not captured in this diff view)
-          direction: framework.migrations.Direction.Down,
-          migrationContext: {
-            service_context: mockServiceContext
-          }
-        });
+export const postgresTestSetup = (factoryOptions: PostgresTestStorageOptions) => {
+  const BASE_CONFIG = {
+    type: 'postgresql' as const,
+    uri: factoryOptions.url,
+    sslmode: 'disable' as const
+  };
+
+  const TEST_CONNECTION_OPTIONS = normalizePostgresStorageConfig(BASE_CONFIG);
+
+  const migrate = async (direction: framework.migrations.Direction) => {
+    await using migrationManager: PowerSyncMigrationManager = new framework.MigrationManager();
+    await using migrationAgent = factoryOptions.migrationAgent
+      ? factoryOptions.migrationAgent(BASE_CONFIG)
+      : new PostgresMigrationAgent(BASE_CONFIG);
+    migrationManager.registerMigrationAgent(migrationAgent);
+
+    const mockServiceContext = { configuration: { storage: BASE_CONFIG } } as unknown as ServiceContext;
+
+    await migrationManager.migrate({
+      direction: framework.migrations.Direction.Down,
+      migrationContext: {
+        service_context: mockServiceContext
       }
+    });
 
+    if (direction == framework.migrations.Direction.Up) {
       await migrationManager.migrate({
         direction: framework.migrations.Direction.Up,
         migrationContext: {
           service_context: mockServiceContext
         }
       });
-
-      return new PostgresBucketStorageFactory({
-        config: TEST_CONNECTION_OPTIONS,
-        slot_name_prefix: 'test_'
-      });
-    } catch (ex) {
-      // Vitest does not display these errors nicely when using the `await using` syntx
-      console.error(ex, ex.cause);
-      throw ex;
     }
   };
+
+  return {
+    factory: async (options?: TestStorageOptions) => {
+      try {
+        if (!options?.doNotClear) {
+          await migrate(framework.migrations.Direction.Up);
+        }
+
+        return new PostgresBucketStorageFactory({
+          config: TEST_CONNECTION_OPTIONS,
+          slot_name_prefix: 'test_'
+        });
+      } catch (ex) {
+        // Vitest does not display these errors nicely when using the `await using` syntx
+        console.error(ex, ex.cause);
+        throw ex;
+      }
+    },
+    migrate
+  };
+};
+
+export const PostgresTestStorageFactoryGenerator = (factoryOptions: PostgresTestStorageOptions) => {
+  return postgresTestSetup(factoryOptions).factory;
 };
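The hunk above splits the old one-shot factory generator into `postgresTestSetup`, which exposes both a `factory` and a standalone `migrate` function. A hedged usage sketch based only on the exported shape above (the import path and connection URL are placeholders; tests in this package import from `'../../src/storage/PostgresTestStorageFactoryGenerator.js'`):

```ts
import * as framework from '@powersync/lib-services-framework';
import { postgresTestSetup } from '../src/storage/PostgresTestStorageFactoryGenerator.js';

const setup = postgresTestSetup({ url: 'postgres://localhost:5432/powersync_storage_test' });

// Run all down migrations, clearing the storage schema.
await setup.migrate(framework.migrations.Direction.Down);

// Create a storage factory; unless doNotClear is set, this first re-runs
// the up migrations so each test starts from a clean schema.
const storageFactory = await setup.factory();
```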
package/src/storage/batch/PostgresBucketBatch.ts
CHANGED
@@ -4,12 +4,12 @@ import {
   container,
   ErrorCode,
   errors,
-  logger,
+  Logger,
   ReplicationAssertionError,
   ServiceAssertionError,
   ServiceError
 } from '@powersync/lib-services-framework';
-import { InternalOpId, storage, utils } from '@powersync/service-core';
+import { BucketStorageMarkRecordUnavailable, InternalOpId, storage, utils } from '@powersync/service-core';
 import * as sync_rules from '@powersync/service-sync-rules';
 import * as timers from 'timers/promises';
 import * as t from 'ts-codec';
@@ -22,6 +22,7 @@ import { cacheKey, encodedCacheKey, OperationBatch, RecordOperation } from './Op
 import { PostgresPersistedBatch } from './PostgresPersistedBatch.js';
 
 export interface PostgresBucketBatchOptions {
+  logger: Logger;
   db: lib_postgres.DatabaseClient;
   sync_rules: sync_rules.SqlSyncRules;
   group_id: number;
@@ -35,6 +36,8 @@ export interface PostgresBucketBatchOptions {
    */
   skip_existing_rows: boolean;
   batch_limits: RequiredOperationBatchLimits;
+
+  markRecordUnavailable: BucketStorageMarkRecordUnavailable | undefined;
 }
 
 /**
@@ -54,6 +57,8 @@ export class PostgresBucketBatch
   extends BaseObserver<storage.BucketBatchStorageListener>
   implements storage.BucketStorageBatch
 {
+  private logger: Logger;
+
   public last_flushed_op: InternalOpId | null = null;
 
   protected db: lib_postgres.DatabaseClient;
@@ -67,15 +72,18 @@ export class PostgresBucketBatch
   protected readonly sync_rules: sync_rules.SqlSyncRules;
   protected batch: OperationBatch | null;
   private lastWaitingLogThrottled = 0;
+  private markRecordUnavailable: BucketStorageMarkRecordUnavailable | undefined;
 
   constructor(protected options: PostgresBucketBatchOptions) {
     super();
+    this.logger = options.logger;
     this.db = options.db;
     this.group_id = options.group_id;
     this.last_checkpoint_lsn = options.last_checkpoint_lsn;
     this.no_checkpoint_before_lsn = options.no_checkpoint_before_lsn;
     this.write_checkpoint_batch = [];
     this.sync_rules = options.sync_rules;
+    this.markRecordUnavailable = options.markRecordUnavailable;
     this.batch = null;
     this.persisted_op = null;
     if (options.keep_alive_op) {
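The remaining hunks in this file replace the module-level `logger` with the injected `this.logger`, so batches created for different sync-rules instances can log through differently tagged loggers. A minimal self-contained sketch of the pattern (the `Logger` interface here is simplified, not the framework's actual type):

```ts
// Simplified stand-in for the framework Logger; only the methods used here.
interface Logger {
  debug(message: string): void;
  info(message: string): void;
}

class ExampleBatch {
  private logger: Logger;

  constructor(options: { logger: Logger }) {
    // Injected per instance instead of imported as a module-level singleton.
    this.logger = options.logger;
  }

  save(tag: string) {
    this.logger.debug(`Saving ${tag}`);
  }
}
```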
@@ -115,7 +123,7 @@ export class PostgresBucketBatch
       return null;
     }
 
-    logger.debug(`Saving ${record.tag}:${record.before?.id}/${record.after?.id}`);
+    this.logger.debug(`Saving ${record.tag}:${record.before?.id}/${record.after?.id}`);
 
     this.batch ??= new OperationBatch(this.options.batch_limits);
     this.batch.push(new RecordOperation(record));
@@ -279,13 +287,14 @@ export class PostgresBucketBatch
     if (this.last_checkpoint_lsn != null && lsn < this.last_checkpoint_lsn) {
       // When re-applying transactions, don't create a new checkpoint until
       // we are past the last transaction.
-      logger.info(`Re-applied transaction ${lsn} - skipping checkpoint`);
+      this.logger.info(`Re-applied transaction ${lsn} - skipping checkpoint`);
+      // Cannot create a checkpoint yet - return false
       return false;
     }
 
     if (lsn < this.no_checkpoint_before_lsn) {
       if (Date.now() - this.lastWaitingLogThrottled > 5_000) {
-        logger.info(
+        this.logger.info(
           `Waiting until ${this.no_checkpoint_before_lsn} before creating checkpoint, currently at ${lsn}. Persisted op: ${this.persisted_op}`
         );
         this.lastWaitingLogThrottled = Date.now();
@@ -305,12 +314,14 @@ export class PostgresBucketBatch
           id = ${{ type: 'int4', value: this.group_id }}
       `.execute();
 
+      // Cannot create a checkpoint yet - return false
       return false;
     }
 
     // Don't create a checkpoint if there were no changes
     if (!createEmptyCheckpoints && this.persisted_op == null) {
-      return;
+      // Nothing to commit - return true
+      return true;
     }
 
     const now = new Date().toISOString();
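The two hunks above pin down the boolean contract of `commit()`: `false` when a checkpoint cannot be created yet, `true` on success and, now explicitly, when there is nothing to commit. A reduced sketch of the resulting control flow (state shape simplified from the fields above; persistence elided):

```ts
// Reduced sketch of the commit(lsn) contract after this change.
async function commit(
  lsn: string,
  state: {
    lastCheckpointLsn: string | null;
    noCheckpointBeforeLsn: string;
    persistedOp: bigint | null;
  },
  createEmptyCheckpoints: boolean
): Promise<boolean> {
  if (state.lastCheckpointLsn != null && lsn < state.lastCheckpointLsn) {
    // Re-applying an old transaction: cannot create a checkpoint yet.
    return false;
  }
  if (lsn < state.noCheckpointBeforeLsn) {
    // Still waiting to pass the no-checkpoint boundary.
    return false;
  }
  if (!createEmptyCheckpoints && state.persistedOp == null) {
    // Nothing to commit - still a successful (empty) commit.
    return true;
  }
  // ... persist the checkpoint row ...
  return true;
}
```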
@@ -333,6 +344,7 @@ export class PostgresBucketBatch
           keepalive_op = ${{ type: 'int8', value: update.keepalive_op }},
           last_fatal_error = ${{ type: 'varchar', value: update.last_fatal_error }},
           snapshot_done = ${{ type: 'bool', value: update.snapshot_done }},
+          snapshot_lsn = NULL,
           last_keepalive_ts = ${{ type: 1184, value: update.last_keepalive_ts }},
           last_checkpoint = COALESCE(
             ${{ type: 'int8', value: update.last_checkpoint }},
@@ -371,7 +383,7 @@ export class PostgresBucketBatch
     if (this.persisted_op != null) {
       // The commit may have been skipped due to "no_checkpoint_before_lsn".
       // Apply it now if relevant
-      logger.info(`Commit due to keepalive at ${lsn} / ${this.persisted_op}`);
+      this.logger.info(`Commit due to keepalive at ${lsn} / ${this.persisted_op}`);
       return await this.commit(lsn);
     }
 
@@ -379,6 +391,7 @@ export class PostgresBucketBatch
       UPDATE sync_rules
       SET
         snapshot_done = ${{ type: 'bool', value: true }},
+        snapshot_lsn = NULL,
         last_checkpoint_lsn = ${{ type: 'varchar', value: lsn }},
         last_fatal_error = ${{ type: 'varchar', value: null }},
         last_keepalive_ts = ${{ type: 1184, value: new Date().toISOString() }}
@@ -399,6 +412,16 @@ export class PostgresBucketBatch
     return true;
   }
 
+  async setSnapshotLsn(lsn: string): Promise<void> {
+    await this.db.sql`
+      UPDATE sync_rules
+      SET
+        snapshot_lsn = ${{ type: 'varchar', value: lsn }}
+      WHERE
+        id = ${{ type: 'int4', value: this.group_id }}
+    `.execute();
+  }
+
   async markSnapshotDone(
     tables: storage.SourceTable[],
     no_checkpoint_before_lsn: string
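`setSnapshotLsn` persists the replication stream position while an initial snapshot is still running, which is what lets a restarted service resume from `snapshot_lsn` instead of starting over; the checkpoint hunks above clear the column again (`snapshot_lsn = NULL`) once a consistent checkpoint exists. A reduced sketch of the intended call pattern (the batch interface is narrowed to the two methods shown in this diff; the real `storage.BucketStorageBatch` has many more members):

```ts
// Narrowed interface for illustration only.
interface SnapshotBatch {
  setSnapshotLsn(lsn: string): Promise<void>;
  markSnapshotDone(tables: string[], noCheckpointBeforeLsn: string): Promise<void>;
}

async function runInitialSnapshot(batch: SnapshotBatch, streamLsn: string, tables: string[]) {
  // Record where the replication stream was when the snapshot started,
  // so a restart can resume rather than re-snapshot everything.
  await batch.setSnapshotLsn(streamLsn);

  // ... copy table rows in chunks, reporting progress via updateTableProgress ...

  // Finishing the snapshot clears the per-table progress columns.
  await batch.markSnapshotDone(tables, streamLsn);
}
```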
@@ -409,7 +432,10 @@ export class PostgresBucketBatch
       await db.sql`
         UPDATE source_tables
         SET
-          snapshot_done = ${{ type: 'bool', value: true }}
+          snapshot_done = ${{ type: 'bool', value: true }},
+          snapshot_total_estimated_count = NULL,
+          snapshot_replicated_count = NULL,
+          snapshot_last_key = NULL
         WHERE
           id IN (
             SELECT
@@ -448,6 +474,31 @@ export class PostgresBucketBatch
     });
   }
 
+  async updateTableProgress(
+    table: storage.SourceTable,
+    progress: Partial<storage.TableSnapshotStatus>
+  ): Promise<storage.SourceTable> {
+    const copy = table.clone();
+    const snapshotStatus = {
+      totalEstimatedCount: progress.totalEstimatedCount ?? copy.snapshotStatus?.totalEstimatedCount ?? 0,
+      replicatedCount: progress.replicatedCount ?? copy.snapshotStatus?.replicatedCount ?? 0,
+      lastKey: progress.lastKey ?? copy.snapshotStatus?.lastKey ?? null
+    };
+    copy.snapshotStatus = snapshotStatus;
+
+    await this.db.sql`
+      UPDATE source_tables
+      SET
+        snapshot_total_estimated_count = ${{ type: 'int4', value: snapshotStatus.totalEstimatedCount }},
+        snapshot_replicated_count = ${{ type: 'int4', value: snapshotStatus.replicatedCount }},
+        snapshot_last_key = ${{ type: 'bytea', value: snapshotStatus.lastKey }}
+      WHERE
+        id = ${{ type: 'varchar', value: table.id }}
+    `.execute();
+
+    return copy;
+  }
+
   addCustomWriteCheckpoint(checkpoint: storage.BatchedCustomWriteCheckpointOptions): void {
     this.write_checkpoint_batch.push({
       ...checkpoint,
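`updateTableProgress` merges a partial progress update into the stored status and writes it through to the new `source_tables` columns. A hedged sketch of a chunked snapshot loop using it (local types are narrowed stand-ins for the real `storage.SourceTable` and `TableSnapshotStatus`; `readChunk` is hypothetical):

```ts
// Narrowed local types for illustration.
interface SnapshotStatus {
  totalEstimatedCount: number;
  replicatedCount: number;
  lastKey: Uint8Array | null;
}
interface Table {
  snapshotStatus?: SnapshotStatus;
}
interface ProgressBatch {
  updateTableProgress(table: Table, progress: Partial<SnapshotStatus>): Promise<Table>;
}

// Hypothetical chunk reader for one page of rows after `lastKey`.
declare function readChunk(
  table: Table,
  lastKey: Uint8Array | null
): Promise<{ rowCount: number; lastKey: Uint8Array | null }>;

async function snapshotTable(batch: ProgressBatch, table: Table) {
  let current = table;
  let lastKey = current.snapshotStatus?.lastKey ?? null;
  for (;;) {
    const chunk = await readChunk(current, lastKey);
    if (chunk.rowCount == 0) break;
    lastKey = chunk.lastKey;
    // Persist progress so a restart resumes from snapshot_last_key
    // instead of row zero.
    current = await batch.updateTableProgress(current, {
      replicatedCount: (current.snapshotStatus?.replicatedCount ?? 0) + chunk.rowCount,
      lastKey
    });
  }
}
```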
@@ -657,10 +708,19 @@ export class PostgresBucketBatch
             existingBuckets = [];
             existingLookups = [];
             // Log to help with debugging if there was a consistency issue
+
             if (this.options.store_current_data) {
-              logger.warn(
-                `Cannot find previous record for update on ${record.sourceTable.qualifiedName}: ${beforeId} / ${record.before?.id}`
-              );
+              if (this.markRecordUnavailable != null) {
+                // This will trigger a "resnapshot" of the record.
+                // This is not relevant if storeCurrentData is false, since we'll get the full row
+                // directly in the replication stream.
+                this.markRecordUnavailable(record);
+              } else {
+                // Log to help with debugging if there was a consistency issue
+                this.logger.warn(
+                  `Cannot find previous record for update on ${record.sourceTable.qualifiedName}: ${beforeId} / ${record.before?.id}`
+                );
+              }
             }
           } else {
             existingBuckets = result.buckets;
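This hunk is the behavioural core of the new `markRecordUnavailable` option: when `store_current_data` is enabled and the previous version of an updated row cannot be found, the batch hands the record back to the caller instead of only logging, so the replication module can queue a re-snapshot of that row. A hedged sketch of how a caller might wire the option (the queue and record type are illustrative, not the package's actual shapes):

```ts
// Illustrative only: the real record type is the batch's save-record shape.
type SourceRecord = { table: string; id: string };

const resnapshotQueue: SourceRecord[] = [];

// Passed alongside db, sync_rules, group_id, etc. when constructing the batch.
const markRecordUnavailable = (record: SourceRecord) => {
  // Re-read this row from the source table later; only meaningful when
  // store_current_data is true, since otherwise the replication stream
  // already carries full rows.
  resnapshotQueue.push(record);
};
```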
@@ -677,8 +737,8 @@ export class PostgresBucketBatch
             existingBuckets = [];
             existingLookups = [];
             // Log to help with debugging if there was a consistency issue
-            if (this.options.store_current_data) {
-              logger.warn(
+            if (this.options.store_current_data && this.markRecordUnavailable == null) {
+              this.logger.warn(
                 `Cannot find previous record for delete on ${record.sourceTable.qualifiedName}: ${beforeId} / ${record.before?.id}`
               );
             }
@@ -771,7 +831,7 @@ export class PostgresBucketBatch
             }
           }
         );
-        logger.error(
+        this.logger.error(
           `Failed to evaluate data query on ${record.sourceTable.qualifiedName}.${record.after?.id}: ${error.error}`
         );
       }
@@ -811,7 +871,7 @@ export class PostgresBucketBatch
             }
           }
         );
-        logger.error(
+        this.logger.error(
           `Failed to evaluate parameter query on ${record.sourceTable.qualifiedName}.${after.id}: ${error.error}`
         );
       }
package/src/storage/checkpoints/PostgresWriteCheckpointAPI.ts
CHANGED
@@ -1,7 +1,6 @@
 import * as lib_postgres from '@powersync/lib-service-postgres';
 import * as framework from '@powersync/lib-services-framework';
-import { storage } from '@powersync/service-core';
-import { JSONBig, JsonContainer } from '@powersync/service-jsonbig';
+import { InternalOpId, storage } from '@powersync/service-core';
 import { models } from '../../types/types.js';
 
 export type PostgresCheckpointAPIOptions = {
@@ -26,7 +25,10 @@ export class PostgresWriteCheckpointAPI implements storage.WriteCheckpointAPI {
     this._mode = mode;
   }
 
-  async batchCreateCustomWriteCheckpoints(checkpoints: storage.CustomWriteCheckpointOptions[]): Promise<void> {
+  async batchCreateCustomWriteCheckpoints(
+    checkpoints: storage.CustomWriteCheckpointOptions[],
+    op_id: InternalOpId
+  ): Promise<void> {
     return batchCreateCustomWriteCheckpoints(this.db, checkpoints);
   }
 
@@ -58,13 +60,6 @@ export class PostgresWriteCheckpointAPI implements storage.WriteCheckpointAPI {
     return row!.write_checkpoint;
   }
 
-  watchUserWriteCheckpoint(
-    options: storage.WatchUserWriteCheckpointOptions
-  ): AsyncIterable<storage.WriteCheckpointResult> {
-    // Not used for Postgres currently
-    throw new Error('Method not implemented.');
-  }
-
   async lastWriteCheckpoint(filters: storage.LastWriteCheckpointFilters): Promise<bigint | null> {
     switch (this.writeCheckpointMode) {
       case storage.WriteCheckpointMode.CUSTOM:
package/src/storage/sync-rules/PostgresPersistedSyncRulesContent.ts
CHANGED
@@ -14,6 +14,7 @@ export class PostgresPersistedSyncRulesContent implements storage.PersistedSyncR
   public readonly last_fatal_error: string | null;
   public readonly last_keepalive_ts: Date | null;
   public readonly last_checkpoint_ts: Date | null;
+  public readonly active: boolean;
   current_lock: storage.ReplicationLock | null = null;
 
   constructor(
@@ -27,6 +28,7 @@ export class PostgresPersistedSyncRulesContent implements storage.PersistedSyncR
     this.last_fatal_error = row.last_fatal_error;
     this.last_checkpoint_ts = row.last_checkpoint_ts ? new Date(row.last_checkpoint_ts) : null;
     this.last_keepalive_ts = row.last_keepalive_ts ? new Date(row.last_keepalive_ts) : null;
+    this.active = row.state == 'ACTIVE';
   }
 
   parsed(options: storage.ParseSyncRulesOptions): storage.PersistedSyncRules {
package/src/types/models/SourceTable.ts
CHANGED
@@ -1,5 +1,5 @@
 import * as t from 'ts-codec';
-import { bigint, jsonb, jsonb_raw, pgwire_number } from '../codecs.js';
+import { bigint, hexBuffer, jsonb, jsonb_raw, pgwire_number } from '../codecs.js';
 
 export type StoredRelationId = {
   object_id: string | number | undefined;
@@ -25,7 +25,10 @@ export const SourceTable = t.object({
   schema_name: t.string,
   table_name: t.string,
   replica_id_columns: t.Null.or(jsonb(t.array(ColumnDescriptor))),
-  snapshot_done: t.boolean
+  snapshot_done: t.boolean,
+  snapshot_total_estimated_count: t.Null.or(bigint),
+  snapshot_replicated_count: t.Null.or(bigint),
+  snapshot_last_key: t.Null.or(hexBuffer)
 });
 
 export type SourceTable = t.Encoded<typeof SourceTable>;
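The new columns are wired into the ts-codec model as nullable fields; `hexBuffer` presumably round-trips the hex form of a Postgres `bytea` value. A reduced sketch of how such a nullable ts-codec model decodes a row (plain `t.number`/`t.string` stand in for the project's `bigint` and `hexBuffer` codecs):

```ts
import * as t from 'ts-codec';

// Reduced model containing only the new progress fields.
const SnapshotProgress = t.object({
  snapshot_total_estimated_count: t.Null.or(t.number),
  snapshot_replicated_count: t.Null.or(t.number),
  snapshot_last_key: t.Null.or(t.string)
});

type SnapshotProgressDecoded = t.Decoded<typeof SnapshotProgress>;

const decoded: SnapshotProgressDecoded = SnapshotProgress.decode({
  snapshot_total_estimated_count: 1000,
  snapshot_replicated_count: 250,
  snapshot_last_key: null
});
```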
package/src/types/models/SyncRules.ts
CHANGED
@@ -11,6 +11,10 @@ export const SyncRules = t.object({
    * Can only be false if state == PROCESSING.
    */
   snapshot_done: t.boolean,
+  /**
+   * May be set if snapshot_done = false, if the replication stream requires it.
+   */
+  snapshot_lsn: t.Null.or(t.string),
   /**
    * The last consistent checkpoint.
    *
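These model fields match the new `1749024804042-snapshot-progress` migration listed at the top of this diff. The script itself is not shown here; a hypothetical reconstruction of the schema change it implies (the actual SQL and migration scaffolding may differ):

```ts
// Hypothetical reconstruction; not the actual migration script.
// `db` stands in for whatever client the migration scaffolding provides.
export const up = async (db: { query(sql: string): Promise<unknown> }) => {
  await db.query(`ALTER TABLE sync_rules ADD COLUMN snapshot_lsn TEXT`);
  await db.query(`
    ALTER TABLE source_tables
      ADD COLUMN snapshot_total_estimated_count BIGINT,
      ADD COLUMN snapshot_replicated_count BIGINT,
      ADD COLUMN snapshot_last_key BYTEA
  `);
};

export const down = async (db: { query(sql: string): Promise<unknown> }) => {
  await db.query(`ALTER TABLE sync_rules DROP COLUMN snapshot_lsn`);
  await db.query(`
    ALTER TABLE source_tables
      DROP COLUMN snapshot_total_estimated_count,
      DROP COLUMN snapshot_replicated_count,
      DROP COLUMN snapshot_last_key
  `);
};
```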
package/test/src/migrations.test.ts
CHANGED
@@ -1,15 +1,23 @@
-import { describe, expect, it } from 'vitest';
+import { beforeEach, describe, expect, it } from 'vitest';
 
+import { Direction } from '@powersync/lib-services-framework';
 import { register } from '@powersync/service-core-tests';
 import { PostgresMigrationAgent } from '../../src/migrations/PostgresMigrationAgent.js';
 import { env } from './env.js';
-import { POSTGRES_STORAGE_FACTORY } from './util.js';
+import { POSTGRES_STORAGE_FACTORY, POSTGRES_STORAGE_SETUP } from './util.js';
 
 const MIGRATION_AGENT_FACTORY = () => {
   return new PostgresMigrationAgent({ type: 'postgresql', uri: env.PG_STORAGE_TEST_URL, sslmode: 'disable' });
 };
 
 describe('Migrations', () => {
+  beforeEach(async () => {
+    // The migration tests clear the migration store, without running the down migrations.
+    // This ensures all the down migrations have been run before.
+    const setup = POSTGRES_STORAGE_SETUP;
+    await setup.migrate(Direction.Down);
+  });
+
   register.registerMigrationTests(MIGRATION_AGENT_FACTORY);
 
   it('Should have tables declared', async () => {
package/test/src/util.ts
CHANGED
@@ -2,7 +2,10 @@ import path from 'path';
 import { fileURLToPath } from 'url';
 import { normalizePostgresStorageConfig } from '../../src//types/types.js';
 import { PostgresMigrationAgent } from '../../src/migrations/PostgresMigrationAgent.js';
-import { PostgresTestStorageFactoryGenerator } from '../../src/storage/PostgresTestStorageFactoryGenerator.js';
+import {
+  postgresTestSetup,
+  PostgresTestStorageFactoryGenerator
+} from '../../src/storage/PostgresTestStorageFactoryGenerator.js';
 import { env } from './env.js';
 
 const __filename = fileURLToPath(import.meta.url);
@@ -28,7 +31,9 @@ class TestPostgresMigrationAgent extends PostgresMigrationAgent {
   }
 }
 
-export const POSTGRES_STORAGE_FACTORY = PostgresTestStorageFactoryGenerator({
+export const POSTGRES_STORAGE_SETUP = postgresTestSetup({
   url: env.PG_STORAGE_TEST_URL,
   migrationAgent: (config) => new TestPostgresMigrationAgent(config)
 });
+
+export const POSTGRES_STORAGE_FACTORY = POSTGRES_STORAGE_SETUP.factory;