@powersync/service-core 0.0.0-dev-20240718134716 → 0.0.0-dev-20240918082156
This diff represents the changes between two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
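A comparable file-level diff can be reproduced locally with npm's built-in diff command (assuming both versions are still published to the public npm registry):

    npm diff --diff=@powersync/service-core@0.0.0-dev-20240718134716 --diff=@powersync/service-core@0.0.0-dev-20240918082156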
- package/CHANGELOG.md +89 -6
- package/dist/api/RouteAPI.d.ts +68 -0
- package/dist/api/RouteAPI.js +2 -0
- package/dist/api/RouteAPI.js.map +1 -0
- package/dist/api/api-index.d.ts +1 -0
- package/dist/api/api-index.js +1 -0
- package/dist/api/api-index.js.map +1 -1
- package/dist/api/diagnostics.d.ts +4 -4
- package/dist/api/diagnostics.js +11 -65
- package/dist/api/diagnostics.js.map +1 -1
- package/dist/api/schema.d.ts +3 -5
- package/dist/api/schema.js +9 -79
- package/dist/api/schema.js.map +1 -1
- package/dist/auth/KeyStore.d.ts +7 -4
- package/dist/auth/KeyStore.js +1 -1
- package/dist/auth/KeyStore.js.map +1 -1
- package/dist/auth/auth-index.d.ts +0 -1
- package/dist/auth/auth-index.js +0 -1
- package/dist/auth/auth-index.js.map +1 -1
- package/dist/entry/cli-entry.js +4 -2
- package/dist/entry/cli-entry.js.map +1 -1
- package/dist/entry/commands/compact-action.d.ts +2 -0
- package/dist/entry/commands/compact-action.js +52 -0
- package/dist/entry/commands/compact-action.js.map +1 -0
- package/dist/entry/commands/migrate-action.js +4 -5
- package/dist/entry/commands/migrate-action.js.map +1 -1
- package/dist/entry/commands/teardown-action.js +2 -2
- package/dist/entry/commands/teardown-action.js.map +1 -1
- package/dist/entry/entry-index.d.ts +1 -0
- package/dist/entry/entry-index.js +1 -0
- package/dist/entry/entry-index.js.map +1 -1
- package/dist/index.d.ts +4 -2
- package/dist/index.js +4 -2
- package/dist/index.js.map +1 -1
- package/dist/metrics/Metrics.d.ts +6 -5
- package/dist/metrics/Metrics.js +53 -10
- package/dist/metrics/Metrics.js.map +1 -1
- package/dist/migrations/db/migrations/1684951997326-init.d.ts +2 -2
- package/dist/migrations/db/migrations/1684951997326-init.js +4 -2
- package/dist/migrations/db/migrations/1684951997326-init.js.map +1 -1
- package/dist/migrations/db/migrations/1702295701188-sync-rule-state.d.ts +2 -2
- package/dist/migrations/db/migrations/1702295701188-sync-rule-state.js +4 -2
- package/dist/migrations/db/migrations/1702295701188-sync-rule-state.js.map +1 -1
- package/dist/migrations/db/migrations/1711543888062-write-checkpoint-index.d.ts +2 -2
- package/dist/migrations/db/migrations/1711543888062-write-checkpoint-index.js +4 -2
- package/dist/migrations/db/migrations/1711543888062-write-checkpoint-index.js.map +1 -1
- package/dist/migrations/migrations.d.ts +8 -0
- package/dist/migrations/migrations.js +19 -7
- package/dist/migrations/migrations.js.map +1 -1
- package/dist/modules/AbstractModule.d.ts +26 -0
- package/dist/modules/AbstractModule.js +11 -0
- package/dist/modules/AbstractModule.js.map +1 -0
- package/dist/modules/ModuleManager.d.ts +11 -0
- package/dist/modules/ModuleManager.js +32 -0
- package/dist/modules/ModuleManager.js.map +1 -0
- package/dist/modules/modules-index.d.ts +2 -0
- package/dist/modules/modules-index.js +3 -0
- package/dist/modules/modules-index.js.map +1 -0
- package/dist/replication/AbstractReplicationJob.d.ts +38 -0
- package/dist/replication/AbstractReplicationJob.js +51 -0
- package/dist/replication/AbstractReplicationJob.js.map +1 -0
- package/dist/replication/AbstractReplicator.d.ts +53 -0
- package/dist/replication/AbstractReplicator.js +187 -0
- package/dist/replication/AbstractReplicator.js.map +1 -0
- package/dist/replication/ErrorRateLimiter.d.ts +0 -9
- package/dist/replication/ErrorRateLimiter.js +1 -42
- package/dist/replication/ErrorRateLimiter.js.map +1 -1
- package/dist/replication/ReplicationEngine.d.ts +18 -0
- package/dist/replication/ReplicationEngine.js +41 -0
- package/dist/replication/ReplicationEngine.js.map +1 -0
- package/dist/replication/ReplicationModule.d.ts +39 -0
- package/dist/replication/ReplicationModule.js +65 -0
- package/dist/replication/ReplicationModule.js.map +1 -0
- package/dist/replication/replication-index.d.ts +4 -6
- package/dist/replication/replication-index.js +4 -6
- package/dist/replication/replication-index.js.map +1 -1
- package/dist/routes/RouterEngine.d.ts +42 -0
- package/dist/routes/RouterEngine.js +80 -0
- package/dist/routes/RouterEngine.js.map +1 -0
- package/dist/routes/auth.d.ts +2 -2
- package/dist/routes/auth.js +11 -11
- package/dist/routes/auth.js.map +1 -1
- package/dist/routes/configure-fastify.d.ts +737 -0
- package/dist/routes/configure-fastify.js +57 -0
- package/dist/routes/configure-fastify.js.map +1 -0
- package/dist/routes/configure-rsocket.d.ts +13 -0
- package/dist/routes/configure-rsocket.js +47 -0
- package/dist/routes/configure-rsocket.js.map +1 -0
- package/dist/routes/endpoints/admin.d.ts +0 -34
- package/dist/routes/endpoints/admin.js +48 -89
- package/dist/routes/endpoints/admin.js.map +1 -1
- package/dist/routes/endpoints/checkpointing.d.ts +56 -16
- package/dist/routes/endpoints/checkpointing.js +33 -12
- package/dist/routes/endpoints/checkpointing.js.map +1 -1
- package/dist/routes/endpoints/route-endpoints-index.d.ts +0 -1
- package/dist/routes/endpoints/route-endpoints-index.js +0 -1
- package/dist/routes/endpoints/route-endpoints-index.js.map +1 -1
- package/dist/routes/endpoints/socket-route.js +46 -39
- package/dist/routes/endpoints/socket-route.js.map +1 -1
- package/dist/routes/endpoints/sync-rules.d.ts +1 -1
- package/dist/routes/endpoints/sync-rules.js +32 -23
- package/dist/routes/endpoints/sync-rules.js.map +1 -1
- package/dist/routes/endpoints/sync-stream.d.ts +10 -0
- package/dist/routes/endpoints/sync-stream.js +17 -13
- package/dist/routes/endpoints/sync-stream.js.map +1 -1
- package/dist/routes/route-register.d.ts +1 -1
- package/dist/routes/route-register.js +1 -1
- package/dist/routes/route-register.js.map +1 -1
- package/dist/routes/router-socket.d.ts +5 -4
- package/dist/routes/router-socket.js +2 -1
- package/dist/routes/router-socket.js.map +1 -1
- package/dist/routes/router.d.ts +7 -2
- package/dist/routes/router.js.map +1 -1
- package/dist/routes/routes-index.d.ts +3 -0
- package/dist/routes/routes-index.js +3 -0
- package/dist/routes/routes-index.js.map +1 -1
- package/dist/runner/teardown.js +47 -76
- package/dist/runner/teardown.js.map +1 -1
- package/dist/storage/BucketStorage.d.ts +61 -20
- package/dist/storage/BucketStorage.js +0 -10
- package/dist/storage/BucketStorage.js.map +1 -1
- package/dist/storage/MongoBucketStorage.d.ts +4 -4
- package/dist/storage/MongoBucketStorage.js +19 -24
- package/dist/storage/MongoBucketStorage.js.map +1 -1
- package/dist/storage/SourceEntity.d.ts +20 -0
- package/dist/storage/SourceEntity.js +2 -0
- package/dist/storage/SourceEntity.js.map +1 -0
- package/dist/storage/SourceTable.d.ts +4 -5
- package/dist/storage/SourceTable.js +3 -4
- package/dist/storage/SourceTable.js.map +1 -1
- package/dist/storage/StorageEngine.d.ts +24 -0
- package/dist/storage/StorageEngine.js +43 -0
- package/dist/storage/StorageEngine.js.map +1 -0
- package/dist/storage/StorageProvider.d.ts +21 -0
- package/dist/storage/StorageProvider.js +2 -0
- package/dist/storage/StorageProvider.js.map +1 -0
- package/dist/storage/mongo/MongoBucketBatch.d.ts +1 -1
- package/dist/storage/mongo/MongoBucketBatch.js +6 -7
- package/dist/storage/mongo/MongoBucketBatch.js.map +1 -1
- package/dist/storage/mongo/MongoCompactor.d.ts +40 -0
- package/dist/storage/mongo/MongoCompactor.js +293 -0
- package/dist/storage/mongo/MongoCompactor.js.map +1 -0
- package/dist/storage/mongo/MongoPersistedSyncRulesContent.d.ts +2 -2
- package/dist/storage/mongo/MongoPersistedSyncRulesContent.js +2 -2
- package/dist/storage/mongo/MongoPersistedSyncRulesContent.js.map +1 -1
- package/dist/storage/mongo/MongoStorageProvider.d.ts +5 -0
- package/dist/storage/mongo/MongoStorageProvider.js +26 -0
- package/dist/storage/mongo/MongoStorageProvider.js.map +1 -0
- package/dist/storage/mongo/MongoSyncBucketStorage.d.ts +9 -7
- package/dist/storage/mongo/MongoSyncBucketStorage.js +43 -28
- package/dist/storage/mongo/MongoSyncBucketStorage.js.map +1 -1
- package/dist/storage/mongo/MongoSyncRulesLock.js +1 -1
- package/dist/storage/mongo/MongoSyncRulesLock.js.map +1 -1
- package/dist/storage/mongo/OperationBatch.d.ts +7 -3
- package/dist/storage/mongo/OperationBatch.js +16 -7
- package/dist/storage/mongo/OperationBatch.js.map +1 -1
- package/dist/storage/mongo/PersistedBatch.d.ts +3 -3
- package/dist/storage/mongo/PersistedBatch.js +2 -2
- package/dist/storage/mongo/PersistedBatch.js.map +1 -1
- package/dist/storage/mongo/models.d.ts +17 -7
- package/dist/storage/mongo/models.js.map +1 -1
- package/dist/storage/mongo/util.d.ts +14 -0
- package/dist/storage/mongo/util.js +70 -0
- package/dist/storage/mongo/util.js.map +1 -1
- package/dist/storage/storage-index.d.ts +5 -2
- package/dist/storage/storage-index.js +5 -2
- package/dist/storage/storage-index.js.map +1 -1
- package/dist/sync/RequestTracker.js +2 -3
- package/dist/sync/RequestTracker.js.map +1 -1
- package/dist/sync/sync-index.d.ts +1 -0
- package/dist/sync/sync-index.js +1 -0
- package/dist/sync/sync-index.js.map +1 -1
- package/dist/sync/sync.d.ts +2 -1
- package/dist/sync/sync.js +56 -17
- package/dist/sync/sync.js.map +1 -1
- package/dist/system/ServiceContext.d.ts +37 -0
- package/dist/system/ServiceContext.js +48 -0
- package/dist/system/ServiceContext.js.map +1 -0
- package/dist/system/system-index.d.ts +1 -1
- package/dist/system/system-index.js +1 -1
- package/dist/system/system-index.js.map +1 -1
- package/dist/util/config/collectors/config-collector.d.ts +12 -0
- package/dist/util/config/collectors/config-collector.js +43 -0
- package/dist/util/config/collectors/config-collector.js.map +1 -1
- package/dist/util/config/compound-config-collector.d.ts +10 -29
- package/dist/util/config/compound-config-collector.js +28 -84
- package/dist/util/config/compound-config-collector.js.map +1 -1
- package/dist/util/config/sync-rules/sync-rules-provider.d.ts +9 -0
- package/dist/util/config/sync-rules/sync-rules-provider.js +15 -0
- package/dist/util/config/sync-rules/sync-rules-provider.js.map +1 -0
- package/dist/util/config/types.d.ts +6 -4
- package/dist/util/config/types.js.map +1 -1
- package/dist/util/config.d.ts +3 -4
- package/dist/util/config.js +5 -20
- package/dist/util/config.js.map +1 -1
- package/dist/util/protocol-types.d.ts +4 -0
- package/dist/util/protocol-types.js +5 -1
- package/dist/util/protocol-types.js.map +1 -1
- package/dist/util/util-index.d.ts +3 -6
- package/dist/util/util-index.js +3 -6
- package/dist/util/util-index.js.map +1 -1
- package/dist/util/utils.d.ts +10 -6
- package/dist/util/utils.js +45 -25
- package/dist/util/utils.js.map +1 -1
- package/package.json +7 -7
- package/src/api/RouteAPI.ts +78 -0
- package/src/api/api-index.ts +1 -0
- package/src/api/diagnostics.ts +16 -71
- package/src/api/schema.ts +13 -89
- package/src/auth/KeyStore.ts +9 -6
- package/src/auth/auth-index.ts +0 -1
- package/src/entry/cli-entry.ts +4 -2
- package/src/entry/commands/compact-action.ts +57 -0
- package/src/entry/commands/migrate-action.ts +5 -8
- package/src/entry/commands/teardown-action.ts +2 -2
- package/src/entry/entry-index.ts +1 -0
- package/src/index.ts +5 -2
- package/src/metrics/Metrics.ts +70 -15
- package/src/migrations/db/migrations/1684951997326-init.ts +9 -4
- package/src/migrations/db/migrations/1702295701188-sync-rule-state.ts +7 -4
- package/src/migrations/db/migrations/1711543888062-write-checkpoint-index.ts +6 -4
- package/src/migrations/migrations.ts +24 -8
- package/src/modules/AbstractModule.ts +37 -0
- package/src/modules/ModuleManager.ts +34 -0
- package/src/modules/modules-index.ts +2 -0
- package/src/replication/AbstractReplicationJob.ts +79 -0
- package/src/replication/AbstractReplicator.ts +227 -0
- package/src/replication/ErrorRateLimiter.ts +0 -44
- package/src/replication/ReplicationEngine.ts +43 -0
- package/src/replication/ReplicationModule.ts +101 -0
- package/src/replication/replication-index.ts +4 -6
- package/src/routes/RouterEngine.ts +120 -0
- package/src/routes/auth.ts +21 -12
- package/src/routes/configure-fastify.ts +101 -0
- package/src/routes/configure-rsocket.ts +60 -0
- package/src/routes/endpoints/admin.ts +74 -100
- package/src/routes/endpoints/checkpointing.ts +46 -12
- package/src/routes/endpoints/route-endpoints-index.ts +0 -1
- package/src/routes/endpoints/socket-route.ts +50 -42
- package/src/routes/endpoints/sync-rules.ts +41 -25
- package/src/routes/endpoints/sync-stream.ts +17 -13
- package/src/routes/route-register.ts +2 -2
- package/src/routes/router-socket.ts +6 -5
- package/src/routes/router.ts +7 -2
- package/src/routes/routes-index.ts +3 -0
- package/src/runner/teardown.ts +50 -88
- package/src/storage/BucketStorage.ts +74 -26
- package/src/storage/MongoBucketStorage.ts +23 -26
- package/src/storage/SourceEntity.ts +22 -0
- package/src/storage/SourceTable.ts +4 -6
- package/src/storage/StorageEngine.ts +55 -0
- package/src/storage/StorageProvider.ts +27 -0
- package/src/storage/mongo/MongoBucketBatch.ts +8 -8
- package/src/storage/mongo/MongoCompactor.ts +372 -0
- package/src/storage/mongo/MongoPersistedSyncRulesContent.ts +3 -3
- package/src/storage/mongo/MongoStorageProvider.ts +31 -0
- package/src/storage/mongo/MongoSyncBucketStorage.ts +64 -34
- package/src/storage/mongo/MongoSyncRulesLock.ts +1 -1
- package/src/storage/mongo/OperationBatch.ts +18 -11
- package/src/storage/mongo/PersistedBatch.ts +6 -5
- package/src/storage/mongo/models.ts +17 -7
- package/src/storage/mongo/util.ts +71 -1
- package/src/storage/storage-index.ts +5 -2
- package/src/sync/RequestTracker.ts +3 -3
- package/src/sync/sync-index.ts +1 -0
- package/src/sync/sync.ts +66 -17
- package/src/system/ServiceContext.ts +68 -0
- package/src/system/system-index.ts +1 -1
- package/src/util/config/collectors/config-collector.ts +48 -0
- package/src/util/config/compound-config-collector.ts +45 -110
- package/src/util/config/sync-rules/sync-rules-provider.ts +18 -0
- package/src/util/config/types.ts +6 -5
- package/src/util/config.ts +6 -23
- package/src/util/protocol-types.ts +6 -1
- package/src/util/util-index.ts +3 -6
- package/src/util/utils.ts +55 -39
- package/test/src/__snapshots__/sync.test.ts.snap +90 -5
- package/test/src/auth.test.ts +7 -7
- package/test/src/broadcast_iterable.test.ts +1 -1
- package/test/src/bucket_validation.test.ts +142 -0
- package/test/src/bucket_validation.ts +116 -0
- package/test/src/checksum_cache.test.ts +3 -3
- package/test/src/compacting.test.ts +216 -0
- package/test/src/data_storage.test.ts +275 -204
- package/test/src/env.ts +1 -3
- package/test/src/merge_iterable.test.ts +1 -6
- package/test/src/setup.ts +1 -1
- package/test/src/stream_utils.ts +42 -0
- package/test/src/sync.test.ts +209 -48
- package/test/src/util.ts +110 -55
- package/test/tsconfig.json +1 -1
- package/tsconfig.tsbuildinfo +1 -1
- package/dist/auth/SupabaseKeyCollector.d.ts +0 -22
- package/dist/auth/SupabaseKeyCollector.js +0 -61
- package/dist/auth/SupabaseKeyCollector.js.map +0 -1
- package/dist/replication/PgRelation.d.ts +0 -16
- package/dist/replication/PgRelation.js +0 -26
- package/dist/replication/PgRelation.js.map +0 -1
- package/dist/replication/WalConnection.d.ts +0 -34
- package/dist/replication/WalConnection.js +0 -190
- package/dist/replication/WalConnection.js.map +0 -1
- package/dist/replication/WalStream.d.ts +0 -57
- package/dist/replication/WalStream.js +0 -517
- package/dist/replication/WalStream.js.map +0 -1
- package/dist/replication/WalStreamManager.d.ts +0 -30
- package/dist/replication/WalStreamManager.js +0 -198
- package/dist/replication/WalStreamManager.js.map +0 -1
- package/dist/replication/WalStreamRunner.d.ts +0 -38
- package/dist/replication/WalStreamRunner.js +0 -155
- package/dist/replication/WalStreamRunner.js.map +0 -1
- package/dist/replication/util.d.ts +0 -9
- package/dist/replication/util.js +0 -62
- package/dist/replication/util.js.map +0 -1
- package/dist/routes/endpoints/dev.d.ts +0 -312
- package/dist/routes/endpoints/dev.js +0 -172
- package/dist/routes/endpoints/dev.js.map +0 -1
- package/dist/system/CorePowerSyncSystem.d.ts +0 -23
- package/dist/system/CorePowerSyncSystem.js +0 -52
- package/dist/system/CorePowerSyncSystem.js.map +0 -1
- package/dist/util/PgManager.d.ts +0 -24
- package/dist/util/PgManager.js +0 -55
- package/dist/util/PgManager.js.map +0 -1
- package/dist/util/migration_lib.d.ts +0 -11
- package/dist/util/migration_lib.js +0 -64
- package/dist/util/migration_lib.js.map +0 -1
- package/dist/util/pgwire_utils.d.ts +0 -24
- package/dist/util/pgwire_utils.js +0 -117
- package/dist/util/pgwire_utils.js.map +0 -1
- package/dist/util/populate_test_data.d.ts +0 -8
- package/dist/util/populate_test_data.js +0 -65
- package/dist/util/populate_test_data.js.map +0 -1
- package/src/auth/SupabaseKeyCollector.ts +0 -67
- package/src/replication/PgRelation.ts +0 -42
- package/src/replication/WalConnection.ts +0 -227
- package/src/replication/WalStream.ts +0 -628
- package/src/replication/WalStreamManager.ts +0 -213
- package/src/replication/WalStreamRunner.ts +0 -180
- package/src/replication/util.ts +0 -76
- package/src/routes/endpoints/dev.ts +0 -199
- package/src/system/CorePowerSyncSystem.ts +0 -64
- package/src/util/PgManager.ts +0 -64
- package/src/util/migration_lib.ts +0 -79
- package/src/util/pgwire_utils.ts +0 -139
- package/src/util/populate_test_data.ts +0 -78
- package/test/src/__snapshots__/pg_test.test.ts.snap +0 -256
- package/test/src/large_batch.test.ts +0 -194
- package/test/src/pg_test.test.ts +0 -450
- package/test/src/schema_changes.test.ts +0 -545
- package/test/src/slow_tests.test.ts +0 -296
- package/test/src/validation.test.ts +0 -63
- package/test/src/wal_stream.test.ts +0 -314
- package/test/src/wal_stream_utils.ts +0 -147

--- package/test/src/slow_tests.test.ts
+++ /dev/null
@@ -1,296 +0,0 @@
-import * as bson from 'bson';
-import * as mongo from 'mongodb';
-import { afterEach, describe, expect, test } from 'vitest';
-import { WalStream, WalStreamOptions } from '../../src/replication/WalStream.js';
-import { getClientCheckpoint } from '../../src/util/utils.js';
-import { env } from './env.js';
-import { MONGO_STORAGE_FACTORY, StorageFactory, TEST_CONNECTION_OPTIONS, connectPgPool } from './util.js';
-
-import * as pgwire from '@powersync/service-jpgwire';
-import { SqliteRow } from '@powersync/service-sync-rules';
-import { MongoBucketStorage } from '../../src/storage/MongoBucketStorage.js';
-import { PgManager } from '../../src/util/PgManager.js';
-
-describe('slow tests - mongodb', function () {
-  // These are slow, inconsistent tests.
-  // Not run on every test run, but we do run on CI, or when manually debugging issues.
-  if (env.CI || env.SLOW_TESTS) {
-    defineSlowTests(MONGO_STORAGE_FACTORY);
-  } else {
-    // Need something in this file.
-    test('no-op', () => {});
-  }
-});
-
-function defineSlowTests(factory: StorageFactory) {
-  let walStream: WalStream | undefined;
-  let connections: PgManager | undefined;
-  let abortController: AbortController | undefined;
-  let streamPromise: Promise<void> | undefined;
-
-  afterEach(async () => {
-    // This cleans up, similar to WalStreamTestContext.dispose().
-    // These tests are a little more complex than what is supported by WalStreamTestContext.
-    abortController?.abort();
-    await streamPromise;
-    streamPromise = undefined;
-    connections?.destroy();
-
-    connections = undefined;
-    walStream = undefined;
-    abortController = undefined;
-  });
-
-  const TEST_DURATION_MS = 15_000;
-  const TIMEOUT_MARGIN_MS = env.CI ? 30_000 : 15_000;
-
-  // Test repeatedly replicating inserts and deletes, then check that we get
-  // consistent data out at the end.
-  //
-  // Past issues that this could reproduce intermittently:
-  // * Skipping LSNs after a keepalive message
-  // * Skipping LSNs when source transactions overlap
-  test(
-    'repeated replication',
-    async () => {
-      const connections = new PgManager(TEST_CONNECTION_OPTIONS, {});
-      const replicationConnection = await connections.replicationConnection();
-      const pool = connections.pool;
-      const f = (await factory()) as MongoBucketStorage;
-
-      const syncRuleContent = `
-bucket_definitions:
-  global:
-    data:
-      - SELECT * FROM "test_data"
-`;
-      const syncRules = await f.updateSyncRules({ content: syncRuleContent });
-      const storage = f.getInstance(syncRules.parsed());
-      abortController = new AbortController();
-      const options: WalStreamOptions = {
-        abort_signal: abortController.signal,
-        connections,
-        storage: storage,
-        factory: f
-      };
-      walStream = new WalStream(options);
-
-      await pool.query(`DROP TABLE IF EXISTS test_data`);
-      await pool.query(
-        `CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text, num decimal)`
-      );
-      await pool.query(`ALTER TABLE test_data REPLICA IDENTITY FULL`);
-
-      await walStream.initReplication(replicationConnection);
-      await storage.autoActivate();
-      let abort = false;
-      streamPromise = walStream.streamChanges(replicationConnection).finally(() => {
-        abort = true;
-      });
-      const start = Date.now();
-
-      while (!abort && Date.now() - start < TEST_DURATION_MS) {
-        const bg = async () => {
-          for (let j = 0; j < 1 && !abort; j++) {
-            const n = 1;
-            let statements: pgwire.Statement[] = [];
-            for (let i = 0; i < n; i++) {
-              const description = `test${i}`;
-              statements.push({
-                statement: `INSERT INTO test_data(description, num) VALUES($1, $2) returning id as test_id`,
-                params: [
-                  { type: 'varchar', value: description },
-                  { type: 'float8', value: Math.random() }
-                ]
-              });
-            }
-            const results = await pool.query(...statements);
-            const ids = results.results.map((sub) => {
-              return sub.rows[0][0] as string;
-            });
-            await new Promise((resolve) => setTimeout(resolve, Math.random() * 30));
-
-            if (Math.random() > 0.5) {
-              const updateStatements: pgwire.Statement[] = ids.map((id) => {
-                return {
-                  statement: `UPDATE test_data SET num = $2 WHERE id = $1`,
-                  params: [
-                    { type: 'uuid', value: id },
-                    { type: 'float8', value: Math.random() }
-                  ]
-                };
-              });
-
-              await pool.query(...updateStatements);
-              if (Math.random() > 0.5) {
-                // Special case - an update that doesn't change data
-                await pool.query(...updateStatements);
-              }
-            }
-
-            const deleteStatements: pgwire.Statement[] = ids.map((id) => {
-              return {
-                statement: `DELETE FROM test_data WHERE id = $1`,
-                params: [{ type: 'uuid', value: id }]
-              };
-            });
-            await pool.query(...deleteStatements);
-
-            await new Promise((resolve) => setTimeout(resolve, Math.random() * 10));
-          }
-        };
-
-        // Call the above loop multiple times concurrently
-        let promises = [1, 2, 3].map((i) => bg());
-        await Promise.all(promises);
-
-        // Wait for replication to finish
-        let checkpoint = await getClientCheckpoint(pool, storage.factory, { timeout: TIMEOUT_MARGIN_MS });
-
-        // Check that all inserts have been deleted again
-        const docs = await f.db.current_data.find().toArray();
-        const transformed = docs.map((doc) => {
-          return bson.deserialize((doc.data as mongo.Binary).buffer) as SqliteRow;
-        });
-        expect(transformed).toEqual([]);
-
-        // Check that each PUT has a REMOVE
-        const ops = await f.db.bucket_data.find().sort({ _id: 1 }).toArray();
-        let active = new Set<string>();
-        for (let op of ops) {
-          const key = op.source_key.toHexString();
-          if (op.op == 'PUT') {
-            active.add(key);
-          } else if (op.op == 'REMOVE') {
-            active.delete(key);
-          }
-        }
-        if (active.size > 0) {
-          throw new Error(`${active.size} rows not removed`);
-        }
-      }
-
-      abortController.abort();
-      await streamPromise;
-    },
-    { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }
-  );
-
-  // Test repeatedly performing initial replication.
-  //
-  // If the first LSN does not correctly match with the first replication transaction,
-  // we may miss some updates.
-  test(
-    'repeated initial replication',
-    async () => {
-      const pool = await connectPgPool();
-      const f = await factory();
-
-      const syncRuleContent = `
-bucket_definitions:
-  global:
-    data:
-      - SELECT id, description FROM "test_data"
-`;
-      const syncRules = await f.updateSyncRules({ content: syncRuleContent });
-      const storage = f.getInstance(syncRules.parsed());
-
-      // 1. Setup some base data that will be replicated in initial replication
-      await pool.query(`DROP TABLE IF EXISTS test_data`);
-      await pool.query(`CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)`);
-
-      let statements: pgwire.Statement[] = [];
-
-      const n = Math.floor(Math.random() * 200);
-      for (let i = 0; i < n; i++) {
-        statements.push({
-          statement: `INSERT INTO test_data(description) VALUES('test_init')`
-        });
-      }
-      await pool.query(...statements);
-
-      const start = Date.now();
-      let i = 0;
-
-      while (Date.now() - start < TEST_DURATION_MS) {
-        // 2. Each iteration starts with a clean slate
-        await pool.query(`SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots WHERE active = FALSE`);
-        i += 1;
-
-        const connections = new PgManager(TEST_CONNECTION_OPTIONS, {});
-        const replicationConnection = await connections.replicationConnection();
-
-        abortController = new AbortController();
-        const options: WalStreamOptions = {
-          abort_signal: abortController.signal,
-          connections,
-          storage: storage,
-          factory: f
-        };
-        walStream = new WalStream(options);
-
-        await storage.clear();
-
-        // 3. Start initial replication, then streaming, but don't wait for any of this
-        let initialReplicationDone = false;
-        streamPromise = (async () => {
-          await walStream.initReplication(replicationConnection);
-          await storage.autoActivate();
-          initialReplicationDone = true;
-          await walStream.streamChanges(replicationConnection);
-        })()
-          .catch((e) => {
-            initialReplicationDone = true;
-            throw e;
-          })
-          .then((v) => {
-            return v;
-          });
-
-        // 4. While initial replication is still running, write more changes
-        while (!initialReplicationDone) {
-          let statements: pgwire.Statement[] = [];
-          const n = Math.floor(Math.random() * 10) + 1;
-          for (let i = 0; i < n; i++) {
-            const description = `test${i}`;
-            statements.push({
-              statement: `INSERT INTO test_data(description) VALUES('test1') returning id as test_id`,
-              params: [{ type: 'varchar', value: description }]
-            });
-          }
-          const results = await pool.query(...statements);
-          const ids = results.results.map((sub) => {
-            return sub.rows[0][0] as string;
-          });
-          await new Promise((resolve) => setTimeout(resolve, Math.random() * 30));
-          const deleteStatements: pgwire.Statement[] = ids.map((id) => {
-            return {
-              statement: `DELETE FROM test_data WHERE id = $1`,
-              params: [{ type: 'uuid', value: id }]
-            };
-          });
-          await pool.query(...deleteStatements);
-          await new Promise((resolve) => setTimeout(resolve, Math.random() * 10));
-        }
-
-        // 5. Once initial replication is done, wait for the streaming changes to complete syncing.
-        // getClientCheckpoint() effectively waits for the above replication to complete
-        // Race with streamingPromise to catch replication errors here.
-        let checkpoint = await Promise.race([
-          getClientCheckpoint(pool, storage.factory, { timeout: TIMEOUT_MARGIN_MS }),
-          streamPromise
-        ]);
-        if (typeof checkpoint == undefined) {
-          // This indicates an issue with the test setup - streamingPromise completed instead
-          // of getClientCheckpoint()
-          throw new Error('Test failure - streamingPromise completed');
-        }
-
-        abortController.abort();
-        await streamPromise;
-        await connections.end();
-      }
-    },
-    { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }
-  );
-}

--- package/test/src/validation.test.ts
+++ /dev/null
@@ -1,63 +0,0 @@
-import { expect, test } from 'vitest';
-import { MONGO_STORAGE_FACTORY } from './util.js';
-import { walStreamTest } from './wal_stream_utils.js';
-import { WalConnection } from '../../src/replication/WalConnection.js';
-
-// Not quite a walStreamTest, but it helps to manage the connection
-test(
-  'validate tables',
-  walStreamTest(MONGO_STORAGE_FACTORY, async (context) => {
-    const { pool } = context;
-
-    await pool.query(`CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)`);
-
-    const syncRuleContent = `
-bucket_definitions:
-  global:
-    data:
-      - SELECT id, description FROM "test_data"
-      - SELECT * FROM "other"
-      - SELECT * FROM "other%"
-`;
-
-    const syncRules = await context.factory.updateSyncRules({ content: syncRuleContent });
-
-    const walConnection = new WalConnection({
-      db: pool,
-      sync_rules: syncRules.parsed().sync_rules
-    });
-
-    const tablePatterns = syncRules.parsed().sync_rules.getSourceTables();
-    const tableInfo = await walConnection.getDebugTablesInfo(tablePatterns);
-    expect(tableInfo).toEqual([
-      {
-        schema: 'public',
-        pattern: 'test_data',
-        wildcard: false,
-        table: {
-          schema: 'public',
-          name: 'test_data',
-          replication_id: ['id'],
-          pattern: undefined,
-          data_queries: true,
-          parameter_queries: false,
-          errors: []
-        }
-      },
-      {
-        schema: 'public',
-        pattern: 'other',
-        wildcard: false,
-        table: {
-          schema: 'public',
-          name: 'other',
-          replication_id: [],
-          data_queries: true,
-          parameter_queries: false,
-          errors: [{ level: 'warning', message: 'Table "public"."other" not found.' }]
-        }
-      },
-      { schema: 'public', pattern: 'other%', wildcard: true, tables: [] }
-    ]);
-  })
-);

--- package/test/src/wal_stream.test.ts
+++ /dev/null
@@ -1,314 +0,0 @@
-import * as crypto from 'crypto';
-import { describe, expect, test } from 'vitest';
-import { BucketStorageFactory } from '@/storage/BucketStorage.js';
-import { MONGO_STORAGE_FACTORY } from './util.js';
-import { putOp, removeOp, walStreamTest } from './wal_stream_utils.js';
-import { pgwireRows } from '@powersync/service-jpgwire';
-import { Metrics } from '@/metrics/Metrics.js';
-import { container } from '@powersync/lib-services-framework';
-
-type StorageFactory = () => Promise<BucketStorageFactory>;
-
-const BASIC_SYNC_RULES = `
-bucket_definitions:
-  global:
-    data:
-      - SELECT id, description FROM "test_data"
-`;
-
-describe(
-  'wal stream - mongodb',
-  function () {
-    defineWalStreamTests(MONGO_STORAGE_FACTORY);
-  },
-  { timeout: 20_000 }
-);
-
-function defineWalStreamTests(factory: StorageFactory) {
-  test(
-    'replicating basic values',
-    walStreamTest(factory, async (context) => {
-      const { pool } = context;
-      await context.updateSyncRules(`
-bucket_definitions:
-  global:
-    data:
-      - SELECT id, description, num FROM "test_data"`);
-
-      await pool.query(`DROP TABLE IF EXISTS test_data`);
-      await pool.query(
-        `CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text, num int8)`
-      );
-
-      await context.replicateSnapshot();
-
-      const metrics = container.getImplementation(Metrics);
-      const startRowCount = (await metrics.getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
-      const startTxCount = (await metrics.getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0;
-
-      context.startStreaming();
-
-      const [{ test_id }] = pgwireRows(
-        await pool.query(
-          `INSERT INTO test_data(description, num) VALUES('test1', 1152921504606846976) returning id as test_id`
-        )
-      );
-
-      const data = await context.getBucketData('global[]');
-
-      expect(data).toMatchObject([
-        putOp('test_data', { id: test_id, description: 'test1', num: 1152921504606846976n })
-      ]);
-      const endRowCount = (await metrics.getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
-      const endTxCount = (await metrics.getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0;
-      expect(endRowCount - startRowCount).toEqual(1);
-      expect(endTxCount - startTxCount).toEqual(1);
-    })
-  );
-
-  test(
-    'replicating case sensitive table',
-    walStreamTest(factory, async (context) => {
-      const { pool } = context;
-      await context.updateSyncRules(`
-bucket_definitions:
-  global:
-    data:
-      - SELECT id, description FROM "test_DATA"
-`);
-
-      await pool.query(`DROP TABLE IF EXISTS "test_DATA"`);
-      await pool.query(`CREATE TABLE "test_DATA"(id uuid primary key default uuid_generate_v4(), description text)`);
-
-      await context.replicateSnapshot();
-
-      const metrics = container.getImplementation(Metrics);
-      const startRowCount = (await metrics.getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
-      const startTxCount = (await metrics.getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0;
-
-      context.startStreaming();
-
-      const [{ test_id }] = pgwireRows(
-        await pool.query(`INSERT INTO "test_DATA"(description) VALUES('test1') returning id as test_id`)
-      );
-
-      const data = await context.getBucketData('global[]');
-
-      expect(data).toMatchObject([putOp('test_DATA', { id: test_id, description: 'test1' })]);
-      const endRowCount = (await metrics.getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
-      const endTxCount = (await metrics.getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0;
-      expect(endRowCount - startRowCount).toEqual(1);
-      expect(endTxCount - startTxCount).toEqual(1);
-    })
-  );
-
-  test(
-    'replicating TOAST values',
-    walStreamTest(factory, async (context) => {
-      const { pool } = context;
-      await context.updateSyncRules(`
-bucket_definitions:
-  global:
-    data:
-      - SELECT id, name, description FROM "test_data"
-`);
-
-      await pool.query(`DROP TABLE IF EXISTS test_data`);
-      await pool.query(
-        `CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), name text, description text)`
-      );
-
-      await context.replicateSnapshot();
-      context.startStreaming();
-
-      // Must be > 8kb after compression
-      const largeDescription = crypto.randomBytes(20_000).toString('hex');
-      const [{ test_id }] = pgwireRows(
-        await pool.query({
-          statement: `INSERT INTO test_data(name, description) VALUES('test1', $1) returning id as test_id`,
-          params: [{ type: 'varchar', value: largeDescription }]
-        })
-      );
-
-      await pool.query(`UPDATE test_data SET name = 'test2' WHERE id = '${test_id}'`);
-
-      const data = await context.getBucketData('global[]');
-      expect(data.slice(0, 1)).toMatchObject([
-        putOp('test_data', { id: test_id, name: 'test1', description: largeDescription })
-      ]);
-      expect(data.slice(1)).toMatchObject([
-        putOp('test_data', { id: test_id, name: 'test2', description: largeDescription })
-      ]);
-    })
-  );
-
-  test(
-    'replicating TRUNCATE',
-    walStreamTest(factory, async (context) => {
-      const { pool } = context;
-      const syncRuleContent = `
-bucket_definitions:
-  global:
-    data:
-      - SELECT id, description FROM "test_data"
-  by_test_data:
-    parameters: SELECT id FROM test_data WHERE id = token_parameters.user_id
-    data: []
-`;
-      await context.updateSyncRules(syncRuleContent);
-      await pool.query(`DROP TABLE IF EXISTS test_data`);
-      await pool.query(`CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)`);
-
-      await context.replicateSnapshot();
-      context.startStreaming();
-
-      const [{ test_id }] = pgwireRows(
-        await pool.query(`INSERT INTO test_data(description) VALUES('test1') returning id as test_id`)
-      );
-      await pool.query(`TRUNCATE test_data`);
-
-      const data = await context.getBucketData('global[]');
-
-      expect(data).toMatchObject([
-        putOp('test_data', { id: test_id, description: 'test1' }),
-        removeOp('test_data', test_id)
-      ]);
-    })
-  );
-
-  test(
-    'replicating changing primary key',
-    walStreamTest(factory, async (context) => {
-      const { pool } = context;
-      await context.updateSyncRules(BASIC_SYNC_RULES);
-      await pool.query(`DROP TABLE IF EXISTS test_data`);
-      await pool.query(`CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)`);
-
-      await context.replicateSnapshot();
-      context.startStreaming();
-
-      const [{ test_id }] = pgwireRows(
-        await pool.query(`INSERT INTO test_data(description) VALUES('test1') returning id as test_id`)
-      );
-
-      const [{ test_id: test_id2 }] = pgwireRows(
-        await pool.query(
-          `UPDATE test_data SET id = uuid_generate_v4(), description = 'test2a' WHERE id = '${test_id}' returning id as test_id`
-        )
-      );
-
-      // This update may fail replicating with:
-      // Error: Update on missing record public.test_data:074a601e-fc78-4c33-a15d-f89fdd4af31d :: {"g":1,"t":"651e9fbe9fec6155895057ec","k":"1a0b34da-fb8c-5e6f-8421-d7a3c5d4df4f"}
-      await pool.query(`UPDATE test_data SET description = 'test2b' WHERE id = '${test_id2}'`);
-
-      // Re-use old id again
-      await pool.query(`INSERT INTO test_data(id, description) VALUES('${test_id}', 'test1b')`);
-      await pool.query(`UPDATE test_data SET description = 'test1c' WHERE id = '${test_id}'`);
-
-      const data = await context.getBucketData('global[]');
-      expect(data).toMatchObject([
-        // Initial insert
-        putOp('test_data', { id: test_id, description: 'test1' }),
-        // Update id, then description
-        removeOp('test_data', test_id),
-        putOp('test_data', { id: test_id2, description: 'test2a' }),
-        putOp('test_data', { id: test_id2, description: 'test2b' }),
-        // Re-use old id
-        putOp('test_data', { id: test_id, description: 'test1b' }),
-        putOp('test_data', { id: test_id, description: 'test1c' })
-      ]);
-    })
-  );
-
-  test(
-    'initial sync',
-    walStreamTest(factory, async (context) => {
-      const { pool } = context;
-      await context.updateSyncRules(BASIC_SYNC_RULES);
-
-      await pool.query(`DROP TABLE IF EXISTS test_data`);
-      await pool.query(`CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)`);
-
-      const [{ test_id }] = pgwireRows(
-        await pool.query(`INSERT INTO test_data(description) VALUES('test1') returning id as test_id`)
-      );
-
-      await context.replicateSnapshot();
-      context.startStreaming();
-
-      const data = await context.getBucketData('global[]');
-      expect(data).toMatchObject([putOp('test_data', { id: test_id, description: 'test1' })]);
-    })
-  );
-
-  test(
-    'record too large',
-    walStreamTest(factory, async (context) => {
-      await context.updateSyncRules(`bucket_definitions:
-  global:
-    data:
-      - SELECT id, description, other FROM "test_data"`);
-      const { pool } = context;
-
-      await pool.query(`CREATE TABLE test_data(id text primary key, description text, other text)`);
-
-      await context.replicateSnapshot();
-
-      // 4MB
-      const largeDescription = crypto.randomBytes(2_000_000).toString('hex');
-      // 18MB
-      const tooLargeDescription = crypto.randomBytes(9_000_000).toString('hex');
-
-      await pool.query({
-        statement: `INSERT INTO test_data(id, description, other) VALUES('t1', $1, 'foo')`,
-        params: [{ type: 'varchar', value: tooLargeDescription }]
-      });
-      await pool.query({
-        statement: `UPDATE test_data SET description = $1 WHERE id = 't1'`,
-        params: [{ type: 'varchar', value: largeDescription }]
-      });
-
-      context.startStreaming();
-
-      const data = await context.getBucketData('global[]');
-      expect(data.length).toEqual(1);
-      const row = JSON.parse(data[0].data as string);
-      delete row.description;
-      expect(row).toEqual({ id: 't1', other: 'foo' });
-      delete data[0].data;
-      expect(data[0]).toMatchObject({ object_id: 't1', object_type: 'test_data', op: 'PUT', op_id: '1' });
-    })
-  );
-
-  test(
-    'table not in sync rules',
-    walStreamTest(factory, async (context) => {
-      const { pool } = context;
-      await context.updateSyncRules(BASIC_SYNC_RULES);
-
-      await pool.query(`CREATE TABLE test_donotsync(id uuid primary key default uuid_generate_v4(), description text)`);
-
-      await context.replicateSnapshot();
-
-      const metrics = container.getImplementation(Metrics);
-      const startRowCount = (await metrics.getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
-      const startTxCount = (await metrics.getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0;
-
-      context.startStreaming();
-
-      const [{ test_id }] = pgwireRows(
-        await pool.query(`INSERT INTO test_donotsync(description) VALUES('test1') returning id as test_id`)
-      );
-
-      const data = await context.getBucketData('global[]');
-
-      expect(data).toMatchObject([]);
-      const endRowCount = (await metrics.getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
-      const endTxCount = (await metrics.getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0;
-
-      // There was a transaction, but we should not replicate any actual data
-      expect(endRowCount - startRowCount).toEqual(0);
-      expect(endTxCount - startTxCount).toEqual(1);
-    })
-  );
-}