@powersync/service-module-mongodb 0.9.0 → 0.10.0

This diff compares the contents of publicly available package versions as published to a supported public registry. It is provided for informational purposes only.
Files changed (39)
  1. package/CHANGELOG.md +32 -0
  2. package/dist/api/MongoRouteAPIAdapter.d.ts +1 -2
  3. package/dist/api/MongoRouteAPIAdapter.js +3 -6
  4. package/dist/api/MongoRouteAPIAdapter.js.map +1 -1
  5. package/dist/replication/ChangeStream.d.ts +27 -11
  6. package/dist/replication/ChangeStream.js +565 -288
  7. package/dist/replication/ChangeStream.js.map +1 -1
  8. package/dist/replication/ChangeStreamReplicationJob.d.ts +2 -0
  9. package/dist/replication/ChangeStreamReplicationJob.js +13 -5
  10. package/dist/replication/ChangeStreamReplicationJob.js.map +1 -1
  11. package/dist/replication/ChangeStreamReplicator.d.ts +1 -0
  12. package/dist/replication/ChangeStreamReplicator.js +21 -0
  13. package/dist/replication/ChangeStreamReplicator.js.map +1 -1
  14. package/dist/replication/MongoRelation.d.ts +9 -2
  15. package/dist/replication/MongoRelation.js +16 -5
  16. package/dist/replication/MongoRelation.js.map +1 -1
  17. package/dist/replication/MongoSnapshotQuery.d.ts +26 -0
  18. package/dist/replication/MongoSnapshotQuery.js +56 -0
  19. package/dist/replication/MongoSnapshotQuery.js.map +1 -0
  20. package/dist/replication/replication-utils.d.ts +2 -0
  21. package/dist/replication/replication-utils.js +3 -0
  22. package/dist/replication/replication-utils.js.map +1 -1
  23. package/package.json +9 -9
  24. package/src/api/MongoRouteAPIAdapter.ts +3 -7
  25. package/src/replication/ChangeStream.ts +371 -135
  26. package/src/replication/ChangeStreamReplicationJob.ts +14 -6
  27. package/src/replication/ChangeStreamReplicator.ts +23 -0
  28. package/src/replication/MongoRelation.ts +21 -6
  29. package/src/replication/MongoSnapshotQuery.ts +59 -0
  30. package/src/replication/replication-utils.ts +5 -0
  31. package/test/src/change_stream.test.ts +18 -13
  32. package/test/src/change_stream_utils.ts +47 -22
  33. package/test/src/chunked_snapshot.test.ts +153 -0
  34. package/test/src/resume.test.ts +7 -94
  35. package/test/src/resume_token.test.ts +78 -2
  36. package/test/src/resuming_snapshots.test.ts +138 -0
  37. package/test/src/slow_tests.test.ts +4 -18
  38. package/test/src/util.ts +12 -1
  39. package/tsconfig.tsbuildinfo +1 -1
package/src/replication/ChangeStreamReplicationJob.ts
@@ -1,5 +1,4 @@
-import { isMongoServerError } from '@powersync/lib-service-mongodb';
-import { container } from '@powersync/lib-services-framework';
+import { container, logger as defaultLogger } from '@powersync/lib-services-framework';
 import { replication } from '@powersync/service-core';
 
 import { ChangeStream, ChangeStreamInvalidatedError } from './ChangeStream.js';
@@ -11,18 +10,21 @@ export interface ChangeStreamReplicationJobOptions extends replication.AbstractR
 
 export class ChangeStreamReplicationJob extends replication.AbstractReplicationJob {
   private connectionFactory: ConnectionManagerFactory;
+  private lastStream: ChangeStream | null = null;
 
   constructor(options: ChangeStreamReplicationJobOptions) {
     super(options);
     this.connectionFactory = options.connectionFactory;
+    // We use a custom formatter to process the prefix
+    this.logger = defaultLogger.child({ prefix: `[powersync_${this.storage.group_id}] ` });
   }
 
   async cleanUp(): Promise<void> {
-    // TODO: Implement?
+    // Nothing needed here
   }
 
   async keepAlive() {
-    // TODO: Implement?
+    // Nothing needed here
   }
 
   private get slotName() {
@@ -72,14 +74,16 @@ export class ChangeStreamReplicationJob extends replication.AbstractReplicationJ
         abort_signal: this.abortController.signal,
         storage: this.options.storage,
         metrics: this.options.metrics,
-        connections: connectionManager
+        connections: connectionManager,
+        logger: this.logger
       });
+      this.lastStream = stream;
       await stream.replicate();
     } catch (e) {
       if (this.abortController.signal.aborted) {
        return;
      }
-      this.logger.error(`${this.slotName} Replication error`, e);
+      this.logger.error(`Replication error`, e);
      if (e.cause != null) {
        // Without this additional log, the cause may not be visible in the logs.
        this.logger.error(`cause`, e.cause);
@@ -98,4 +102,8 @@ export class ChangeStreamReplicationJob extends replication.AbstractReplicationJ
       await connectionManager.end();
     }
   }
+
+  async getReplicationLagMillis(): Promise<number | undefined> {
+    return this.lastStream?.getReplicationLagMillis();
+  }
 }
package/src/replication/ChangeStreamReplicator.ts
@@ -3,6 +3,8 @@ import { ChangeStreamReplicationJob } from './ChangeStreamReplicationJob.js';
 import { ConnectionManagerFactory } from './ConnectionManagerFactory.js';
 import { MongoErrorRateLimiter } from './MongoErrorRateLimiter.js';
 import { MongoModule } from '../module/MongoModule.js';
+import { MongoLSN } from '../common/MongoLSN.js';
+import { timestampToDate } from './replication-utils.js';
 
 export interface ChangeStreamReplicatorOptions extends replication.AbstractReplicatorOptions {
   connectionFactory: ConnectionManagerFactory;
@@ -39,4 +41,25 @@ export class ChangeStreamReplicator extends replication.AbstractReplicator<Chang
   async testConnection() {
     return await MongoModule.testConnection(this.connectionFactory.dbConnectionConfig);
   }
+
+  async getReplicationLagMillis(): Promise<number | undefined> {
+    const lag = await super.getReplicationLagMillis();
+    if (lag != null) {
+      return lag;
+    }
+
+    // Booting or in an error loop. Check last active replication status.
+    // This includes sync rules in an ERROR state.
+    const content = await this.storage.getActiveSyncRulesContent();
+    if (content == null) {
+      return undefined;
+    }
+    // Measure the lag from the last resume token's time
+    const lsn = content.last_checkpoint_lsn;
+    if (lsn == null) {
+      return undefined;
+    }
+    const { timestamp } = MongoLSN.fromSerialized(lsn);
+    return Date.now() - timestampToDate(timestamp).getTime();
+  }
 }
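The `getReplicationLagMillis` fallback above derives lag from the timestamp embedded in the last persisted resume token when no ChangeStream is currently active. A minimal sketch of that derivation, using only helpers shown in this diff (the wrapping function is hypothetical):

```ts
import { MongoLSN } from '../common/MongoLSN.js';
import { timestampToDate } from './replication-utils.js';

// Hypothetical helper: derive replication lag from a stored LSN string
// (e.g. last_checkpoint_lsn) while booting or stuck in an error loop.
function lagFromStoredLsn(lsn: string | null): number | undefined {
  if (lsn == null) {
    return undefined;
  }
  // The resume token's cluster time stores seconds in its high 32 bits.
  const { timestamp } = MongoLSN.fromSerialized(lsn);
  return Date.now() - timestampToDate(timestamp).getTime();
}
```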
package/src/replication/MongoRelation.ts
@@ -20,7 +20,10 @@ export function getMongoRelation(source: mongo.ChangeStreamNameSpace): storage.S
 /**
  * For in-memory cache only.
  */
-export function getCacheIdentifier(source: storage.SourceEntityDescriptor): string {
+export function getCacheIdentifier(source: storage.SourceEntityDescriptor | storage.SourceTable): string {
+  if (source instanceof storage.SourceTable) {
+    return `${source.schema}.${source.table}`;
+  }
   return `${source.schema}.${source.name}`;
 }
 
@@ -147,15 +150,27 @@ function filterJsonData(data: any, depth = 0): any {
   }
 }
 
-export async function createCheckpoint(client: mongo.MongoClient, db: mongo.Db): Promise<string> {
+/**
+ * Id for checkpoints not associated with any specific replication stream.
+ *
+ * Use this for write checkpoints, or any other case where we want to process
+ * the checkpoint immediately, and not wait for batching.
+ */
+export const STANDALONE_CHECKPOINT_ID = '_standalone_checkpoint';
+
+export async function createCheckpoint(
+  client: mongo.MongoClient,
+  db: mongo.Db,
+  id: mongo.ObjectId | string
+): Promise<string> {
   const session = client.startSession();
   try {
-    // Note: If multiple PowerSync instances are replicating the same source database,
-    // they'll modify the same checkpoint document. This is fine - it could create
-    // more replication load than required, but won't break anything.
+    // We use a unique id per process, and clear documents on startup.
+    // This is so that we can filter events for our own process only, and ignore
+    // events from other processes.
     await db.collection(CHECKPOINTS_COLLECTION).findOneAndUpdate(
       {
-        _id: 'checkpoint' as any
+        _id: id as any
      },
      {
        $inc: { i: 1 }
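`createCheckpoint` now takes an explicit id, and `STANDALONE_CHECKPOINT_ID` marks checkpoints that should be processed immediately rather than batched. A usage sketch, assuming an already-connected client; this mirrors the call added to `getClientCheckpoint` in `change_stream_utils.ts` below:

```ts
import { mongo } from '@powersync/lib-service-mongodb';
import { createCheckpoint, STANDALONE_CHECKPOINT_ID } from './MongoRelation.js';

// Sketch: request an immediate checkpoint that is not tied to a specific
// replication stream, e.g. a write checkpoint.
async function requestWriteCheckpoint(client: mongo.MongoClient, db: mongo.Db): Promise<string> {
  // Increments the counter on the standalone checkpoint document and
  // returns the resulting checkpoint LSN string.
  return await createCheckpoint(client, db, STANDALONE_CHECKPOINT_ID);
}
```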
package/src/replication/MongoSnapshotQuery.ts
@@ -0,0 +1,59 @@
+import { mongo } from '@powersync/lib-service-mongodb';
+import { ReplicationAssertionError } from '@powersync/lib-services-framework';
+import { bson } from '@powersync/service-core';
+
+/**
+ * Performs a collection snapshot query, chunking by ranges of _id.
+ *
+ * This may miss some rows if they are modified during the snapshot query.
+ * In that case, the change stream replication will pick up those rows afterwards.
+ */
+export class ChunkedSnapshotQuery implements AsyncDisposable {
+  lastKey: any = null;
+  private lastCursor: mongo.FindCursor | null = null;
+  private collection: mongo.Collection;
+  private batchSize: number;
+
+  public constructor(options: { collection: mongo.Collection; batchSize: number; key?: Uint8Array | null }) {
+    this.lastKey = options.key ? bson.deserialize(options.key, { useBigInt64: true })._id : null;
+    this.lastCursor = null;
+    this.collection = options.collection;
+    this.batchSize = options.batchSize;
+  }
+
+  async nextChunk(): Promise<{ docs: mongo.Document[]; lastKey: Uint8Array } | { docs: []; lastKey: null }> {
+    let cursor = this.lastCursor;
+    let newCursor = false;
+    if (cursor == null || cursor.closed) {
+      const filter: mongo.Filter<mongo.Document> = this.lastKey == null ? {} : { _id: { $gt: this.lastKey as any } };
+      cursor = this.collection.find(filter, {
+        batchSize: this.batchSize,
+        readConcern: 'majority',
+        limit: this.batchSize,
+        sort: { _id: 1 }
+      });
+      newCursor = true;
+    }
+    const hasNext = await cursor.hasNext();
+    if (!hasNext) {
+      this.lastCursor = null;
+      if (newCursor) {
+        return { docs: [], lastKey: null };
+      } else {
+        return this.nextChunk();
+      }
+    }
+    const docBatch = cursor.readBufferedDocuments();
+    this.lastCursor = cursor;
+    if (docBatch.length == 0) {
+      throw new ReplicationAssertionError(`MongoDB snapshot query returned an empty batch, but hasNext() was true.`);
+    }
+    const lastKey = docBatch[docBatch.length - 1]._id;
+    this.lastKey = lastKey;
+    return { docs: docBatch, lastKey: bson.serialize({ _id: lastKey }) };
+  }
+
+  async [Symbol.asyncDispose](): Promise<void> {
+    await this.lastCursor?.close();
+  }
+}
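A usage sketch for the new `ChunkedSnapshotQuery`, assuming a connected `mongo.Collection`. The `lastKey` returned with each chunk is a serialized `{ _id }` document that can be persisted and passed back as `key` to resume an interrupted snapshot:

```ts
import { mongo } from '@powersync/lib-service-mongodb';
import { ChunkedSnapshotQuery } from './MongoSnapshotQuery.js';

// Sketch: read a collection snapshot in _id-ordered chunks.
async function snapshotCollection(collection: mongo.Collection, resumeKey?: Uint8Array) {
  await using query = new ChunkedSnapshotQuery({ collection, batchSize: 1000, key: resumeKey });
  while (true) {
    const { docs, lastKey } = await query.nextChunk();
    if (docs.length == 0) {
      break; // snapshot complete
    }
    // Process docs here. Persisting lastKey at this point would allow the
    // snapshot to resume from this chunk after a restart.
  }
}
```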
package/src/replication/replication-utils.ts
@@ -1,6 +1,7 @@
 import { ErrorCode, ServiceError } from '@powersync/lib-services-framework';
 import { MongoManager } from './MongoManager.js';
 import { PostImagesOption } from '../types/types.js';
+import * as bson from 'bson';
 
 export const CHECKPOINTS_COLLECTION = '_powersync_checkpoints';
 
@@ -86,3 +87,7 @@ export async function checkSourceConfiguration(connectionManager: MongoManager):
       .toArray();
   }
 }
+
+export function timestampToDate(timestamp: bson.Timestamp) {
+  return new Date(timestamp.getHighBitsUnsigned() * 1000);
+}
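`timestampToDate` works because a BSON `Timestamp` packs seconds-since-epoch into its high 32 bits and an ordinal counter into the low 32 bits. A small worked example with an illustrative value:

```ts
import * as bson from 'bson';

// 1609459200 seconds since the epoch is 2021-01-01T00:00:00Z.
const ts = new bson.Timestamp({ t: 1609459200, i: 7 });

ts.getHighBitsUnsigned(); // 1609459200 (the seconds component)
new Date(ts.getHighBitsUnsigned() * 1000).toISOString(); // '2021-01-01T00:00:00.000Z'

// Lag relative to now, as computed by getReplicationLagMillis above:
const lagMillis = Date.now() - ts.getHighBitsUnsigned() * 1000;
```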
package/test/src/change_stream.test.ts
@@ -8,8 +8,7 @@ import { test_utils } from '@powersync/service-core-tests';
 
 import { PostImagesOption } from '@module/types/types.js';
 import { ChangeStreamTestContext } from './change_stream_utils.js';
-import { env } from './env.js';
-import { INITIALIZED_MONGO_STORAGE_FACTORY, INITIALIZED_POSTGRES_STORAGE_FACTORY } from './util.js';
+import { describeWithStorage } from './util.js';
 
 const BASIC_SYNC_RULES = `
 bucket_definitions:
@@ -18,12 +17,8 @@ bucket_definitions:
       - SELECT _id as id, description FROM "test_data"
 `;
 
-describe.skipIf(!env.TEST_MONGO_STORAGE)('change stream - mongodb', { timeout: 20_000 }, function () {
-  defineChangeStreamTests(INITIALIZED_MONGO_STORAGE_FACTORY);
-});
-
-describe.skipIf(!env.TEST_POSTGRES_STORAGE)('change stream - postgres', { timeout: 20_000 }, function () {
-  defineChangeStreamTests(INITIALIZED_POSTGRES_STORAGE_FACTORY);
+describe('change stream', () => {
+  describeWithStorage({ timeout: 20_000 }, defineChangeStreamTests);
 });
 
 function defineChangeStreamTests(factory: storage.TestStorageFactory) {
@@ -97,7 +92,9 @@ bucket_definitions:
   });
 
   test('updateLookup - no fullDocument available', async () => {
-    await using context = await ChangeStreamTestContext.open(factory, { postImages: PostImagesOption.OFF });
+    await using context = await ChangeStreamTestContext.open(factory, {
+      mongoOptions: { postImages: PostImagesOption.OFF }
+    });
     const { db, client } = context;
     await context.updateSyncRules(`
 bucket_definitions:
@@ -141,7 +138,9 @@ bucket_definitions:
   test('postImages - autoConfigure', async () => {
     // Similar to the above test, but with postImages enabled.
     // This resolves the consistency issue.
-    await using context = await ChangeStreamTestContext.open(factory, { postImages: PostImagesOption.AUTO_CONFIGURE });
+    await using context = await ChangeStreamTestContext.open(factory, {
+      mongoOptions: { postImages: PostImagesOption.AUTO_CONFIGURE }
+    });
     const { db, client } = context;
     await context.updateSyncRules(`
 bucket_definitions:
@@ -187,7 +186,9 @@ bucket_definitions:
   test('postImages - on', async () => {
     // Similar to postImages - autoConfigure, but does not auto-configure.
     // changeStreamPreAndPostImages must be manually configured.
-    await using context = await ChangeStreamTestContext.open(factory, { postImages: PostImagesOption.READ_ONLY });
+    await using context = await ChangeStreamTestContext.open(factory, {
+      mongoOptions: { postImages: PostImagesOption.READ_ONLY }
+    });
     const { db, client } = context;
     await context.updateSyncRules(`
 bucket_definitions:
@@ -432,7 +433,9 @@ bucket_definitions:
   });
 
   test('postImages - new collection with postImages enabled', async () => {
-    await using context = await ChangeStreamTestContext.open(factory, { postImages: PostImagesOption.AUTO_CONFIGURE });
+    await using context = await ChangeStreamTestContext.open(factory, {
+      mongoOptions: { postImages: PostImagesOption.AUTO_CONFIGURE }
+    });
     const { db } = context;
     await context.updateSyncRules(`
 bucket_definitions:
@@ -463,7 +466,9 @@ bucket_definitions:
   });
 
   test('postImages - new collection with postImages disabled', async () => {
-    await using context = await ChangeStreamTestContext.open(factory, { postImages: PostImagesOption.AUTO_CONFIGURE });
+    await using context = await ChangeStreamTestContext.open(factory, {
+      mongoOptions: { postImages: PostImagesOption.AUTO_CONFIGURE }
+    });
     const { db } = context;
     await context.updateSyncRules(`
 bucket_definitions:
package/test/src/change_stream_utils.ts
@@ -4,15 +4,17 @@ import {
   createCoreReplicationMetrics,
   initializeCoreReplicationMetrics,
   InternalOpId,
+  OplogEntry,
   ProtocolOpId,
   ReplicationCheckpoint,
-  SyncRulesBucketStorage
+  SyncRulesBucketStorage,
+  TestStorageOptions
 } from '@powersync/service-core';
 import { METRICS_HELPER, test_utils } from '@powersync/service-core-tests';
 
 import { ChangeStream, ChangeStreamOptions } from '@module/replication/ChangeStream.js';
 import { MongoManager } from '@module/replication/MongoManager.js';
-import { createCheckpoint } from '@module/replication/MongoRelation.js';
+import { createCheckpoint, STANDALONE_CHECKPOINT_ID } from '@module/replication/MongoRelation.js';
 import { NormalizedMongoConnectionConfig } from '@module/types/types.js';
 
 import { TEST_CONNECTION_OPTIONS, clearTestDb } from './util.js';
@@ -29,17 +31,27 @@ export class ChangeStreamTestContext {
    *
    * This configures all the context, and tears it down afterwards.
    */
-  static async open(factory: () => Promise<BucketStorageFactory>, options?: Partial<NormalizedMongoConnectionConfig>) {
-    const f = await factory();
-    const connectionManager = new MongoManager({ ...TEST_CONNECTION_OPTIONS, ...options });
+  static async open(
+    factory: (options: TestStorageOptions) => Promise<BucketStorageFactory>,
+    options?: {
+      doNotClear?: boolean;
+      mongoOptions?: Partial<NormalizedMongoConnectionConfig>;
+      streamOptions?: Partial<ChangeStreamOptions>;
+    }
+  ) {
+    const f = await factory({ doNotClear: options?.doNotClear });
+    const connectionManager = new MongoManager({ ...TEST_CONNECTION_OPTIONS, ...options?.mongoOptions });
 
-    await clearTestDb(connectionManager.db);
-    return new ChangeStreamTestContext(f, connectionManager);
+    if (!options?.doNotClear) {
+      await clearTestDb(connectionManager.db);
+    }
+    return new ChangeStreamTestContext(f, connectionManager, options?.streamOptions);
   }
 
   constructor(
     public factory: BucketStorageFactory,
-    public connectionManager: MongoManager
+    public connectionManager: MongoManager,
+    private streamOptions?: Partial<ChangeStreamOptions>
   ) {
     createCoreReplicationMetrics(METRICS_HELPER.metricsEngine);
     initializeCoreReplicationMetrics(METRICS_HELPER.metricsEngine);
@@ -74,6 +86,16 @@ export class ChangeStreamTestContext {
     return this.storage!;
   }
 
+  async loadNextSyncRules() {
+    const syncRules = await this.factory.getNextSyncRulesContent();
+    if (syncRules == null) {
+      throw new Error(`Next sync rules not available`);
+    }
+
+    this.storage = this.factory.getInstance(syncRules);
+    return this.storage!;
+  }
+
   get walStream() {
     if (this.storage == null) {
       throw new Error('updateSyncRules() first');
@@ -88,7 +110,8 @@ export class ChangeStreamTestContext {
       abort_signal: this.abortController.signal,
       // Specifically reduce this from the default for tests on MongoDB <= 6.0, otherwise it can take
       // a long time to abort the stream.
-      maxAwaitTimeMS: 200
+      maxAwaitTimeMS: this.streamOptions?.maxAwaitTimeMS ?? 200,
+      snapshotChunkLength: this.streamOptions?.snapshotChunkLength
     };
     this._walStream = new ChangeStream(options);
     return this._walStream!;
@@ -122,23 +145,25 @@ export class ChangeStreamTestContext {
     return test_utils.fromAsync(this.storage!.getBucketDataBatch(checkpoint, map));
   }
 
-  async getBucketData(
-    bucket: string,
-    start?: ProtocolOpId | InternalOpId | undefined,
-    options?: { timeout?: number; limit?: number; chunkLimitBytes?: number }
-  ) {
+  async getBucketData(bucket: string, start?: ProtocolOpId | InternalOpId | undefined, options?: { timeout?: number }) {
     start ??= 0n;
     if (typeof start == 'string') {
      start = BigInt(start);
    }
-    let checkpoint = await this.getCheckpoint(options);
+    const checkpoint = await this.getCheckpoint(options);
     const map = new Map<string, InternalOpId>([[bucket, start]]);
-    const batch = this.storage!.getBucketDataBatch(checkpoint, map, {
-      limit: options?.limit,
-      chunkLimitBytes: options?.chunkLimitBytes
-    });
-    const batches = await test_utils.fromAsync(batch);
-    return batches[0]?.chunkData.data ?? [];
+    let data: OplogEntry[] = [];
+    while (true) {
+      const batch = this.storage!.getBucketDataBatch(checkpoint, map);
+
+      const batches = await test_utils.fromAsync(batch);
+      data = data.concat(batches[0]?.chunkData.data ?? []);
+      if (batches.length == 0 || !batches[0]!.chunkData.has_more) {
+        break;
+      }
+      map.set(bucket, BigInt(batches[0]!.chunkData.next_after));
+    }
+    return data;
   }
 
   async getChecksums(buckets: string[], options?: { timeout?: number }) {
@@ -160,7 +185,7 @@ export async function getClientCheckpoint(
   options?: { timeout?: number }
 ): Promise<InternalOpId> {
   const start = Date.now();
-  const lsn = await createCheckpoint(client, db);
+  const lsn = await createCheckpoint(client, db, STANDALONE_CHECKPOINT_ID);
   // This old API needs a persisted checkpoint id.
   // Since we don't use LSNs anymore, the only way to get that is to wait.
 
package/test/src/chunked_snapshot.test.ts
@@ -0,0 +1,153 @@
+import { mongo } from '@powersync/lib-service-mongodb';
+import { reduceBucket, TestStorageFactory } from '@powersync/service-core';
+import { METRICS_HELPER } from '@powersync/service-core-tests';
+import { JSONBig } from '@powersync/service-jsonbig';
+import { SqliteJsonValue } from '@powersync/service-sync-rules';
+import * as timers from 'timers/promises';
+import { describe, expect, test } from 'vitest';
+import { ChangeStreamTestContext } from './change_stream_utils.js';
+import { describeWithStorage } from './util.js';
+
+describe('chunked snapshots', () => {
+  describeWithStorage({ timeout: 120_000 }, defineBatchTests);
+});
+
+function defineBatchTests(factory: TestStorageFactory) {
+  // This is not as sensitive to the id type as postgres, but we still test a couple of cases
+  test('chunked snapshot (int32)', async () => {
+    await testChunkedSnapshot({
+      generateId(i) {
+        return i;
+      },
+      idToSqlite(id: number) {
+        return BigInt(id);
+      }
+    });
+  });
+
+  test('chunked snapshot (Timestamp)', async () => {
+    await testChunkedSnapshot({
+      generateId(i) {
+        return mongo.Timestamp.fromBits(Math.floor(i / 1000), i % 1000);
+      },
+      idToSqlite(id: mongo.Timestamp) {
+        return id.toBigInt();
+      }
+    });
+  });
+
+  test('chunked snapshot (compound)', async () => {
+    await testChunkedSnapshot({
+      generateId(i) {
+        return { a: Math.floor(i / 100), b: i % 100 };
+      },
+      idToSqlite(id: any) {
+        return JSON.stringify(id);
+      }
+    });
+  });
+
+  test('chunked snapshot (float)', async () => {
+    await testChunkedSnapshot({
+      generateId(i) {
+        // Floating-point operations are not exact, but it should be consistent at least
+        return i / Math.PI;
+      },
+      idToSqlite(id: any) {
+        return id;
+      }
+    });
+  });
+
+  async function testChunkedSnapshot(options: {
+    generateId: (i: number) => any;
+    idToSqlite?: (id: any) => SqliteJsonValue;
+  }) {
+    // This is not quite as much of an edge case as with Postgres. We do still test that
+    // updates applied while replicating are applied correctly.
+    const idToSqlite = options.idToSqlite ?? ((n) => n);
+    const idToString = (id: any) => String(idToSqlite(id));
+
+    await using context = await ChangeStreamTestContext.open(factory, {
+      // We need to use a smaller chunk size here, so that we can run a query in between chunks
+      streamOptions: { snapshotChunkLength: 100 }
+    });
+
+    await context.updateSyncRules(`bucket_definitions:
+  global:
+    data:
+      - SELECT _id as id, description FROM test_data`);
+    const { db } = context;
+
+    let batch = db.collection('test_data').initializeUnorderedBulkOp();
+
+    // 1. Start with 2k rows...
+    for (let i = 1; i <= 2000; i++) {
+      batch.insert({ _id: options.generateId(i), description: 'foo' });
+    }
+    await batch.execute();
+
+    // 2. Replicate one batch of rows
+    // Our "stopping point" here is not quite deterministic.
+    const p = context.replicateSnapshot();
+
+    const stopAfter = 100;
+    const startRowCount = (await METRICS_HELPER.getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
+
+    while (true) {
+      const count =
+        ((await METRICS_HELPER.getMetricValueForTests('powersync_rows_replicated_total')) ?? 0) - startRowCount;
+
+      if (count >= stopAfter) {
+        break;
+      }
+      await timers.setTimeout(1);
+    }
+
+    // 3. Update some records
+    const idA = options.generateId(2000);
+    const idB = options.generateId(1);
+    await db.collection('test_data').updateOne({ _id: idA }, { $set: { description: 'bar' } });
+    await db.collection('test_data').updateOne({ _id: idB }, { $set: { description: 'baz' } });
+
+    // 4. Delete
+    const idC = options.generateId(1999);
+    await db.collection('test_data').deleteOne({ _id: idC });
+
+    // 5. Insert
+    const idD = options.generateId(2001);
+    await db.collection('test_data').insertOne({ _id: idD, description: 'new' });
+
+    // 6. Replicate the rest of the table.
+    await p;
+
+    context.startStreaming();
+
+    const data = await context.getBucketData('global[]');
+    const reduced = reduceBucket(data);
+
+    expect(reduced.find((row) => row.object_id == idToString(idA))?.data).toEqual(
+      JSONBig.stringify({
+        id: idToSqlite(idA),
+        description: 'bar'
+      })
+    );
+
+    expect(reduced.find((row) => row.object_id == idToString(idB))?.data).toEqual(
+      JSONBig.stringify({
+        id: idToSqlite(idB),
+        description: 'baz'
+      })
+    );
+
+    expect(reduced.find((row) => row.object_id == idToString(idC))).toBeUndefined();
+
+    expect(reduced.find((row) => row.object_id == idToString(idD))?.data).toEqual(
+      JSONBig.stringify({
+        id: idToSqlite(idD),
+        description: 'new'
+      })
+    );
+    expect(reduced.length).toEqual(2001);
+  }
+}