@powersync/service-module-mongodb 0.5.1 → 0.6.1

@@ -14,7 +14,7 @@ import { MongoLSN } from '../common/MongoLSN.js';
 import { PostImagesOption } from '../types/types.js';
 import { escapeRegExp } from '../utils.js';
 import { MongoManager } from './MongoManager.js';
-import { constructAfterRecord, createCheckpoint, getMongoRelation } from './MongoRelation.js';
+import { constructAfterRecord, createCheckpoint, getCacheIdentifier, getMongoRelation } from './MongoRelation.js';
 import { CHECKPOINTS_COLLECTION } from './replication-utils.js';
 
 export interface ChangeStreamOptions {
@@ -89,6 +89,10 @@ export class ChangeStream {
     return this.connections.options.postImages == PostImagesOption.AUTO_CONFIGURE;
   }
 
+  private get logPrefix() {
+    return `[powersync_${this.group_id}]`;
+  }
+
   /**
    * This resolves a pattern, persists the related metadata, and returns
    * the resulting SourceTables.
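Editor's note: the new `logPrefix` getter tags every log line with the sync-rules group id, so output from concurrently running replication instances can be told apart. A minimal illustration of the resulting format (the `group_id` value here is made up):

```ts
// Illustration only: mirrors the `logPrefix` getter in the hunk above.
const group_id = 1; // hypothetical group id
const logPrefix = `[powersync_${group_id}]`;

console.log(`${logPrefix} Initial replication already done`);
// -> [powersync_1] Initial replication already done
```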
@@ -124,18 +128,13 @@ export class ChangeStream {
       .toArray();
 
     if (!tablePattern.isWildcard && collections.length == 0) {
-      logger.warn(`Collection ${schema}.${tablePattern.name} not found`);
+      logger.warn(`${this.logPrefix} Collection ${schema}.${tablePattern.name} not found`);
     }
 
     for (let collection of collections) {
       const table = await this.handleRelation(
         batch,
-        {
-          name: collection.name,
-          schema,
-          objectId: collection.name,
-          replicationColumns: [{ name: '_id' }]
-        } as SourceEntityDescriptor,
+        getMongoRelation({ db: schema, coll: collection.name }),
         // This is done as part of the initial setup - snapshot is handled elsewhere
         { snapshot: false, collectionInfo: collection }
       );
@@ -149,7 +148,7 @@ export class ChangeStream {
   async initSlot(): Promise<InitResult> {
     const status = await this.storage.getStatus();
     if (status.snapshot_done && status.checkpoint_lsn) {
-      logger.info(`Initial replication already done`);
+      logger.info(`${this.logPrefix} Initial replication already done`);
       return { needsInitialSync: false };
     }
 
@@ -220,7 +219,7 @@ export class ChangeStream {
         }
 
         const { comparable: lsn } = new MongoLSN({ timestamp: snapshotTime });
-        logger.info(`Snapshot commit at ${snapshotTime.inspect()} / ${lsn}`);
+        logger.info(`${this.logPrefix} Snapshot commit at ${snapshotTime.inspect()} / ${lsn}`);
         await batch.commit(lsn);
       }
     );
@@ -289,7 +288,7 @@ export class ChangeStream {
     table: storage.SourceTable,
     session?: mongo.ClientSession
   ) {
-    logger.info(`Replicating ${table.qualifiedName}`);
+    logger.info(`${this.logPrefix} Replicating ${table.qualifiedName}`);
     const estimatedCount = await this.estimatedCount(table);
     let at = 0;
     let lastLogIndex = 0;
@@ -319,7 +318,7 @@ export class ChangeStream {
 
       at += 1;
       if (at - lastLogIndex >= 5000) {
-        logger.info(`[${this.group_id}] Replicating ${table.qualifiedName} ${at}/${estimatedCount}`);
+        logger.info(`${this.logPrefix} Replicating ${table.qualifiedName} ${at}/${estimatedCount}`);
        lastLogIndex = at;
      }
      Metrics.getInstance().rows_replicated_total.add(1);
@@ -328,14 +327,16 @@ export class ChangeStream {
     }
 
     await batch.flush();
-    logger.info(`Replicated ${at} documents for ${table.qualifiedName}`);
+    logger.info(`${this.logPrefix} Replicated ${at} documents for ${table.qualifiedName}`);
   }
 
   private async getRelation(
     batch: storage.BucketStorageBatch,
-    descriptor: SourceEntityDescriptor
+    descriptor: SourceEntityDescriptor,
+    options: { snapshot: boolean }
   ): Promise<SourceTable> {
-    const existing = this.relation_cache.get(descriptor.objectId);
+    const cacheId = getCacheIdentifier(descriptor);
+    const existing = this.relation_cache.get(cacheId);
     if (existing != null) {
       return existing;
     }
@@ -344,7 +345,7 @@ export class ChangeStream {
     // missing values.
     const collection = await this.getCollectionInfo(descriptor.schema, descriptor.name);
 
-    return this.handleRelation(batch, descriptor, { snapshot: false, collectionInfo: collection });
+    return this.handleRelation(batch, descriptor, { snapshot: options.snapshot, collectionInfo: collection });
   }
 
   private async getCollectionInfo(db: string, name: string): Promise<mongo.CollectionInfo | undefined> {
@@ -375,7 +376,7 @@ export class ChangeStream {
         collMod: collectionInfo.name,
         changeStreamPreAndPostImages: { enabled: true }
       });
-      logger.info(`Enabled postImages on ${db}.${collectionInfo.name}`);
+      logger.info(`${this.logPrefix} Enabled postImages on ${db}.${collectionInfo.name}`);
     } else if (!enabled) {
       throw new ServiceError(ErrorCode.PSYNC_S1343, `postImages not enabled on ${db}.${collectionInfo.name}`);
     }
@@ -394,9 +395,6 @@ export class ChangeStream {
     }
 
     const snapshot = options.snapshot;
-    if (!descriptor.objectId && typeof descriptor.objectId != 'string') {
-      throw new ReplicationAssertionError('MongoDB replication - objectId expected');
-    }
     const result = await this.storage.resolveTable({
       group_id: this.group_id,
       connection_id: this.connection_id,
@@ -404,10 +402,16 @@ export class ChangeStream {
       entity_descriptor: descriptor,
       sync_rules: this.sync_rules
     });
-    this.relation_cache.set(descriptor.objectId, result.table);
+    this.relation_cache.set(getCacheIdentifier(descriptor), result.table);
 
-    // Drop conflicting tables. This includes for example renamed tables.
-    await batch.drop(result.dropTables);
+    // Drop conflicting collections.
+    // This is generally not expected for MongoDB source dbs, so we log an error.
+    if (result.dropTables.length > 0) {
+      logger.error(
+        `Conflicting collections found for ${JSON.stringify(descriptor)}. Dropping: ${result.dropTables.map((t) => t.id).join(', ')}`
+      );
+      await batch.drop(result.dropTables);
+    }
 
     // Snapshot if:
     // 1. Snapshot is requested (false for initial snapshot, since that process handles it elsewhere)
@@ -415,6 +419,7 @@ export class ChangeStream {
     // 3. The table is used in sync rules.
     const shouldSnapshot = snapshot && !result.table.snapshotComplete && result.table.syncAny;
     if (shouldSnapshot) {
+      logger.info(`${this.logPrefix} New collection: ${descriptor.schema}.${descriptor.name}`);
       // Truncate this table, in case a previous snapshot was interrupted.
       await batch.truncate([result.table]);
 
@@ -434,7 +439,7 @@ export class ChangeStream {
     change: mongo.ChangeStreamDocument
   ): Promise<storage.FlushedResult | null> {
     if (!table.syncAny) {
-      logger.debug(`Collection ${table.qualifiedName} not used in sync rules - skipping`);
+      logger.debug(`${this.logPrefix} Collection ${table.qualifiedName} not used in sync rules - skipping`);
       return null;
     }
 
@@ -528,7 +533,7 @@ export class ChangeStream {
     const startAfter = lastLsn?.timestamp;
     const resumeAfter = lastLsn?.resumeToken;
 
-    logger.info(`Resume streaming at ${startAfter?.inspect()} / ${lastLsn}`);
+    logger.info(`${this.logPrefix} Resume streaming at ${startAfter?.inspect()} / ${lastLsn}`);
 
     const filters = this.getSourceNamespaceFilters();
 
@@ -552,7 +557,6 @@ export class ChangeStream {
 
     const streamOptions: mongo.ChangeStreamOptions = {
       showExpandedEvents: true,
-      useBigInt64: true,
       maxAwaitTimeMS: 200,
       fullDocument: fullDocument
     };
@@ -591,13 +595,14 @@ export class ChangeStream {
 
     let splitDocument: mongo.ChangeStreamDocument | null = null;
 
+    let flexDbNameWorkaroundLogged = false;
+
     while (true) {
       if (this.abort_signal.aborted) {
         break;
       }
 
       const originalChangeDocument = await stream.tryNext();
-
       // The stream was closed, we will only ever receive `null` from it
       if (!originalChangeDocument && stream.closed) {
         break;
@@ -637,6 +642,29 @@ export class ChangeStream {
         throw new ReplicationAssertionError(`Incomplete splitEvent: ${JSON.stringify(splitDocument.splitEvent)}`);
       }
 
+      if (
+        !filters.multipleDatabases &&
+        'ns' in changeDocument &&
+        changeDocument.ns.db != this.defaultDb.databaseName &&
+        changeDocument.ns.db.endsWith(`_${this.defaultDb.databaseName}`)
+      ) {
+        // When all of the following conditions are met:
+        // 1. We're replicating from an Atlas Flex instance.
+        // 2. There were changestream events recorded while the PowerSync service is paused.
+        // 3. We're only replicating from a single database.
+        // Then we've observed an ns with for example {db: '67b83e86cd20730f1e766dde_ps'},
+        // instead of the expected {db: 'ps'}.
+        // We correct this.
+        changeDocument.ns.db = this.defaultDb.databaseName;
+
+        if (!flexDbNameWorkaroundLogged) {
+          flexDbNameWorkaroundLogged = true;
+          logger.warn(
+            `${this.logPrefix} Incorrect DB name in change stream: ${changeDocument.ns.db}. Changed to ${this.defaultDb.databaseName}.`
+          );
+        }
+      }
+
       if (
         (changeDocument.operationType == 'insert' ||
           changeDocument.operationType == 'update' ||
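Editor's note: the workaround above can be read as a standalone predicate. The event's namespace is rewritten only when replicating a single database and the reported db name is the expected name with an extra `<id>_` prefix, which Atlas Flex instances have been observed to emit for events recorded while the service was paused. A condensed restatement (the helper name is ours, not part of the package):

```ts
// Hypothetical helper restating the condition from the hunk above.
function normalizeFlexDbName(nsDb: string, expectedDb: string, multipleDatabases: boolean): string {
  const looksPrefixed = nsDb !== expectedDb && nsDb.endsWith(`_${expectedDb}`);
  if (!multipleDatabases && looksPrefixed) {
    // e.g. '67b83e86cd20730f1e766dde_ps' -> 'ps'
    return expectedDb;
  }
  return nsDb;
}
```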
@@ -683,28 +711,44 @@ export class ChangeStream {
          waitForCheckpointLsn = await createCheckpoint(this.client, this.defaultDb);
        }
        const rel = getMongoRelation(changeDocument.ns);
-        const table = await this.getRelation(batch, rel);
+        const table = await this.getRelation(batch, rel, {
+          // In most cases, we should not need to snapshot this. But if this is the first time we see the collection
+          // for whatever reason, then we do need to snapshot it.
+          // This may result in some duplicate operations when a collection is created for the first time after
+          // sync rules was deployed.
+          snapshot: true
+        });
        if (table.syncAny) {
          await this.writeChange(batch, table, changeDocument);
        }
      } else if (changeDocument.operationType == 'drop') {
        const rel = getMongoRelation(changeDocument.ns);
-        const table = await this.getRelation(batch, rel);
+        const table = await this.getRelation(batch, rel, {
+          // We're "dropping" this collection, so never snapshot it.
+          snapshot: false
+        });
        if (table.syncAny) {
          await batch.drop([table]);
-          this.relation_cache.delete(table.objectId);
+          this.relation_cache.delete(getCacheIdentifier(rel));
        }
      } else if (changeDocument.operationType == 'rename') {
        const relFrom = getMongoRelation(changeDocument.ns);
        const relTo = getMongoRelation(changeDocument.to);
-        const tableFrom = await this.getRelation(batch, relFrom);
+        const tableFrom = await this.getRelation(batch, relFrom, {
+          // We're "dropping" this collection, so never snapshot it.
+          snapshot: false
+        });
        if (tableFrom.syncAny) {
          await batch.drop([tableFrom]);
-          this.relation_cache.delete(tableFrom.objectId);
+          this.relation_cache.delete(getCacheIdentifier(relFrom));
        }
        // Here we do need to snapshot the new table
        const collection = await this.getCollectionInfo(relTo.schema, relTo.name);
-        await this.handleRelation(batch, relTo, { snapshot: true, collectionInfo: collection });
+        await this.handleRelation(batch, relTo, {
+          // This is a new (renamed) collection, so always snapshot it.
+          snapshot: true,
+          collectionInfo: collection
+        });
      }
    }
  }
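Editor's note: the per-event `snapshot` flag threaded through `getRelation` above follows a simple rule: data events may trigger a late snapshot the first time a collection is seen, while `drop` and the source side of a `rename` never do. A hedged summary of that mapping, with names of our own choosing:

```ts
// Hypothetical summary of the snapshot flags passed in the hunk above.
type StreamOp = 'insert' | 'update' | 'replace' | 'drop' | 'renameFrom' | 'renameTo';

function snapshotOnFirstSight(op: StreamOp): boolean {
  switch (op) {
    case 'insert':
    case 'update':
    case 'replace':
      return true; // may duplicate some ops for brand-new collections
    case 'drop':
    case 'renameFrom':
      return false; // the collection is going away; never snapshot
    case 'renameTo':
      return true; // a renamed collection is treated as new
  }
}
```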
@@ -40,8 +40,8 @@ export class ChangeStreamReplicationJob extends replication.AbstractReplicationJob {
      this.logger.error(`Replication failed`, e);

      if (e instanceof ChangeStreamInvalidatedError) {
-        // This stops replication on this slot, and creates a new slot
-        await this.options.storage.factory.slotRemoved(this.slotName);
+        // This stops replication and restarts with a new instance
+        await this.options.storage.factory.restartReplication(this.storage.group_id);
      }
    } finally {
      this.abortController.abort();
@@ -1,11 +1,9 @@
 import { mongo } from '@powersync/lib-service-mongodb';
 
 import { NormalizedMongoConnectionConfig } from '../types/types.js';
+import { BSON_DESERIALIZE_DATA_OPTIONS } from '@powersync/service-core';
 
 export class MongoManager {
-  /**
-   * Do not use this for any transactions.
-   */
   public readonly client: mongo.MongoClient;
   public readonly db: mongo.Db;
 
@@ -35,6 +33,9 @@ export class MongoManager {
 
       maxConnecting: 3,
       maxIdleTimeMS: 60_000,
+
+      ...BSON_DESERIALIZE_DATA_OPTIONS,
+
       ...overrides
     });
    this.db = this.client.db(options.database, {});
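Editor's note: throughout this release, ad-hoc `useBigInt64: true` options are replaced by spreading a shared `BSON_DESERIALIZE_DATA_OPTIONS` constant from `@powersync/service-core`. Its exact definition is not part of this diff; judging by the option it replaces at every call site, a plausible shape is sketched below (the constant's contents here are an assumption, not the real definition):

```ts
import { mongo } from '@powersync/lib-service-mongodb';

// Assumption: inferred from the `useBigInt64: true` call sites this constant
// replaces in the diff. The real definition lives in @powersync/service-core.
const BSON_DESERIALIZE_DATA_OPTIONS: mongo.BSONSerializeOptions = {
  useBigInt64: true // deserialize BSON int64 values as JavaScript BigInt
};

// Usage mirrors MongoManager above:
const client = new mongo.MongoClient('mongodb://localhost:27017', {
  maxConnecting: 3,
  maxIdleTimeMS: 60_000,
  ...BSON_DESERIALIZE_DATA_OPTIONS
});
```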
@@ -11,11 +11,19 @@ export function getMongoRelation(source: mongo.ChangeStreamNameSpace): storage.SourceEntityDescriptor {
   return {
     name: source.coll,
     schema: source.db,
-    objectId: source.coll,
+    // Not relevant for MongoDB - we use db + coll name as the identifier
+    objectId: undefined,
     replicationColumns: [{ name: '_id' }]
   } satisfies storage.SourceEntityDescriptor;
 }
 
+/**
+ * For in-memory cache only.
+ */
+export function getCacheIdentifier(source: storage.SourceEntityDescriptor): string {
+  return `${source.schema}.${source.name}`;
+}
+
 export function constructAfterRecord(document: mongo.Document): SqliteRow {
   let record: SqliteRow = {};
   for (let key of Object.keys(document)) {
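Editor's note: with `objectId` now `undefined` for MongoDB relations, the in-memory relation cache is keyed by `getCacheIdentifier`, i.e. the `schema.name` pair. The old key (`source.coll` alone) could collide for same-named collections in different databases; the new key disambiguates by database. A minimal self-contained sketch of the new keying (the cached value type is simplified for illustration):

```ts
// Simplified stand-ins for illustration; the real types live in @powersync/service-core.
interface SourceEntityDescriptor {
  schema: string;
  name: string;
  objectId: string | undefined;
}

function getCacheIdentifier(source: SourceEntityDescriptor): string {
  return `${source.schema}.${source.name}`;
}

const relationCache = new Map<string, { table: string }>();
const users: SourceEntityDescriptor = { schema: 'ps', name: 'users', objectId: undefined };

relationCache.set(getCacheIdentifier(users), { table: 'users' });
// A dropped and re-created collection with the same db + name resolves to the same slot:
console.log(relationCache.get('ps.users')); // -> { table: 'users' }
```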
@@ -239,6 +239,7 @@ bucket_definitions:
      - SELECT _id as id, description FROM "test_DATA"
  `);
 
+  await db.createCollection('test_DATA');
   await context.replicateSnapshot();
 
   context.startStreaming();
@@ -261,6 +262,7 @@ bucket_definitions:
    data:
      - SELECT _id as id, name, description FROM "test_data"
  `);
+  await db.createCollection('test_data');
 
   await context.replicateSnapshot();
   context.startStreaming();
@@ -371,6 +373,8 @@ bucket_definitions:
      - SELECT _id as id, name, other FROM "test_data"`);
   const { db } = context;
 
+  await db.createCollection('test_data');
+
   await context.replicateSnapshot();
 
   const collection = db.collection('test_data');
@@ -451,6 +455,8 @@ bucket_definitions:
 
   const data = await context.getBucketData('global[]');
   expect(data).toMatchObject([
+    // An extra op here, since this triggers a snapshot in addition to getting the event.
+    test_utils.putOp('test_data', { id: test_id!.toHexString(), description: 'test2' }),
    test_utils.putOp('test_data', { id: test_id!.toHexString(), description: 'test1' }),
    test_utils.putOp('test_data', { id: test_id!.toHexString(), description: 'test2' })
  ]);
@@ -1,5 +1,5 @@
 import { mongo } from '@powersync/lib-service-mongodb';
-import { ActiveCheckpoint, BucketStorageFactory, OpId, SyncRulesBucketStorage } from '@powersync/service-core';
+import { BucketStorageFactory, OpId, ReplicationCheckpoint, SyncRulesBucketStorage } from '@powersync/service-core';
 import { test_utils } from '@powersync/service-core-tests';
 
 import { ChangeStream, ChangeStreamOptions } from '@module/replication/ChangeStream.js';
@@ -138,7 +138,7 @@ export class ChangeStreamTestContext {
 export async function getClientCheckpoint(
   client: mongo.MongoClient,
   db: mongo.Db,
-  bucketStorage: BucketStorageFactory,
+  storageFactory: BucketStorageFactory,
   options?: { timeout?: number }
 ): Promise<OpId> {
   const start = Date.now();
@@ -147,14 +147,15 @@ export async function getClientCheckpoint(
   // Since we don't use LSNs anymore, the only way to get that is to wait.
 
   const timeout = options?.timeout ?? 50_000;
-  let lastCp: ActiveCheckpoint | null = null;
+  let lastCp: ReplicationCheckpoint | null = null;
 
   while (Date.now() - start < timeout) {
-    const cp = await bucketStorage.getActiveCheckpoint();
-    lastCp = cp;
-    if (!cp.hasSyncRules()) {
+    const storage = await storageFactory.getActiveStorage();
+    const cp = await storage?.getCheckpoint();
+    if (cp == null) {
      throw new Error('No sync rules available');
    }
+    lastCp = cp;
    if (cp.lsn && cp.lsn >= lsn) {
      return cp.checkpoint;
    }
@@ -47,6 +47,13 @@ describe('mongo data types', () => {
        'mydb',
        { foo: 'bar' }
      )
+    },
+    {
+      _id: 6 as any,
+      int4: -1,
+      int8: -9007199254740993n,
+      float: -3.14,
+      decimal: new mongo.Decimal128('-3.14')
    }
  ]);
 }
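Editor's note: the new test row exercises an int64 value just outside the double-precision safe range: the magnitude 9007199254740993 equals 2^53 + 1, one past Number.MAX_SAFE_INTEGER (2^53 - 1 = 9007199254740991). Without BigInt deserialization the value silently rounds, which is exactly what `useBigInt64` / `BSON_DESERIALIZE_DATA_OPTIONS` guards against:

```ts
// 2^53 + 1 is not representable as an IEEE-754 double:
const exact = -9007199254740993n;
console.log(Number(exact)); // -9007199254740992 (rounded, precision lost)
console.log(exact);         // -9007199254740993n (exact as BigInt)
console.log(Number.MAX_SAFE_INTEGER); // 9007199254740991
```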
@@ -109,6 +116,13 @@ describe('mongo data types', () => {
    },
    { _id: 2 as any, nested: [{ test: 'thing' }] },
    { _id: 3 as any, date: [new Date('2023-03-06 15:47+02')] },
+    {
+      _id: 6 as any,
+      int4: [-1],
+      int8: [-9007199254740993n],
+      float: [-3.14],
+      decimal: [new mongo.Decimal128('-3.14')]
+    },
    {
      _id: 10 as any,
      timestamp: [mongo.Timestamp.fromBits(123, 456)],
@@ -164,6 +178,14 @@ describe('mongo data types', () => {
 
   // This must specifically be null, and not undefined.
   expect(transformed[4].undefined).toBeNull();
+
+  expect(transformed[5]).toMatchObject({
+    _id: 6n,
+    int4: -1n,
+    int8: -9007199254740993n,
+    float: -3.14,
+    decimal: '-3.14'
+  });
 }
 
 function checkResultsNested(transformed: Record<string, any>[]) {
@@ -193,6 +215,19 @@ describe('mongo data types', () => {
   });
 
   expect(transformed[3]).toMatchObject({
+    _id: 5n,
+    undefined: '[null]'
+  });
+
+  expect(transformed[4]).toMatchObject({
+    _id: 6n,
+    int4: '[-1]',
+    int8: '[-9007199254740993]',
+    float: '[-3.14]',
+    decimal: '["-3.14"]'
+  });
+
+  expect(transformed[5]).toMatchObject({
    _id: 10n,
    objectId: '["66e834cc91d805df11fa0ecb"]',
    timestamp: '[1958505087099]',
@@ -203,10 +238,6 @@ describe('mongo data types', () => {
    minKey: '[null]',
    maxKey: '[null]'
  });
-
-  expect(transformed[4]).toMatchObject({
-    undefined: '[null]'
-  });
 }
 
 test('test direct queries', async () => {
@@ -218,11 +249,13 @@ describe('mongo data types', () => {
    await insert(collection);
    await insertUndefined(db, 'test_data');
 
-    const rawResults = await db.collection('test_data').find().toArray();
+    const rawResults = await db
+      .collection('test_data')
+      .find({}, { sort: { _id: 1 } })
+      .toArray();
    // It is tricky to save "undefined" with mongo, so we check that it succeeded.
    expect(rawResults[4].undefined).toBeUndefined();
    const transformed = [...ChangeStream.getQueryData(rawResults)];
-
    checkResults(transformed);
  } finally {
    await client.close();
@@ -238,8 +271,11 @@ describe('mongo data types', () => {
    await insertNested(collection);
    await insertUndefined(db, 'test_data_arrays', true);
 
-    const rawResults = await db.collection('test_data_arrays').find().toArray();
-    expect(rawResults[4].undefined).toEqual([undefined]);
+    const rawResults = await db
+      .collection('test_data_arrays')
+      .find({}, { sort: { _id: 1 } })
+      .toArray();
+    expect(rawResults[3].undefined).toEqual([undefined]);
    const transformed = [...ChangeStream.getQueryData(rawResults)];
 
    checkResultsNested(transformed);
@@ -257,7 +293,6 @@ describe('mongo data types', () => {
    await setupTable(db);
 
    const stream = db.watch([], {
-      useBigInt64: true,
      maxAwaitTimeMS: 50,
      fullDocument: 'updateLookup'
    });
@@ -267,7 +302,7 @@ describe('mongo data types', () => {
    await insert(collection);
    await insertUndefined(db, 'test_data');
 
-    const transformed = await getReplicationTx(stream, 5);
+    const transformed = await getReplicationTx(stream, 6);
 
    checkResults(transformed);
  } finally {
@@ -282,7 +317,6 @@ describe('mongo data types', () => {
    await setupTable(db);
 
    const stream = db.watch([], {
-      useBigInt64: true,
      maxAwaitTimeMS: 50,
      fullDocument: 'updateLookup'
    });
@@ -292,7 +326,7 @@ describe('mongo data types', () => {
    await insertNested(collection);
    await insertUndefined(db, 'test_data_arrays', true);
 
-    const transformed = await getReplicationTx(stream, 5);
+    const transformed = await getReplicationTx(stream, 6);
 
    checkResultsNested(transformed);
  } finally {
@@ -505,5 +539,6 @@ async function getReplicationTx(replicationStream: mongo.ChangeStream, count: number) {
      break;
    }
  }
+  transformed.sort((a, b) => Number(a._id) - Number(b._id));
  return transformed;
 }
package/test/src/util.ts CHANGED
@@ -4,6 +4,7 @@ import * as postgres_storage from '@powersync/service-module-postgres-storage';
 
 import * as types from '@module/types/types.js';
 import { env } from './env.js';
+import { BSON_DESERIALIZE_DATA_OPTIONS } from '@powersync/service-core';
 
 export const TEST_URI = env.MONGO_TEST_DATA_URL;
 
@@ -30,7 +31,7 @@ export async function connectMongoData() {
    connectTimeoutMS: env.CI ? 15_000 : 5_000,
    socketTimeoutMS: env.CI ? 15_000 : 5_000,
    serverSelectionTimeoutMS: env.CI ? 15_000 : 2_500,
-    useBigInt64: true
+    ...BSON_DESERIALIZE_DATA_OPTIONS
  });
  const dbname = new URL(env.MONGO_TEST_DATA_URL).pathname.substring(1);
  return { client, db: client.db(dbname) };