@powersync/service-module-mongodb 0.12.9 → 0.12.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +19 -0
- package/dist/replication/ChangeStreamReplicationJob.d.ts +0 -2
- package/dist/replication/ChangeStreamReplicationJob.js +13 -38
- package/dist/replication/ChangeStreamReplicationJob.js.map +1 -1
- package/dist/replication/ChangeStreamReplicator.js +1 -2
- package/dist/replication/ChangeStreamReplicator.js.map +1 -1
- package/dist/replication/ConnectionManagerFactory.js +8 -4
- package/dist/replication/ConnectionManagerFactory.js.map +1 -1
- package/dist/replication/MongoErrorRateLimiter.js +9 -4
- package/dist/replication/MongoErrorRateLimiter.js.map +1 -1
- package/dist/replication/MongoManager.d.ts +5 -2
- package/dist/replication/MongoManager.js +6 -4
- package/dist/replication/MongoManager.js.map +1 -1
- package/package.json +5 -5
- package/src/replication/ChangeStreamReplicationJob.ts +16 -39
- package/src/replication/ChangeStreamReplicator.ts +1 -1
- package/src/replication/ConnectionManagerFactory.ts +9 -4
- package/src/replication/MongoErrorRateLimiter.ts +8 -4
- package/src/replication/MongoManager.ts +10 -5
- package/test/src/change_stream_utils.ts +9 -2
- package/test/src/resuming_snapshots.test.ts +105 -96
- package/tsconfig.tsbuildinfo +1 -1
package/test/src/resuming_snapshots.test.ts

@@ -1,11 +1,11 @@
-import { describe, expect, test } from 'vitest';
-import { env } from './env.js';
-import { describeWithStorage } from './util.js';
 import { TestStorageFactory } from '@powersync/service-core';
 import { METRICS_HELPER } from '@powersync/service-core-tests';
 import { ReplicationMetric } from '@powersync/service-types';
 import * as timers from 'node:timers/promises';
+import { describe, expect, test } from 'vitest';
 import { ChangeStreamTestContext } from './change_stream_utils.js';
+import { env } from './env.js';
+import { describeWithStorage } from './util.js';
 
 describe.skipIf(!(env.CI || env.SLOW_TESTS))('batch replication', function () {
   describeWithStorage({ timeout: 240_000 }, function (factory) {
@@ -32,107 +32,116 @@ async function testResumingReplication(factory: TestStorageFactory, stopAfter: n
   // have been / have not been replicated at that point is not deterministic.
   // We do allow for some variation in the test results to account for this.
 
-[removed line: old content not shown in this diff view]
+  let startRowCount: number;
+
+  {
+    await using context = await ChangeStreamTestContext.open(factory, { streamOptions: { snapshotChunkLength: 1000 } });
 
-[removed line: old content not shown in this diff view]
+    await context.updateSyncRules(`bucket_definitions:
   global:
     data:
       - SELECT _id as id, description FROM test_data1
       - SELECT _id as id, description FROM test_data2`);
-[26 removed lines: old content not shown in this diff view]
+    const { db } = context;
+
+    let batch = db.collection('test_data1').initializeUnorderedBulkOp();
+    for (let i = 1; i <= 1000; i++) {
+      batch.insert({ _id: i, description: 'foo' });
+    }
+    await batch.execute();
+    batch = db.collection('test_data2').initializeUnorderedBulkOp();
+    for (let i = 1; i <= 10000; i++) {
+      batch.insert({ _id: i, description: 'foo' });
+    }
+    await batch.execute();
+
+    const p = context.replicateSnapshot().catch((e) => ({ error: e }));
+
+    let done = false;
+
+    startRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0;
+    try {
+      (async () => {
+        while (!done) {
+          const count =
+            ((await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0) - startRowCount;
+
+          if (count >= stopAfter) {
+            break;
+          }
+          await timers.setTimeout(1);
         }
-[10 removed lines: old content not shown in this diff view]
+        // This interrupts initial replication
+        // We don't dispose the context here yet, since closing the database connection while in use
+        // results in unpredictable error conditions.
+        context.abort();
+      })();
+      // This confirms that initial replication was interrupted
+      await expect(await p).haveOwnProperty('error');
+    } finally {
+      done = true;
+    }
   }
 
-[5 removed lines: old content not shown in this diff view]
+  {
+    // Bypass the usual "clear db on factory open" step.
+    await using context2 = await ChangeStreamTestContext.open(factory, {
+      doNotClear: true,
+      streamOptions: { snapshotChunkLength: 1000 }
+    });
 
-[23 removed lines: old content not shown in this diff view]
+    const { db } = context2;
+
+    // This delete should be using one of the ids already replicated
+    await db.collection('test_data2').deleteOne({ _id: 1 as any });
+    await db.collection('test_data2').updateOne({ _id: 2 as any }, { $set: { description: 'update1' } });
+    await db.collection('test_data2').insertOne({ _id: 10001 as any, description: 'insert1' });
+
+    await context2.loadNextSyncRules();
+    await context2.replicateSnapshot();
+
+    context2.startStreaming();
+    const data = await context2.getBucketData('global[]', undefined, {});
+
+    const deletedRowOps = data.filter((row) => row.object_type == 'test_data2' && row.object_id === '1');
+    const updatedRowOps = data.filter((row) => row.object_type == 'test_data2' && row.object_id === '2');
+    const insertedRowOps = data.filter((row) => row.object_type == 'test_data2' && row.object_id === '10001');
+
+    if (deletedRowOps.length != 0) {
+      // The deleted row was part of the first replication batch,
+      // so it is removed by streaming replication.
+      expect(deletedRowOps.length).toEqual(2);
+      expect(deletedRowOps[1].op).toEqual('REMOVE');
+    } else {
+      // The deleted row was not part of the first replication batch,
+      // so it's not in the resulting ops at all.
+    }
+
+    expect(updatedRowOps.length).toEqual(2);
+    // description for the first op could be 'foo' or 'update1'.
+    // We only test the final version.
+    expect(JSON.parse(updatedRowOps[1].data as string).description).toEqual('update1');
+
+    expect(insertedRowOps.length).toEqual(2);
+    expect(JSON.parse(insertedRowOps[0].data as string).description).toEqual('insert1');
+    expect(JSON.parse(insertedRowOps[1].data as string).description).toEqual('insert1');
+
+    // 1000 of test_data1 during first replication attempt.
+    // N >= 1000 of test_data2 during first replication attempt.
+    // 10000 - N - 1 + 1 of test_data2 during second replication attempt.
+    // An additional update during streaming replication (2x total for this row).
+    // An additional insert during streaming replication (2x total for this row).
+    // If the deleted row was part of the first replication batch, it's removed by streaming replication.
+    // This adds 2 ops.
+    // We expect this to be 11002 for stopAfter: 2000, and 11004 for stopAfter: 8000.
+    // However, this is not deterministic.
+    const expectedCount = 11002 + deletedRowOps.length;
+    expect(data.length).toEqual(expectedCount);
+
+    const replicatedCount =
+      ((await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0) - startRowCount;
+
+    // With resumable replication, there should be no need to re-replicate anything.
+    expect(replicatedCount).toEqual(expectedCount);
   }
-[removed line: old content not shown in this diff view]
-  expect(updatedRowOps.length).toEqual(2);
-  // description for the first op could be 'foo' or 'update1'.
-  // We only test the final version.
-  expect(JSON.parse(updatedRowOps[1].data as string).description).toEqual('update1');
-
-  expect(insertedRowOps.length).toEqual(2);
-  expect(JSON.parse(insertedRowOps[0].data as string).description).toEqual('insert1');
-  expect(JSON.parse(insertedRowOps[1].data as string).description).toEqual('insert1');
-
-  // 1000 of test_data1 during first replication attempt.
-  // N >= 1000 of test_data2 during first replication attempt.
-  // 10000 - N - 1 + 1 of test_data2 during second replication attempt.
-  // An additional update during streaming replication (2x total for this row).
-  // An additional insert during streaming replication (2x total for this row).
-  // If the deleted row was part of the first replication batch, it's removed by streaming replication.
-  // This adds 2 ops.
-  // We expect this to be 11002 for stopAfter: 2000, and 11004 for stopAfter: 8000.
-  // However, this is not deterministic.
-  const expectedCount = 11002 + deletedRowOps.length;
-  expect(data.length).toEqual(expectedCount);
-
-  const replicatedCount =
-    ((await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0) - startRowCount;
-
-  // With resumable replication, there should be no need to re-replicate anything.
-  expect(replicatedCount).toEqual(expectedCount);
 }
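The comment block at the end of the new test derives the expected op count as 11002 + deletedRowOps.length. As a worked illustration of that arithmetic only, here is a minimal TypeScript sketch; it is not code from the package, and the function name and the way the constants are broken down are assumptions made for clarity.

// Illustrative only: a hypothetical helper mirroring the op-count reasoning in the
// test's comments. Not part of @powersync/service-module-mongodb.
function expectedOpCount(deletedRowOps: 0 | 2): number {
  const testData1Ops = 1000; // one PUT per test_data1 row, all from the snapshot
  const testData2SnapshotOps = 10000; // 9999 surviving original rows plus the newly inserted _id 10001
  const streamedUpdateOp = 1; // second PUT for _id 2 ('update1') from streaming replication
  const streamedInsertOp = 1; // second PUT for _id 10001 from streaming replication
  // Baseline 11002, plus 2 ops (PUT + REMOVE) when the deleted row had already been
  // replicated by the interrupted first snapshot attempt.
  return testData1Ops + testData2SnapshotOps + streamedUpdateOp + streamedInsertOp + deletedRowOps;
}

console.log(expectedOpCount(0)); // 11002: the deleted row was never replicated
console.log(expectedOpCount(2)); // 11004: the deleted row was replicated, then removed by streaming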