@powersync/service-module-mongodb 0.0.0-dev-20241001150444
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +15 -0
- package/LICENSE +67 -0
- package/README.md +3 -0
- package/dist/api/MongoRouteAPIAdapter.d.ts +22 -0
- package/dist/api/MongoRouteAPIAdapter.js +64 -0
- package/dist/api/MongoRouteAPIAdapter.js.map +1 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.js +4 -0
- package/dist/index.js.map +1 -0
- package/dist/module/MongoModule.d.ts +13 -0
- package/dist/module/MongoModule.js +46 -0
- package/dist/module/MongoModule.js.map +1 -0
- package/dist/replication/ChangeStream.d.ts +53 -0
- package/dist/replication/ChangeStream.js +389 -0
- package/dist/replication/ChangeStream.js.map +1 -0
- package/dist/replication/ChangeStreamReplicationJob.d.ts +16 -0
- package/dist/replication/ChangeStreamReplicationJob.js +90 -0
- package/dist/replication/ChangeStreamReplicationJob.js.map +1 -0
- package/dist/replication/ChangeStreamReplicator.d.ts +13 -0
- package/dist/replication/ChangeStreamReplicator.js +26 -0
- package/dist/replication/ChangeStreamReplicator.js.map +1 -0
- package/dist/replication/ConnectionManagerFactory.d.ts +9 -0
- package/dist/replication/ConnectionManagerFactory.js +21 -0
- package/dist/replication/ConnectionManagerFactory.js.map +1 -0
- package/dist/replication/MongoErrorRateLimiter.d.ts +11 -0
- package/dist/replication/MongoErrorRateLimiter.js +44 -0
- package/dist/replication/MongoErrorRateLimiter.js.map +1 -0
- package/dist/replication/MongoManager.d.ts +14 -0
- package/dist/replication/MongoManager.js +36 -0
- package/dist/replication/MongoManager.js.map +1 -0
- package/dist/replication/MongoRelation.d.ts +9 -0
- package/dist/replication/MongoRelation.js +174 -0
- package/dist/replication/MongoRelation.js.map +1 -0
- package/dist/replication/replication-index.d.ts +4 -0
- package/dist/replication/replication-index.js +5 -0
- package/dist/replication/replication-index.js.map +1 -0
- package/dist/types/types.d.ts +51 -0
- package/dist/types/types.js +37 -0
- package/dist/types/types.js.map +1 -0
- package/package.json +47 -0
- package/src/api/MongoRouteAPIAdapter.ts +86 -0
- package/src/index.ts +5 -0
- package/src/module/MongoModule.ts +52 -0
- package/src/replication/ChangeStream.ts +503 -0
- package/src/replication/ChangeStreamReplicationJob.ts +104 -0
- package/src/replication/ChangeStreamReplicator.ts +36 -0
- package/src/replication/ConnectionManagerFactory.ts +27 -0
- package/src/replication/MongoErrorRateLimiter.ts +45 -0
- package/src/replication/MongoManager.ts +47 -0
- package/src/replication/MongoRelation.ts +156 -0
- package/src/replication/replication-index.ts +4 -0
- package/src/types/types.ts +65 -0
- package/test/src/change_stream.test.ts +306 -0
- package/test/src/change_stream_utils.ts +148 -0
- package/test/src/env.ts +7 -0
- package/test/src/mongo_test.test.ts +219 -0
- package/test/src/setup.ts +7 -0
- package/test/src/util.ts +52 -0
- package/test/tsconfig.json +28 -0
- package/tsconfig.json +28 -0
- package/tsconfig.tsbuildinfo +1 -0
- package/vitest.config.ts +9 -0
package/src/replication/MongoErrorRateLimiter.ts
@@ -0,0 +1,45 @@
import { setTimeout } from 'timers/promises';
import { ErrorRateLimiter } from '@powersync/service-core';

export class MongoErrorRateLimiter implements ErrorRateLimiter {
  nextAllowed: number = Date.now();

  async waitUntilAllowed(options?: { signal?: AbortSignal | undefined } | undefined): Promise<void> {
    const delay = Math.max(0, this.nextAllowed - Date.now());
    // Minimum delay between connections, even without errors
    this.setDelay(500);
    await setTimeout(delay, undefined, { signal: options?.signal });
  }

  mayPing(): boolean {
    return Date.now() >= this.nextAllowed;
  }

  reportError(e: any): void {
    // FIXME: Check mongodb-specific requirements
    const message = (e.message as string) ?? '';
    if (message.includes('password authentication failed')) {
      // Wait 15 minutes, to avoid triggering Supabase's fail2ban
      this.setDelay(900_000);
    } else if (message.includes('ENOTFOUND')) {
      // DNS lookup issue - incorrect URI or deleted instance
      this.setDelay(120_000);
    } else if (message.includes('ECONNREFUSED')) {
      // Could be fail2ban or similar
      this.setDelay(120_000);
    } else if (
      message.includes('Unable to do postgres query on ended pool') ||
      message.includes('Postgres unexpectedly closed connection')
    ) {
      // Connection timed out - ignore / immediately retry
      // We don't explicitly set the delay to 0, since there could have been another error that
      // we need to respect.
    } else {
      this.setDelay(30_000);
    }
  }

  private setDelay(delay: number) {
    this.nextAllowed = Math.max(this.nextAllowed, Date.now() + delay);
  }
}
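
The rate limiter above is not exercised anywhere in this hunk, so here is a minimal sketch of how a replication job might drive it. This is illustrative only and not part of the package; the './MongoErrorRateLimiter.js' import path and the connectAndReplicate callback are assumptions.

import { MongoErrorRateLimiter } from './MongoErrorRateLimiter.js';

// Hypothetical retry loop: wait out the accumulated delay, attempt the work,
// and on failure let reportError() classify the error and push out the next attempt.
async function replicateWithBackoff(connectAndReplicate: () => Promise<void>, signal: AbortSignal): Promise<void> {
  const rateLimiter = new MongoErrorRateLimiter();
  while (!signal.aborted) {
    // At least 500ms between attempts; up to 15 minutes after an authentication failure.
    await rateLimiter.waitUntilAllowed({ signal });
    try {
      await connectAndReplicate();
      return;
    } catch (e) {
      rateLimiter.reportError(e);
    }
  }
}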
package/src/replication/MongoManager.ts
@@ -0,0 +1,47 @@
import * as mongo from 'mongodb';
import { NormalizedMongoConnectionConfig } from '../types/types.js';

export class MongoManager {
  /**
   * Do not use this for any transactions.
   */
  public readonly client: mongo.MongoClient;
  public readonly db: mongo.Db;

  constructor(public options: NormalizedMongoConnectionConfig) {
    // The pool is lazy - no connections are opened until a query is performed.
    this.client = new mongo.MongoClient(options.uri, {
      auth: {
        username: options.username,
        password: options.password
      },
      // Time for connection to timeout
      connectTimeoutMS: 5_000,
      // Time for individual requests to timeout
      socketTimeoutMS: 60_000,
      // How long to wait for new primary selection
      serverSelectionTimeoutMS: 30_000,

      // Avoid too many connections:
      // 1. It can overwhelm the source database.
      // 2. Processing too many queries in parallel can cause the process to run out of memory.
      maxPoolSize: 8,

      maxConnecting: 3,
      maxIdleTimeMS: 60_000
    });
    this.db = this.client.db(options.database, {});
  }

  public get connectionTag() {
    return this.options.tag;
  }

  async end(): Promise<void> {
    await this.client.close();
  }

  async destroy() {
    // TODO: Implement?
  }
}
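
A brief, hypothetical usage sketch of MongoManager (not part of the package): because the pool is lazy, the first real connection is only opened by the ping command below. The config values and the './MongoManager.js' import path are assumptions.

import { MongoManager } from './MongoManager.js';

async function checkSourceConnection(): Promise<void> {
  // Hand-written NormalizedMongoConnectionConfig with placeholder values.
  const manager = new MongoManager({
    id: 'default',
    tag: 'default',
    uri: 'mongodb://mongo.internal:27017/powersync_demo',
    database: 'powersync_demo',
    username: 'replicator',
    password: 'example-password'
  });
  try {
    // Uses the same lazily-created pool that replication queries would use.
    await manager.db.command({ ping: 1 });
  } finally {
    await manager.end();
  }
}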
package/src/replication/MongoRelation.ts
@@ -0,0 +1,156 @@
import { storage } from '@powersync/service-core';
import { SqliteRow, SqliteValue, toSyncRulesRow } from '@powersync/service-sync-rules';
import * as mongo from 'mongodb';
import { JSONBig, JsonContainer } from '@powersync/service-jsonbig';

export function getMongoRelation(source: mongo.ChangeStreamNameSpace): storage.SourceEntityDescriptor {
  return {
    name: source.coll,
    schema: source.db,
    objectId: source.coll,
    replicationColumns: [{ name: '_id' }]
  } satisfies storage.SourceEntityDescriptor;
}

export function getMongoLsn(timestamp: mongo.Timestamp) {
  const a = timestamp.high.toString(16).padStart(8, '0');
  const b = timestamp.low.toString(16).padStart(8, '0');
  return a + b;
}

export function mongoLsnToTimestamp(lsn: string | null) {
  if (lsn == null) {
    return null;
  }
  const a = parseInt(lsn.substring(0, 8), 16);
  const b = parseInt(lsn.substring(8, 16), 16);
  return mongo.Timestamp.fromBits(b, a);
}

export function constructAfterRecord(document: mongo.Document): SqliteRow {
  let record: SqliteRow = {};
  for (let key of Object.keys(document)) {
    record[key] = toMongoSyncRulesValue(document[key]);
  }
  return record;
}

export function toMongoSyncRulesValue(data: any): SqliteValue {
  const autoBigNum = true;
  if (data == null) {
    // null or undefined
    return data;
  } else if (typeof data == 'string') {
    return data;
  } else if (typeof data == 'number') {
    if (Number.isInteger(data) && autoBigNum) {
      return BigInt(data);
    } else {
      return data;
    }
  } else if (typeof data == 'bigint') {
    return data;
  } else if (typeof data == 'boolean') {
    return data ? 1n : 0n;
  } else if (data instanceof mongo.ObjectId) {
    return data.toHexString();
  } else if (data instanceof mongo.UUID) {
    return data.toHexString();
  } else if (data instanceof Date) {
    return data.toISOString().replace('T', ' ');
  } else if (data instanceof mongo.Binary) {
    return new Uint8Array(data.buffer);
  } else if (data instanceof mongo.Long) {
    return data.toBigInt();
  } else if (Array.isArray(data)) {
    // We may be able to avoid some parse + stringify cycles here for JsonSqliteContainer.
    return JSONBig.stringify(data.map((element) => filterJsonData(element)));
  } else if (data instanceof Uint8Array) {
    return data;
  } else if (data instanceof JsonContainer) {
    return data.toString();
  } else if (typeof data == 'object') {
    let record: Record<string, any> = {};
    for (let key of Object.keys(data)) {
      record[key] = filterJsonData(data[key]);
    }
    return JSONBig.stringify(record);
  } else {
    return null;
  }
}

const DEPTH_LIMIT = 20;

function filterJsonData(data: any, depth = 0): any {
  const autoBigNum = true;
  if (depth > DEPTH_LIMIT) {
    // This is primarily to prevent infinite recursion
    throw new Error(`json nested object depth exceeds the limit of ${DEPTH_LIMIT}`);
  }
  if (data == null) {
    return data; // null or undefined
  } else if (typeof data == 'string') {
    return data;
  } else if (typeof data == 'number') {
    if (autoBigNum && Number.isInteger(data)) {
      return BigInt(data);
    } else {
      return data;
    }
  } else if (typeof data == 'boolean') {
    return data ? 1n : 0n;
  } else if (typeof data == 'bigint') {
    return data;
  } else if (data instanceof Date) {
    return data.toISOString().replace('T', ' ');
  } else if (data instanceof mongo.ObjectId) {
    return data.toHexString();
  } else if (data instanceof mongo.UUID) {
    return data.toHexString();
  } else if (data instanceof mongo.Binary) {
    return undefined;
  } else if (data instanceof mongo.Long) {
    return data.toBigInt();
  } else if (Array.isArray(data)) {
    return data.map((element) => filterJsonData(element, depth + 1));
  } else if (ArrayBuffer.isView(data)) {
    return undefined;
  } else if (data instanceof JsonContainer) {
    // Can be stringified directly when using our JSONBig implementation
    return data;
  } else if (typeof data == 'object') {
    let record: Record<string, any> = {};
    for (let key of Object.keys(data)) {
      record[key] = filterJsonData(data[key], depth + 1);
    }
    return record;
  } else {
    return undefined;
  }
}

export async function createCheckpoint(client: mongo.MongoClient, db: mongo.Db): Promise<string> {
  const session = client.startSession();
  try {
    const result = await db.collection('_powersync_checkpoints').findOneAndUpdate(
      {
        _id: 'checkpoint' as any
      },
      {
        $inc: { i: 1 }
      },
      {
        upsert: true,
        returnDocument: 'after',
        session
      }
    );
    const time = session.operationTime!;
    // console.log('marked checkpoint at', time, getMongoLsn(time));
    // TODO: Use the above when we support custom write checkpoints
    return getMongoLsn(time);
  } finally {
    await session.endSession();
  }
}
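
To make the LSN encoding above concrete, here is a small worked example (illustrative only; the './MongoRelation.js' import path and the timestamp values are assumptions): the BSON Timestamp's 32-bit high word (seconds) and low word (increment) are each rendered as zero-padded hex, so comparing LSN strings lexicographically matches timestamp order.

import * as mongo from 'mongodb';
import { getMongoLsn, mongoLsnToTimestamp } from './MongoRelation.js';

// Timestamp.fromBits(lowBits, highBits): increment 7 at (hypothetical) second 0x66f2aa10.
const ts = mongo.Timestamp.fromBits(7, 0x66f2aa10);

const lsn = getMongoLsn(ts);
console.log(lsn); // '66f2aa1000000007' - high word first, then low word

// mongoLsnToTimestamp parses the two hex halves back into a Timestamp.
console.log(mongoLsnToTimestamp(lsn)!.equals(ts)); // true

// A later timestamp produces a lexicographically larger LSN string.
const laterLsn = getMongoLsn(mongo.Timestamp.fromBits(0, 0x66f2aa11));
console.log(lsn < laterLsn); // true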
package/src/types/types.ts
@@ -0,0 +1,65 @@
import { normalizeMongoConfig } from '@powersync/service-core';
import * as service_types from '@powersync/service-types';
import * as t from 'ts-codec';

export const MONGO_CONNECTION_TYPE = 'mongodb' as const;

export interface NormalizedMongoConnectionConfig {
  id: string;
  tag: string;

  uri: string;
  database: string;

  username?: string;
  password?: string;
}

export const MongoConnectionConfig = service_types.configFile.dataSourceConfig.and(
  t.object({
    type: t.literal(MONGO_CONNECTION_TYPE),
    /** Unique identifier for the connection - optional when a single connection is present. */
    id: t.string.optional(),
    /** Tag used as reference in sync rules. Defaults to "default". Does not have to be unique. */
    tag: t.string.optional(),
    uri: t.string,
    username: t.string.optional(),
    password: t.string.optional(),
    database: t.string.optional()
  })
);

/**
 * Config input specified when starting services
 */
export type MongoConnectionConfig = t.Decoded<typeof MongoConnectionConfig>;

/**
 * Resolved version of {@link MongoConnectionConfig}
 */
export type ResolvedConnectionConfig = MongoConnectionConfig & NormalizedMongoConnectionConfig;

/**
 * Validate and normalize connection options.
 *
 * Returns destructured options.
 */
export function normalizeConnectionConfig(options: MongoConnectionConfig): NormalizedMongoConnectionConfig {
  const base = normalizeMongoConfig(options);

  return {
    id: options.id ?? 'default',
    tag: options.tag ?? 'default',

    ...base
  };
}

/**
 * Construct a mongodb URI, without username, password or ssl options.
 *
 * Only contains hostname, port, database.
 */
export function baseUri(options: NormalizedMongoConnectionConfig) {
  return options.uri;
}
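
For context, a hypothetical connection entry in the shape described by MongoConnectionConfig, passed through normalizeConnectionConfig. This is illustrative only: the './types.js' import path and field values are assumptions, and the uri/database/credential fields in the result come from normalizeMongoConfig in @powersync/service-core, which is not part of this diff.

import { normalizeConnectionConfig } from './types.js';

// A hypothetical connection entry; only the fields relevant to this module are shown,
// so the cast below skips the full dataSourceConfig shape from @powersync/service-types.
const entry = {
  type: 'mongodb',
  uri: 'mongodb://mongo.internal:27017/powersync_demo',
  username: 'replicator',
  password: 'example-password'
};

const normalized = normalizeConnectionConfig(entry as any);
// id and tag fall back to 'default'; uri/database/credentials come from normalizeMongoConfig.
console.log(normalized.id, normalized.tag); // 'default' 'default'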
package/test/src/change_stream.test.ts
@@ -0,0 +1,306 @@
import { putOp, removeOp } from '@core-tests/stream_utils.js';
import { MONGO_STORAGE_FACTORY } from '@core-tests/util.js';
import { BucketStorageFactory } from '@powersync/service-core';
import * as crypto from 'crypto';
import { describe, expect, test } from 'vitest';
import { walStreamTest } from './change_stream_utils.js';
import * as mongo from 'mongodb';
import { setTimeout } from 'node:timers/promises';

type StorageFactory = () => Promise<BucketStorageFactory>;

const BASIC_SYNC_RULES = `
bucket_definitions:
  global:
    data:
      - SELECT _id as id, description FROM "test_data"
`;

describe(
  'change stream - mongodb',
  function () {
    defineWalStreamTests(MONGO_STORAGE_FACTORY);
  },
  { timeout: 20_000 }
);

function defineWalStreamTests(factory: StorageFactory) {
  test(
    'replicating basic values',
    walStreamTest(factory, async (context) => {
      const { db } = context;
      await context.updateSyncRules(`
bucket_definitions:
  global:
    data:
      - SELECT _id as id, description, num FROM "test_data"`);

      db.createCollection('test_data', {
        changeStreamPreAndPostImages: { enabled: true }
      });
      const collection = db.collection('test_data');

      await context.replicateSnapshot();

      context.startStreaming();

      const result = await collection.insertOne({ description: 'test1', num: 1152921504606846976n });
      const test_id = result.insertedId;
      await setTimeout(10);
      await collection.updateOne({ _id: test_id }, { $set: { description: 'test2' } });
      await setTimeout(10);
      await collection.replaceOne({ _id: test_id }, { description: 'test3' });
      await setTimeout(10);
      await collection.deleteOne({ _id: test_id });

      const data = await context.getBucketData('global[]');

      expect(data).toMatchObject([
        putOp('test_data', { id: test_id.toHexString(), description: 'test1', num: 1152921504606846976n }),
        putOp('test_data', { id: test_id.toHexString(), description: 'test2', num: 1152921504606846976n }),
        putOp('test_data', { id: test_id.toHexString(), description: 'test3' }),
        removeOp('test_data', test_id.toHexString())
      ]);
    })
  );

  test(
    'no fullDocument available',
    walStreamTest(factory, async (context) => {
      const { db, client } = context;
      await context.updateSyncRules(`
bucket_definitions:
  global:
    data:
      - SELECT _id as id, description, num FROM "test_data"`);

      db.createCollection('test_data', {
        changeStreamPreAndPostImages: { enabled: false }
      });
      const collection = db.collection('test_data');

      await context.replicateSnapshot();

      context.startStreaming();

      const session = client.startSession();
      let test_id: mongo.ObjectId | undefined;
      try {
        await session.withTransaction(async () => {
          const result = await collection.insertOne({ description: 'test1', num: 1152921504606846976n }, { session });
          test_id = result.insertedId;
          await collection.updateOne({ _id: test_id }, { $set: { description: 'test2' } }, { session });
          await collection.replaceOne({ _id: test_id }, { description: 'test3' }, { session });
          await collection.deleteOne({ _id: test_id }, { session });
        });
      } finally {
        await session.endSession();
      }

      const data = await context.getBucketData('global[]');

      expect(data).toMatchObject([
        putOp('test_data', { id: test_id!.toHexString(), description: 'test1', num: 1152921504606846976n }),
        // fullDocument is not available at the point this is replicated, resulting in it treated as a remove
        removeOp('test_data', test_id!.toHexString()),
        putOp('test_data', { id: test_id!.toHexString(), description: 'test3' }),
        removeOp('test_data', test_id!.toHexString())
      ]);
    })
  );

  test(
    'replicating case sensitive table',
    walStreamTest(factory, async (context) => {
      const { db } = context;
      await context.updateSyncRules(`
bucket_definitions:
  global:
    data:
      - SELECT _id as id, description FROM "test_DATA"
`);

      await context.replicateSnapshot();

      context.startStreaming();

      const collection = db.collection('test_DATA');
      const result = await collection.insertOne({ description: 'test1' });
      const test_id = result.insertedId.toHexString();

      const data = await context.getBucketData('global[]');

      expect(data).toMatchObject([putOp('test_DATA', { id: test_id, description: 'test1' })]);
    })
  );

  test(
    'replicating large values',
    walStreamTest(factory, async (context) => {
      const { db } = context;
      await context.updateSyncRules(`
bucket_definitions:
  global:
    data:
      - SELECT _id as id, name, description FROM "test_data"
`);

      await context.replicateSnapshot();
      context.startStreaming();

      const largeDescription = crypto.randomBytes(20_000).toString('hex');

      const collection = db.collection('test_data');
      const result = await collection.insertOne({ name: 'test1', description: largeDescription });
      const test_id = result.insertedId;

      await collection.updateOne({ _id: test_id }, { $set: { name: 'test2' } });

      const data = await context.getBucketData('global[]');
      expect(data.slice(0, 1)).toMatchObject([
        putOp('test_data', { id: test_id.toHexString(), name: 'test1', description: largeDescription })
      ]);
      expect(data.slice(1)).toMatchObject([
        putOp('test_data', { id: test_id.toHexString(), name: 'test2', description: largeDescription })
      ]);
    })
  );

  test(
    'replicating dropCollection',
    walStreamTest(factory, async (context) => {
      const { db } = context;
      const syncRuleContent = `
bucket_definitions:
  global:
    data:
      - SELECT _id as id, description FROM "test_data"
  by_test_data:
    parameters: SELECT _id as id FROM test_data WHERE id = token_parameters.user_id
    data: []
`;
      await context.updateSyncRules(syncRuleContent);
      await context.replicateSnapshot();
      context.startStreaming();

      const collection = db.collection('test_data');
      const result = await collection.insertOne({ description: 'test1' });
      const test_id = result.insertedId.toHexString();

      await collection.drop();

      const data = await context.getBucketData('global[]');

      expect(data).toMatchObject([
        putOp('test_data', { id: test_id, description: 'test1' }),
        removeOp('test_data', test_id)
      ]);
    })
  );

  test(
    'replicating renameCollection',
    walStreamTest(factory, async (context) => {
      const { db } = context;
      const syncRuleContent = `
bucket_definitions:
  global:
    data:
      - SELECT _id as id, description FROM "test_data1"
      - SELECT _id as id, description FROM "test_data2"
`;
      await context.updateSyncRules(syncRuleContent);
      await context.replicateSnapshot();
      context.startStreaming();

      console.log('insert1', db.databaseName);
      const collection = db.collection('test_data1');
      const result = await collection.insertOne({ description: 'test1' });
      const test_id = result.insertedId.toHexString();

      await collection.rename('test_data2');

      const data = await context.getBucketData('global[]');

      expect(data).toMatchObject([
        putOp('test_data1', { id: test_id, description: 'test1' }),
        removeOp('test_data1', test_id),
        putOp('test_data2', { id: test_id, description: 'test1' })
      ]);
    })
  );

  test(
    'initial sync',
    walStreamTest(factory, async (context) => {
      const { db } = context;
      await context.updateSyncRules(BASIC_SYNC_RULES);

      const collection = db.collection('test_data');
      const result = await collection.insertOne({ description: 'test1' });
      const test_id = result.insertedId.toHexString();

      await context.replicateSnapshot();
      context.startStreaming();

      const data = await context.getBucketData('global[]');
      expect(data).toMatchObject([putOp('test_data', { id: test_id, description: 'test1' })]);
    })
  );

  // Not correctly implemented yet
  test.skip(
    'large record',
    walStreamTest(factory, async (context) => {
      await context.updateSyncRules(`bucket_definitions:
  global:
    data:
      - SELECT _id as id, description, other FROM "test_data"`);
      const { db } = context;

      await context.replicateSnapshot();

      // 16MB
      const largeDescription = crypto.randomBytes(8_000_000 - 100).toString('hex');

      const collection = db.collection('test_data');
      const result = await collection.insertOne({ description: largeDescription });
      const test_id = result.insertedId;

      await collection.updateOne({ _id: test_id }, { $set: { name: 't2' } });
      context.startStreaming();

      const data = await context.getBucketData('global[]');
      expect(data.length).toEqual(2);
      const row = JSON.parse(data[0].data as string);
      delete row.description;
      expect(row).toEqual({ id: test_id.toHexString() });
      delete data[0].data;
      expect(data[0]).toMatchObject({
        object_id: test_id.toHexString(),
        object_type: 'test_data',
        op: 'PUT',
        op_id: '1'
      });
    })
  );

  test(
    'table not in sync rules',
    walStreamTest(factory, async (context) => {
      const { db } = context;
      await context.updateSyncRules(BASIC_SYNC_RULES);

      await context.replicateSnapshot();

      context.startStreaming();

      const collection = db.collection('test_donotsync');
      const result = await collection.insertOne({ description: 'test' });

      const data = await context.getBucketData('global[]');

      expect(data).toMatchObject([]);
    })
  );
}