@powersync/service-module-mongodb 0.0.0-dev-20241219153510 → 0.0.0-dev-20250108073049
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
- package/CHANGELOG.md +19 -6
- package/dist/api/MongoRouteAPIAdapter.d.ts +1 -1
- package/dist/api/MongoRouteAPIAdapter.js +4 -3
- package/dist/api/MongoRouteAPIAdapter.js.map +1 -1
- package/dist/index.d.ts +3 -2
- package/dist/index.js +3 -2
- package/dist/index.js.map +1 -1
- package/dist/module/MongoModule.d.ts +0 -1
- package/dist/module/MongoModule.js +2 -10
- package/dist/module/MongoModule.js.map +1 -1
- package/dist/replication/MongoRelation.js +15 -5
- package/dist/replication/MongoRelation.js.map +1 -1
- package/dist/types/types.d.ts +16 -22
- package/dist/types/types.js +4 -24
- package/dist/types/types.js.map +1 -1
- package/package.json +9 -10
- package/src/api/MongoRouteAPIAdapter.ts +4 -4
- package/src/index.ts +3 -4
- package/src/module/MongoModule.ts +2 -14
- package/src/replication/MongoRelation.ts +13 -5
- package/src/types/types.ts +8 -34
- package/test/src/change_stream.test.ts +2 -4
- package/test/src/env.ts +1 -1
- package/test/src/mongo_test.test.ts +69 -10
- package/test/src/setup.ts +4 -1
- package/test/src/slow_tests.test.ts +5 -12
- package/test/src/util.ts +5 -38
- package/test/tsconfig.json +1 -2
- package/tsconfig.json +6 -0
- package/tsconfig.tsbuildinfo +1 -1
- package/dist/db/db-index.d.ts +0 -1
- package/dist/db/db-index.js +0 -2
- package/dist/db/db-index.js.map +0 -1
- package/dist/db/mongo.d.ts +0 -35
- package/dist/db/mongo.js +0 -73
- package/dist/db/mongo.js.map +0 -1
- package/dist/locks/MonogLocks.d.ts +0 -36
- package/dist/locks/MonogLocks.js +0 -83
- package/dist/locks/MonogLocks.js.map +0 -1
- package/dist/migrations/MonogMigrationAgent.d.ts +0 -12
- package/dist/migrations/MonogMigrationAgent.js +0 -25
- package/dist/migrations/MonogMigrationAgent.js.map +0 -1
- package/dist/migrations/db/migrations/1684951997326-init.d.ts +0 -3
- package/dist/migrations/db/migrations/1684951997326-init.js +0 -30
- package/dist/migrations/db/migrations/1684951997326-init.js.map +0 -1
- package/dist/migrations/db/migrations/1688556755264-initial-sync-rules.d.ts +0 -2
- package/dist/migrations/db/migrations/1688556755264-initial-sync-rules.js +0 -5
- package/dist/migrations/db/migrations/1688556755264-initial-sync-rules.js.map +0 -1
- package/dist/migrations/db/migrations/1702295701188-sync-rule-state.d.ts +0 -3
- package/dist/migrations/db/migrations/1702295701188-sync-rule-state.js +0 -54
- package/dist/migrations/db/migrations/1702295701188-sync-rule-state.js.map +0 -1
- package/dist/migrations/db/migrations/1711543888062-write-checkpoint-index.d.ts +0 -3
- package/dist/migrations/db/migrations/1711543888062-write-checkpoint-index.js +0 -26
- package/dist/migrations/db/migrations/1711543888062-write-checkpoint-index.js.map +0 -1
- package/dist/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.d.ts +0 -3
- package/dist/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.js +0 -28
- package/dist/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.js.map +0 -1
- package/dist/migrations/mongo-migration-store.d.ts +0 -7
- package/dist/migrations/mongo-migration-store.js +0 -49
- package/dist/migrations/mongo-migration-store.js.map +0 -1
- package/dist/storage/MongoBucketStorage.d.ts +0 -48
- package/dist/storage/MongoBucketStorage.js +0 -425
- package/dist/storage/MongoBucketStorage.js.map +0 -1
- package/dist/storage/implementation/MongoBucketBatch.d.ts +0 -72
- package/dist/storage/implementation/MongoBucketBatch.js +0 -681
- package/dist/storage/implementation/MongoBucketBatch.js.map +0 -1
- package/dist/storage/implementation/MongoCompactor.d.ts +0 -40
- package/dist/storage/implementation/MongoCompactor.js +0 -310
- package/dist/storage/implementation/MongoCompactor.js.map +0 -1
- package/dist/storage/implementation/MongoIdSequence.d.ts +0 -12
- package/dist/storage/implementation/MongoIdSequence.js +0 -21
- package/dist/storage/implementation/MongoIdSequence.js.map +0 -1
- package/dist/storage/implementation/MongoPersistedSyncRules.d.ts +0 -9
- package/dist/storage/implementation/MongoPersistedSyncRules.js +0 -9
- package/dist/storage/implementation/MongoPersistedSyncRules.js.map +0 -1
- package/dist/storage/implementation/MongoPersistedSyncRulesContent.d.ts +0 -20
- package/dist/storage/implementation/MongoPersistedSyncRulesContent.js +0 -26
- package/dist/storage/implementation/MongoPersistedSyncRulesContent.js.map +0 -1
- package/dist/storage/implementation/MongoStorageProvider.d.ts +0 -6
- package/dist/storage/implementation/MongoStorageProvider.js +0 -34
- package/dist/storage/implementation/MongoStorageProvider.js.map +0 -1
- package/dist/storage/implementation/MongoSyncBucketStorage.d.ts +0 -36
- package/dist/storage/implementation/MongoSyncBucketStorage.js +0 -529
- package/dist/storage/implementation/MongoSyncBucketStorage.js.map +0 -1
- package/dist/storage/implementation/MongoSyncRulesLock.d.ts +0 -16
- package/dist/storage/implementation/MongoSyncRulesLock.js +0 -65
- package/dist/storage/implementation/MongoSyncRulesLock.js.map +0 -1
- package/dist/storage/implementation/MongoWriteCheckpointAPI.d.ts +0 -20
- package/dist/storage/implementation/MongoWriteCheckpointAPI.js +0 -104
- package/dist/storage/implementation/MongoWriteCheckpointAPI.js.map +0 -1
- package/dist/storage/implementation/OperationBatch.d.ts +0 -34
- package/dist/storage/implementation/OperationBatch.js +0 -119
- package/dist/storage/implementation/OperationBatch.js.map +0 -1
- package/dist/storage/implementation/PersistedBatch.d.ts +0 -46
- package/dist/storage/implementation/PersistedBatch.js +0 -223
- package/dist/storage/implementation/PersistedBatch.js.map +0 -1
- package/dist/storage/implementation/config.d.ts +0 -19
- package/dist/storage/implementation/config.js +0 -26
- package/dist/storage/implementation/config.js.map +0 -1
- package/dist/storage/implementation/db.d.ts +0 -36
- package/dist/storage/implementation/db.js +0 -47
- package/dist/storage/implementation/db.js.map +0 -1
- package/dist/storage/implementation/models.d.ts +0 -139
- package/dist/storage/implementation/models.js +0 -2
- package/dist/storage/implementation/models.js.map +0 -1
- package/dist/storage/implementation/util.d.ts +0 -41
- package/dist/storage/implementation/util.js +0 -141
- package/dist/storage/implementation/util.js.map +0 -1
- package/dist/storage/storage-index.d.ts +0 -14
- package/dist/storage/storage-index.js +0 -15
- package/dist/storage/storage-index.js.map +0 -1
- package/src/db/db-index.ts +0 -1
- package/src/db/mongo.ts +0 -81
- package/src/locks/MonogLocks.ts +0 -147
- package/src/migrations/MonogMigrationAgent.ts +0 -39
- package/src/migrations/db/migrations/1684951997326-init.ts +0 -39
- package/src/migrations/db/migrations/1688556755264-initial-sync-rules.ts +0 -5
- package/src/migrations/db/migrations/1702295701188-sync-rule-state.ts +0 -105
- package/src/migrations/db/migrations/1711543888062-write-checkpoint-index.ts +0 -38
- package/src/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.ts +0 -40
- package/src/migrations/mongo-migration-store.ts +0 -62
- package/src/storage/MongoBucketStorage.ts +0 -530
- package/src/storage/implementation/MongoBucketBatch.ts +0 -896
- package/src/storage/implementation/MongoCompactor.ts +0 -392
- package/src/storage/implementation/MongoIdSequence.ts +0 -24
- package/src/storage/implementation/MongoPersistedSyncRules.ts +0 -16
- package/src/storage/implementation/MongoPersistedSyncRulesContent.ts +0 -49
- package/src/storage/implementation/MongoStorageProvider.ts +0 -42
- package/src/storage/implementation/MongoSyncBucketStorage.ts +0 -612
- package/src/storage/implementation/MongoSyncRulesLock.ts +0 -88
- package/src/storage/implementation/MongoWriteCheckpointAPI.ts +0 -146
- package/src/storage/implementation/OperationBatch.ts +0 -129
- package/src/storage/implementation/PersistedBatch.ts +0 -283
- package/src/storage/implementation/config.ts +0 -40
- package/src/storage/implementation/db.ts +0 -87
- package/src/storage/implementation/models.ts +0 -161
- package/src/storage/implementation/util.ts +0 -154
- package/src/storage/storage-index.ts +0 -14
- package/test/src/__snapshots__/storage_sync.test.ts.snap +0 -332
- package/test/src/storage.test.ts +0 -7
- package/test/src/storage_compacting.test.ts +0 -6
- package/test/src/storage_sync.test.ts +0 -113
package/src/storage/implementation/MongoWriteCheckpointAPI.ts
@@ -1,146 +0,0 @@
-import * as framework from '@powersync/lib-services-framework';
-import { storage } from '@powersync/service-core';
-import { PowerSyncMongo } from './db.js';
-import { safeBulkWrite } from './util.js';
-
-export type MongoCheckpointAPIOptions = {
-  db: PowerSyncMongo;
-  mode: storage.WriteCheckpointMode;
-};
-
-export class MongoWriteCheckpointAPI implements storage.WriteCheckpointAPI {
-  readonly db: PowerSyncMongo;
-  private _mode: storage.WriteCheckpointMode;
-
-  constructor(options: MongoCheckpointAPIOptions) {
-    this.db = options.db;
-    this._mode = options.mode;
-  }
-
-  get writeCheckpointMode() {
-    return this._mode;
-  }
-
-  setWriteCheckpointMode(mode: storage.WriteCheckpointMode): void {
-    this._mode = mode;
-  }
-
-  async batchCreateCustomWriteCheckpoints(checkpoints: storage.CustomWriteCheckpointOptions[]): Promise<void> {
-    return batchCreateCustomWriteCheckpoints(this.db, checkpoints);
-  }
-
-  async createCustomWriteCheckpoint(options: storage.CustomWriteCheckpointOptions): Promise<bigint> {
-    if (this.writeCheckpointMode !== storage.WriteCheckpointMode.CUSTOM) {
-      throw new framework.errors.ValidationError(
-        `Creating a custom Write Checkpoint when the current Write Checkpoint mode is set to "${this.writeCheckpointMode}"`
-      );
-    }
-
-    const { checkpoint, user_id, sync_rules_id } = options;
-    const doc = await this.db.custom_write_checkpoints.findOneAndUpdate(
-      {
-        user_id: user_id,
-        sync_rules_id
-      },
-      {
-        $set: {
-          checkpoint
-        }
-      },
-      { upsert: true, returnDocument: 'after' }
-    );
-    return doc!.checkpoint;
-  }
-
-  async createManagedWriteCheckpoint(checkpoint: storage.ManagedWriteCheckpointOptions): Promise<bigint> {
-    if (this.writeCheckpointMode !== storage.WriteCheckpointMode.MANAGED) {
-      throw new framework.errors.ValidationError(
-        `Attempting to create a managed Write Checkpoint when the current Write Checkpoint mode is set to "${this.writeCheckpointMode}"`
-      );
-    }
-
-    const { user_id, heads: lsns } = checkpoint;
-    const doc = await this.db.write_checkpoints.findOneAndUpdate(
-      {
-        user_id: user_id
-      },
-      {
-        $set: {
-          lsns
-        },
-        $inc: {
-          client_id: 1n
-        }
-      },
-      { upsert: true, returnDocument: 'after' }
-    );
-    return doc!.client_id;
-  }
-
-  async lastWriteCheckpoint(filters: storage.LastWriteCheckpointFilters): Promise<bigint | null> {
-    switch (this.writeCheckpointMode) {
-      case storage.WriteCheckpointMode.CUSTOM:
-        if (false == 'sync_rules_id' in filters) {
-          throw new framework.errors.ValidationError(`Sync rules ID is required for custom Write Checkpoint filtering`);
-        }
-        return this.lastCustomWriteCheckpoint(filters);
-      case storage.WriteCheckpointMode.MANAGED:
-        if (false == 'heads' in filters) {
-          throw new framework.errors.ValidationError(
-            `Replication HEAD is required for managed Write Checkpoint filtering`
-          );
-        }
-        return this.lastManagedWriteCheckpoint(filters);
-    }
-  }
-
-  protected async lastCustomWriteCheckpoint(filters: storage.CustomWriteCheckpointFilters) {
-    const { user_id, sync_rules_id } = filters;
-    const lastWriteCheckpoint = await this.db.custom_write_checkpoints.findOne({
-      user_id,
-      sync_rules_id
-    });
-    return lastWriteCheckpoint?.checkpoint ?? null;
-  }
-
-  protected async lastManagedWriteCheckpoint(filters: storage.ManagedWriteCheckpointFilters) {
-    const { user_id, heads } = filters;
-    // TODO: support multiple heads when we need to support multiple connections
-    const lsn = heads['1'];
-    if (lsn == null) {
-      // Can happen if we haven't replicated anything yet.
-      return null;
-    }
-    const lastWriteCheckpoint = await this.db.write_checkpoints.findOne({
-      user_id: user_id,
-      'lsns.1': { $lte: lsn }
-    });
-    return lastWriteCheckpoint?.client_id ?? null;
-  }
-}
-
-export async function batchCreateCustomWriteCheckpoints(
-  db: PowerSyncMongo,
-  checkpoints: storage.CustomWriteCheckpointOptions[]
-): Promise<void> {
-  if (!checkpoints.length) {
-    return;
-  }
-
-  await safeBulkWrite(
-    db.custom_write_checkpoints,
-    checkpoints.map((checkpointOptions) => ({
-      updateOne: {
-        filter: { user_id: checkpointOptions.user_id, sync_rules_id: checkpointOptions.sync_rules_id },
-        update: {
-          $set: {
-            checkpoint: checkpointOptions.checkpoint,
-            sync_rules_id: checkpointOptions.sync_rules_id
-          }
-        },
-        upsert: true
-      }
-    })),
-    {}
-  );
-}
package/src/storage/implementation/OperationBatch.ts
@@ -1,129 +0,0 @@
-import { ToastableSqliteRow } from '@powersync/service-sync-rules';
-import * as bson from 'bson';
-
-import { storage } from '@powersync/service-core';
-
-/**
- * Maximum number of operations in a batch.
- */
-const MAX_BATCH_COUNT = 2000;
-
-/**
- * Maximum size of operations in the batch (estimated).
- */
-const MAX_RECORD_BATCH_SIZE = 5_000_000;
-
-/**
- * Maximum size of size of current_data documents we lookup at a time.
- */
-const MAX_CURRENT_DATA_BATCH_SIZE = 16_000_000;
-
-/**
- * Batch of input operations.
- *
- * We accumulate operations up to MAX_RECORD_BATCH_SIZE,
- * then further split into sub-batches if MAX_CURRENT_DATA_BATCH_SIZE is exceeded.
- */
-export class OperationBatch {
-  batch: RecordOperation[] = [];
-  currentSize: number = 0;
-
-  get length() {
-    return this.batch.length;
-  }
-
-  push(op: RecordOperation) {
-    this.batch.push(op);
-    this.currentSize += op.estimatedSize;
-  }
-
-  shouldFlush() {
-    return this.batch.length >= MAX_BATCH_COUNT || this.currentSize > MAX_RECORD_BATCH_SIZE;
-  }
-
-  /**
-   *
-   * @param sizes Map of source key to estimated size of the current_data document, or undefined if current_data is not persisted.
-   *
-   */
-  *batched(sizes: Map<string, number> | undefined): Generator<RecordOperation[]> {
-    if (sizes == null) {
-      yield this.batch;
-      return;
-    }
-    let currentBatch: RecordOperation[] = [];
-    let currentBatchSize = 0;
-    for (let op of this.batch) {
-      const key = op.internalBeforeKey;
-      const size = sizes.get(key) ?? 0;
-      if (currentBatchSize + size > MAX_CURRENT_DATA_BATCH_SIZE && currentBatch.length > 0) {
-        yield currentBatch;
-        currentBatch = [];
-        currentBatchSize = 0;
-      }
-      currentBatchSize += size;
-      currentBatch.push(op);
-    }
-    if (currentBatch.length > 0) {
-      yield currentBatch;
-    }
-  }
-}
-
-export class RecordOperation {
-  public readonly afterId: storage.ReplicaId | null;
-  public readonly beforeId: storage.ReplicaId;
-  public readonly internalBeforeKey: string;
-  public readonly internalAfterKey: string | null;
-  public readonly estimatedSize: number;
-
-  constructor(public readonly record: storage.SaveOptions) {
-    const afterId = record.afterReplicaId ?? null;
-    const beforeId = record.beforeReplicaId ?? record.afterReplicaId;
-    this.afterId = afterId;
-    this.beforeId = beforeId;
-    this.internalBeforeKey = cacheKey(record.sourceTable.id, beforeId);
-    this.internalAfterKey = afterId ? cacheKey(record.sourceTable.id, afterId) : null;
-
-    this.estimatedSize = estimateRowSize(record.before) + estimateRowSize(record.after);
-  }
-}
-
-/**
- * In-memory cache key - must not be persisted.
- */
-export function cacheKey(table: bson.ObjectId, id: storage.ReplicaId) {
-  if (storage.isUUID(id)) {
-    return `${table.toHexString()}.${id.toHexString()}`;
-  } else if (typeof id == 'string') {
-    return `${table.toHexString()}.${id}`;
-  } else {
-    return `${table.toHexString()}.${(bson.serialize({ id: id }) as Buffer).toString('base64')}`;
-  }
-}
-
-/**
- * Estimate in-memory size of row.
- */
-function estimateRowSize(record: ToastableSqliteRow | undefined) {
-  if (record == null) {
-    return 12;
-  }
-  let size = 0;
-  for (let [key, value] of Object.entries(record)) {
-    size += 12 + key.length;
-    // number | string | null | bigint | Uint8Array
-    if (value == null) {
-      size += 4;
-    } else if (typeof value == 'number') {
-      size += 8;
-    } else if (typeof value == 'bigint') {
-      size += 8;
-    } else if (typeof value == 'string') {
-      size += value.length;
-    } else if (value instanceof Uint8Array) {
-      size += value.byteLength;
-    }
-  }
-  return size;
-}
package/src/storage/implementation/PersistedBatch.ts
@@ -1,283 +0,0 @@
-import { JSONBig } from '@powersync/service-jsonbig';
-import { EvaluatedParameters, EvaluatedRow } from '@powersync/service-sync-rules';
-import * as bson from 'bson';
-import * as mongo from 'mongodb';
-
-import { logger } from '@powersync/lib-services-framework';
-import { storage, utils } from '@powersync/service-core';
-import { currentBucketKey } from './MongoBucketBatch.js';
-import { MongoIdSequence } from './MongoIdSequence.js';
-import { PowerSyncMongo } from './db.js';
-import {
-  BucketDataDocument,
-  BucketParameterDocument,
-  CurrentBucket,
-  CurrentDataDocument,
-  SourceKey
-} from './models.js';
-import { replicaIdToSubkey, safeBulkWrite } from './util.js';
-
-/**
- * Maximum size of operations we write in a single transaction.
- *
- * It's tricky to find the exact limit, but from experience, over 100MB
- * can cause an error:
- * > transaction is too large and will not fit in the storage engine cache
- *
- * Additionally, unbounded size here can balloon our memory usage in some edge
- * cases.
- *
- * When we reach this threshold, we commit the transaction and start a new one.
- */
-const MAX_TRANSACTION_BATCH_SIZE = 30_000_000;
-
-/**
- * Limit number of documents to write in a single transaction.
- *
- * This has an effect on error message size in some cases.
- */
-const MAX_TRANSACTION_DOC_COUNT = 2_000;
-
-/**
- * Keeps track of bulkwrite operations within a transaction.
- *
- * There may be multiple of these batches per transaction, but it may not span
- * multiple transactions.
- */
-export class PersistedBatch {
-  bucketData: mongo.AnyBulkWriteOperation<BucketDataDocument>[] = [];
-  bucketParameters: mongo.AnyBulkWriteOperation<BucketParameterDocument>[] = [];
-  currentData: mongo.AnyBulkWriteOperation<CurrentDataDocument>[] = [];
-
-  /**
-   * For debug logging only.
-   */
-  debugLastOpId: bigint | null = null;
-
-  /**
-   * Very rough estimate of transaction size.
-   */
-  currentSize = 0;
-
-  constructor(
-    private group_id: number,
-    writtenSize: number
-  ) {
-    this.currentSize = writtenSize;
-  }
-
-  saveBucketData(options: {
-    op_seq: MongoIdSequence;
-    sourceKey: storage.ReplicaId;
-    table: storage.SourceTable;
-    evaluated: EvaluatedRow[];
-    before_buckets: CurrentBucket[];
-  }) {
-    const remaining_buckets = new Map<string, CurrentBucket>();
-    for (let b of options.before_buckets) {
-      const key = currentBucketKey(b);
-      remaining_buckets.set(key, b);
-    }
-
-    const dchecksum = utils.hashDelete(replicaIdToSubkey(options.table.id, options.sourceKey));
-
-    for (const k of options.evaluated) {
-      const key = currentBucketKey(k);
-      remaining_buckets.delete(key);
-
-      // INSERT
-      const recordData = JSONBig.stringify(k.data);
-      const checksum = utils.hashData(k.table, k.id, recordData);
-      this.currentSize += recordData.length + 200;
-
-      const op_id = options.op_seq.next();
-      this.debugLastOpId = op_id;
-
-      this.bucketData.push({
-        insertOne: {
-          document: {
-            _id: {
-              g: this.group_id,
-              b: k.bucket,
-              o: op_id
-            },
-            op: 'PUT',
-            source_table: options.table.id,
-            source_key: options.sourceKey,
-            table: k.table,
-            row_id: k.id,
-            checksum: checksum,
-            data: recordData
-          }
-        }
-      });
-    }
-
-    for (let bd of remaining_buckets.values()) {
-      // REMOVE
-
-      const op_id = options.op_seq.next();
-      this.debugLastOpId = op_id;
-
-      this.bucketData.push({
-        insertOne: {
-          document: {
-            _id: {
-              g: this.group_id,
-              b: bd.bucket,
-              o: op_id
-            },
-            op: 'REMOVE',
-            source_table: options.table.id,
-            source_key: options.sourceKey,
-            table: bd.table,
-            row_id: bd.id,
-            checksum: dchecksum,
-            data: null
-          }
-        }
-      });
-      this.currentSize += 200;
-    }
-  }
-
-  saveParameterData(data: {
-    op_seq: MongoIdSequence;
-    sourceKey: storage.ReplicaId;
-    sourceTable: storage.SourceTable;
-    evaluated: EvaluatedParameters[];
-    existing_lookups: bson.Binary[];
-  }) {
-    // This is similar to saving bucket data.
-    // A key difference is that we don't need to keep the history intact.
-    // We do need to keep track of recent history though - enough that we can get consistent data for any specific checkpoint.
-    // Instead of storing per bucket id, we store per "lookup".
-    // A key difference is that we don't need to store or keep track of anything per-bucket - the entire record is
-    // either persisted or removed.
-    // We also don't need to keep history intact.
-    const { sourceTable, sourceKey, evaluated } = data;
-
-    const remaining_lookups = new Map<string, bson.Binary>();
-    for (let l of data.existing_lookups) {
-      remaining_lookups.set(l.toString('base64'), l);
-    }
-
-    // 1. Insert new entries
-    for (let result of evaluated) {
-      const binLookup = storage.serializeLookup(result.lookup);
-      const hex = binLookup.toString('base64');
-      remaining_lookups.delete(hex);
-
-      const op_id = data.op_seq.next();
-      this.debugLastOpId = op_id;
-      this.bucketParameters.push({
-        insertOne: {
-          document: {
-            _id: op_id,
-            key: {
-              g: this.group_id,
-              t: sourceTable.id,
-              k: sourceKey
-            },
-            lookup: binLookup,
-            bucket_parameters: result.bucket_parameters
-          }
-        }
-      });
-
-      this.currentSize += 200;
-    }
-
-    // 2. "REMOVE" entries for any lookup not touched.
-    for (let lookup of remaining_lookups.values()) {
-      const op_id = data.op_seq.next();
-      this.debugLastOpId = op_id;
-      this.bucketParameters.push({
-        insertOne: {
-          document: {
-            _id: op_id,
-            key: {
-              g: this.group_id,
-              t: sourceTable.id,
-              k: sourceKey
-            },
-            lookup: lookup,
-            bucket_parameters: []
-          }
-        }
-      });
-
-      this.currentSize += 200;
-    }
-  }
-
-  deleteCurrentData(id: SourceKey) {
-    const op: mongo.AnyBulkWriteOperation<CurrentDataDocument> = {
-      deleteOne: {
-        filter: { _id: id }
-      }
-    };
-    this.currentData.push(op);
-    this.currentSize += 50;
-  }
-
-  upsertCurrentData(id: SourceKey, values: Partial<CurrentDataDocument>) {
-    const op: mongo.AnyBulkWriteOperation<CurrentDataDocument> = {
-      updateOne: {
-        filter: { _id: id },
-        update: {
-          $set: values
-        },
-        upsert: true
-      }
-    };
-    this.currentData.push(op);
-    this.currentSize += (values.data?.length() ?? 0) + 100;
-  }
-
-  shouldFlushTransaction() {
-    return (
-      this.currentSize >= MAX_TRANSACTION_BATCH_SIZE ||
-      this.bucketData.length >= MAX_TRANSACTION_DOC_COUNT ||
-      this.currentData.length >= MAX_TRANSACTION_DOC_COUNT ||
-      this.bucketParameters.length >= MAX_TRANSACTION_DOC_COUNT
-    );
-  }
-
-  async flush(db: PowerSyncMongo, session: mongo.ClientSession) {
-    if (this.bucketData.length > 0) {
-      // calculate total size
-      await safeBulkWrite(db.bucket_data, this.bucketData, {
-        session,
-        // inserts only - order doesn't matter
-        ordered: false
-      });
-    }
-    if (this.bucketParameters.length > 0) {
-      await safeBulkWrite(db.bucket_parameters, this.bucketParameters, {
-        session,
-        // inserts only - order doesn't matter
-        ordered: false
-      });
-    }
-    if (this.currentData.length > 0) {
-      await safeBulkWrite(db.current_data, this.currentData, {
-        session,
-        // may update and delete data within the same batch - order matters
-        ordered: true
-      });
-    }
-
-    logger.info(
-      `powersync_${this.group_id} Flushed ${this.bucketData.length} + ${this.bucketParameters.length} + ${
-        this.currentData.length
-      } updates, ${Math.round(this.currentSize / 1024)}kb. Last op_id: ${this.debugLastOpId}`
-    );
-
-    this.bucketData = [];
-    this.bucketParameters = [];
-    this.currentData = [];
-    this.currentSize = 0;
-    this.debugLastOpId = null;
-  }
-}
package/src/storage/implementation/config.ts
@@ -1,40 +0,0 @@
-import * as urijs from 'uri-js';
-
-export interface MongoConnectionConfig {
-  uri: string;
-  username?: string;
-  password?: string;
-  database?: string;
-}
-
-/**
- * Validate and normalize connection options.
- *
- * Returns destructured options.
- *
- * For use by both storage and mongo module.
- */
-export function normalizeMongoConfig(options: MongoConnectionConfig) {
-  let uri = urijs.parse(options.uri);
-
-  const database = options.database ?? uri.path?.substring(1) ?? '';
-
-  const userInfo = uri.userinfo?.split(':');
-
-  const username = options.username ?? userInfo?.[0];
-  const password = options.password ?? userInfo?.[1];
-
-  if (database == '') {
-    throw new Error(`database required`);
-  }
-
-  delete uri.userinfo;
-
-  return {
-    uri: urijs.serialize(uri),
-    database,
-
-    username,
-    password
-  };
-}
package/src/storage/implementation/db.ts
@@ -1,87 +0,0 @@
-import { storage } from '@powersync/service-core';
-import { configFile } from '@powersync/service-types';
-import * as mongo from 'mongodb';
-import * as db from '../../db/db-index.js';
-import { Lock } from '../../locks/MonogLocks.js';
-import {
-  BucketDataDocument,
-  BucketParameterDocument,
-  CurrentDataDocument,
-  CustomWriteCheckpointDocument,
-  IdSequenceDocument,
-  InstanceDocument,
-  SourceTableDocument,
-  SyncRuleDocument,
-  WriteCheckpointDocument
-} from './models.js';
-
-export interface PowerSyncMongoOptions {
-  /**
-   * Optional - uses the database from the MongoClient connection URI if not specified.
-   */
-  database?: string;
-}
-
-export function createPowerSyncMongo(config: configFile.MongoStorageConfig) {
-  return new PowerSyncMongo(db.mongo.createMongoClient(config), { database: config.database });
-}
-
-export class PowerSyncMongo {
-  readonly current_data: mongo.Collection<CurrentDataDocument>;
-  readonly bucket_data: mongo.Collection<BucketDataDocument>;
-  readonly bucket_parameters: mongo.Collection<BucketParameterDocument>;
-  readonly op_id_sequence: mongo.Collection<IdSequenceDocument>;
-  readonly sync_rules: mongo.Collection<SyncRuleDocument>;
-  readonly source_tables: mongo.Collection<SourceTableDocument>;
-  readonly custom_write_checkpoints: mongo.Collection<CustomWriteCheckpointDocument>;
-  readonly write_checkpoints: mongo.Collection<WriteCheckpointDocument>;
-  readonly instance: mongo.Collection<InstanceDocument>;
-  readonly locks: mongo.Collection<Lock>;
-
-  readonly client: mongo.MongoClient;
-  readonly db: mongo.Db;
-
-  constructor(client: mongo.MongoClient, options?: PowerSyncMongoOptions) {
-    this.client = client;
-
-    const db = client.db(options?.database, {
-      ...storage.BSON_DESERIALIZE_OPTIONS
-    });
-    this.db = db;
-
-    this.current_data = db.collection<CurrentDataDocument>('current_data');
-    this.bucket_data = db.collection('bucket_data');
-    this.bucket_parameters = db.collection('bucket_parameters');
-    this.op_id_sequence = db.collection('op_id_sequence');
-    this.sync_rules = db.collection('sync_rules');
-    this.source_tables = db.collection('source_tables');
-    this.custom_write_checkpoints = db.collection('custom_write_checkpoints');
-    this.write_checkpoints = db.collection('write_checkpoints');
-    this.instance = db.collection('instance');
-    this.locks = this.db.collection('locks');
-  }
-
-  /**
-   * Clear all collections.
-   */
-  async clear() {
-    await this.current_data.deleteMany({});
-    await this.bucket_data.deleteMany({});
-    await this.bucket_parameters.deleteMany({});
-    await this.op_id_sequence.deleteMany({});
-    await this.sync_rules.deleteMany({});
-    await this.source_tables.deleteMany({});
-    await this.write_checkpoints.deleteMany({});
-    await this.instance.deleteOne({});
-    await this.locks.deleteMany({});
-  }
-
-  /**
-   * Drop the entire database.
-   *
-   * Primarily for tests.
-   */
-  async drop() {
-    await this.db.dropDatabase();
-  }
-}