@powersync/common 0.0.0-dev-20260503073249 → 0.0.0-dev-20260504100448
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/bundle.cjs +14 -659
- package/dist/bundle.cjs.map +1 -1
- package/dist/bundle.mjs +15 -648
- package/dist/bundle.mjs.map +1 -1
- package/dist/bundle.node.cjs +14 -659
- package/dist/bundle.node.cjs.map +1 -1
- package/dist/bundle.node.mjs +15 -648
- package/dist/bundle.node.mjs.map +1 -1
- package/dist/index.d.cts +19 -358
- package/legacy/sync_protocol.d.ts +103 -0
- package/lib/client/sync/bucket/BucketStorageAdapter.d.ts +1 -63
- package/lib/client/sync/bucket/BucketStorageAdapter.js.map +1 -1
- package/lib/client/sync/bucket/SqliteBucketStorage.d.ts +1 -28
- package/lib/client/sync/bucket/SqliteBucketStorage.js +0 -162
- package/lib/client/sync/bucket/SqliteBucketStorage.js.map +1 -1
- package/lib/client/sync/stream/AbstractRemote.d.ts +2 -12
- package/lib/client/sync/stream/AbstractRemote.js +3 -13
- package/lib/client/sync/stream/AbstractRemote.js.map +1 -1
- package/lib/client/sync/stream/AbstractStreamingSyncImplementation.d.ts +9 -35
- package/lib/client/sync/stream/AbstractStreamingSyncImplementation.js +11 -338
- package/lib/client/sync/stream/AbstractStreamingSyncImplementation.js.map +1 -1
- package/lib/client/sync/stream/JsonValue.d.ts +7 -0
- package/lib/client/sync/stream/JsonValue.js +2 -0
- package/lib/client/sync/stream/JsonValue.js.map +1 -0
- package/lib/client/sync/stream/core-instruction.d.ts +1 -2
- package/lib/client/sync/stream/core-instruction.js.map +1 -1
- package/lib/index.d.ts +1 -5
- package/lib/index.js +1 -5
- package/lib/index.js.map +1 -1
- package/package.json +7 -4
- package/src/client/sync/bucket/BucketStorageAdapter.ts +1 -70
- package/src/client/sync/bucket/SqliteBucketStorage.ts +1 -197
- package/src/client/sync/stream/AbstractRemote.ts +5 -27
- package/src/client/sync/stream/AbstractStreamingSyncImplementation.ts +22 -402
- package/src/client/sync/stream/JsonValue.ts +8 -0
- package/src/client/sync/stream/core-instruction.ts +1 -2
- package/src/index.ts +1 -5
- package/lib/client/sync/bucket/OpType.d.ts +0 -16
- package/lib/client/sync/bucket/OpType.js +0 -23
- package/lib/client/sync/bucket/OpType.js.map +0 -1
- package/lib/client/sync/bucket/OplogEntry.d.ts +0 -23
- package/lib/client/sync/bucket/OplogEntry.js +0 -36
- package/lib/client/sync/bucket/OplogEntry.js.map +0 -1
- package/lib/client/sync/bucket/SyncDataBatch.d.ts +0 -6
- package/lib/client/sync/bucket/SyncDataBatch.js +0 -12
- package/lib/client/sync/bucket/SyncDataBatch.js.map +0 -1
- package/lib/client/sync/bucket/SyncDataBucket.d.ts +0 -40
- package/lib/client/sync/bucket/SyncDataBucket.js +0 -40
- package/lib/client/sync/bucket/SyncDataBucket.js.map +0 -1
- package/lib/client/sync/stream/streaming-sync-types.d.ts +0 -143
- package/lib/client/sync/stream/streaming-sync-types.js +0 -26
- package/lib/client/sync/stream/streaming-sync-types.js.map +0 -1
- package/src/client/sync/bucket/OpType.ts +0 -23
- package/src/client/sync/bucket/OplogEntry.ts +0 -50
- package/src/client/sync/bucket/SyncDataBatch.ts +0 -11
- package/src/client/sync/bucket/SyncDataBucket.ts +0 -49
- package/src/client/sync/stream/streaming-sync-types.ts +0 -210
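The headline change in this release is the removal of the legacy JavaScript sync-line client (OpType, OplogEntry, SyncDataBucket, the streaming-sync type guards, and legacyStreamingSyncIteration), leaving the Rust-based core-extension client as the only SyncClientImplementation. The sketch below is an illustration only and not part of the published diff: it shows how a consumer might pin the Rust client explicitly. SyncClientImplementation and AbstractPowerSyncDatabase are real exports of this package, while the exact connect signature and the connector wiring are assumptions that should be checked against the SDK docs.

import { AbstractPowerSyncDatabase, SyncClientImplementation } from '@powersync/common';

// Hypothetical helper: connects a database with the Rust sync client pinned.
// `connect(connector, options)` taking a `clientImplementation` option mirrors the
// option handling visible in this diff (resolvedOptions.clientImplementation), but
// may differ in detail from the actual SDK signature.
export async function connectWithRustClient(
  db: AbstractPowerSyncDatabase,
  connector: Parameters<AbstractPowerSyncDatabase['connect']>[0]
): Promise<void> {
  await db.connect(connector, {
    // The JAVASCRIPT value has been removed in this version; RUST is the only option left.
    clientImplementation: SyncClientImplementation.RUST
  });
}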
package/dist/bundle.node.mjs
CHANGED
@@ -3335,103 +3335,6 @@ class AbortOperation extends Error {
     }
 }
 
-var OpTypeEnum;
-(function (OpTypeEnum) {
-    OpTypeEnum[OpTypeEnum["CLEAR"] = 1] = "CLEAR";
-    OpTypeEnum[OpTypeEnum["MOVE"] = 2] = "MOVE";
-    OpTypeEnum[OpTypeEnum["PUT"] = 3] = "PUT";
-    OpTypeEnum[OpTypeEnum["REMOVE"] = 4] = "REMOVE";
-})(OpTypeEnum || (OpTypeEnum = {}));
-/**
- * Used internally for sync buckets.
- */
-class OpType {
-    value;
-    static fromJSON(jsonValue) {
-        return new OpType(OpTypeEnum[jsonValue]);
-    }
-    constructor(value) {
-        this.value = value;
-    }
-    toJSON() {
-        return Object.entries(OpTypeEnum).find(([, value]) => value === this.value)[0];
-    }
-}
-
-class OplogEntry {
-    op_id;
-    op;
-    checksum;
-    subkey;
-    object_type;
-    object_id;
-    data;
-    static fromRow(row) {
-        return new OplogEntry(row.op_id, OpType.fromJSON(row.op), row.checksum, row.subkey, row.object_type, row.object_id, row.data);
-    }
-    constructor(op_id, op, checksum, subkey, object_type, object_id, data) {
-        this.op_id = op_id;
-        this.op = op;
-        this.checksum = checksum;
-        this.subkey = subkey;
-        this.object_type = object_type;
-        this.object_id = object_id;
-        this.data = data;
-    }
-    toJSON(fixedKeyEncoding = false) {
-        return {
-            op_id: this.op_id,
-            op: this.op.toJSON(),
-            object_type: this.object_type,
-            object_id: this.object_id,
-            checksum: this.checksum,
-            data: this.data,
-            // Older versions of the JS SDK used to always JSON.stringify here. That has always been wrong,
-            // but we need to migrate gradually to not break existing databases.
-            subkey: fixedKeyEncoding ? this.subkey : JSON.stringify(this.subkey)
-        };
-    }
-}
-
-class SyncDataBucket {
-    bucket;
-    data;
-    has_more;
-    after;
-    next_after;
-    static fromRow(row) {
-        return new SyncDataBucket(row.bucket, row.data.map((entry) => OplogEntry.fromRow(entry)), row.has_more ?? false, row.after, row.next_after);
-    }
-    constructor(bucket, data,
-    /**
-     * True if the response does not contain all the data for this bucket, and another request must be made.
-     */
-    has_more,
-    /**
-     * The `after` specified in the request.
-     */
-    after,
-    /**
-     * Use this for the next request.
-     */
-    next_after) {
-        this.bucket = bucket;
-        this.data = data;
-        this.has_more = has_more;
-        this.after = after;
-        this.next_after = next_after;
-    }
-    toJSON(fixedKeyEncoding = false) {
-        return {
-            bucket: this.bucket,
-            has_more: this.has_more,
-            after: this.after,
-            next_after: this.next_after,
-            data: this.data.map((entry) => entry.toJSON(fixedKeyEncoding))
-        };
-    }
-}
-
 var dist = {};
 
 var Codecs = {};
@@ -8773,22 +8676,12 @@ class AbstractRemote {
      * Returns a data stream of sync line data, fetched via RSocket-over-WebSocket.
      *
      * The only mechanism to abort the returned stream is to use the abort signal in {@link SocketSyncStreamOptions}.
-     *
-     * @param bson A BSON encoder and decoder. When set, the data stream will be requested with a BSON payload
-     * (required for compatibility with older sync services).
      */
-    async socketStreamRaw(options
+    async socketStreamRaw(options) {
         const { path, fetchStrategy = FetchStrategy.Buffered } = options;
-        const mimeType =
+        const mimeType = 'application/json';
         function toBuffer(js) {
-
-            if (bson != null) {
-                contents = bson.serialize(js);
-            }
-            else {
-                contents = JSON.stringify(js);
-            }
-            return Buffer.from(contents);
+            return Buffer.from(JSON.stringify(js));
         }
         const syncQueueRequestSize = fetchStrategy == FetchStrategy.Buffered ? 10 : 1;
         const request = await this.buildRequest(path);
@@ -9090,32 +8983,6 @@ function coreStatusToJs(status) {
     };
 }
 
-function isStreamingSyncData(line) {
-    return line.data != null;
-}
-function isStreamingKeepalive(line) {
-    return line.token_expires_in != null;
-}
-function isStreamingSyncCheckpoint(line) {
-    return line.checkpoint != null;
-}
-function isStreamingSyncCheckpointComplete(line) {
-    return line.checkpoint_complete != null;
-}
-function isStreamingSyncCheckpointPartiallyComplete(line) {
-    return line.partial_checkpoint_complete != null;
-}
-function isStreamingSyncCheckpointDiff(line) {
-    return line.checkpoint_diff != null;
-}
-function isContinueCheckpointRequest(request) {
-    return (Array.isArray(request.buckets) &&
-        typeof request.checkpoint_token == 'string');
-}
-function isSyncNewCheckpointRequest(request) {
-    return typeof request.request_checkpoint == 'object';
-}
-
 var LockType;
 (function (LockType) {
     LockType["CRUD"] = "crud";
@@ -9128,35 +8995,21 @@ var SyncStreamConnectionMethod;
 })(SyncStreamConnectionMethod || (SyncStreamConnectionMethod = {}));
 var SyncClientImplementation;
 (function (SyncClientImplementation) {
-    /**
-     * Decodes and handles sync lines received from the sync service in JavaScript.
-     *
-     * This is the default option.
-     *
-     * @deprecated We recommend the {@link RUST} client implementation for all apps. If you have issues with
-     * the Rust client, please file an issue or reach out to us. The JavaScript client will be removed in a future
-     * version of the PowerSync SDK.
-     */
-    SyncClientImplementation["JAVASCRIPT"] = "js";
     /**
      * This implementation offloads the sync line decoding and handling into the PowerSync
      * core extension.
      *
-     * This
-     * recommended client implementation for all apps.
+     * This is the only option, as an older JavaScript client implementation has been removed from the SDK.
      *
      * ## Compatibility warning
      *
      * The Rust sync client stores sync data in a format that is slightly different than the one used
-     * by the old
-     *
-     * Further, the {@link JAVASCRIPT} client in recent versions of the PowerSync JS SDK (starting from
-     * the version introducing {@link RUST} as an option) also supports the new format, so you can switch
-     * back to {@link JAVASCRIPT} later.
+     * by the old JavaScript client. When adopting the {@link RUST} client on existing databases, the PowerSync SDK will
+     * migrate the format automatically.
      *
-     *
-     *
-     *
+     * SDK versions supporting both the JavaScript and the Rust client support both formats with the JavaScript client
+     * implementaiton. However, downgrading to an SDK version that only supports the JavaScript client would not be
+     * possible anymore. Problematic SDK versions have been released before 2025-06-09.
      */
     SyncClientImplementation["RUST"] = "rust";
 })(SyncClientImplementation || (SyncClientImplementation = {}));
@@ -9179,13 +9032,7 @@ const DEFAULT_STREAM_CONNECTION_OPTIONS = {
     serializedSchema: undefined,
     includeDefaultStreams: true
 };
-// The priority we assume when we receive checkpoint lines where no priority is set.
-// This is the default priority used by the sync service, but can be set to an arbitrary
-// value since sync services without priorities also won't send partial sync completion
-// messages.
-const FALLBACK_PRIORITY = 3;
 class AbstractStreamingSyncImplementation extends BaseObserver {
-    _lastSyncedAt;
     options;
     abortController;
     // In rare cases, mostly for tests, uploads can be triggered without being properly connected.
@@ -9275,9 +9122,6 @@ class AbstractStreamingSyncImplementation extends BaseObserver {
         this.crudUpdateListener = undefined;
         this.uploadAbortController?.abort();
     }
-    async hasCompletedSync() {
-        return this.options.adapter.hasCompletedSync();
-    }
     async getWriteCheckpoint() {
         const clientId = await this.options.adapter.getClientId();
         let path = `/write-checkpoint2.json?client_id=${clientId}`;
@@ -9359,7 +9203,7 @@ The next upload iteration will be delayed.`);
                         });
                     }
                 }
-                this.uploadAbortController =
+                this.uploadAbortController = undefined;
             }
         });
     }
@@ -9519,18 +9363,6 @@ The next upload iteration will be delayed.`);
         // iteration is active. That allows us to reconnect ASAP, instead of having to wait for the next sync line.
         this.handleActiveStreamsChange?.();
     }
-    async collectLocalBucketState() {
-        const bucketEntries = await this.options.adapter.getBucketStates();
-        const req = bucketEntries.map((entry) => ({
-            name: entry.bucket,
-            after: entry.op_id
-        }));
-        const localDescriptions = new Map();
-        for (const entry of bucketEntries) {
-            localDescriptions.set(entry.bucket, null);
-        }
-        return [req, localDescriptions];
-    }
     /**
      * Older versions of the JS SDK used to encode subkeys as JSON in {@link OplogEntry.toJSON}.
      * Because subkeys are always strings, this leads to quotes being added around them in `ps_oplog`.
@@ -9572,19 +9404,13 @@ The next upload iteration will be delayed.`);
                 }
                 const clientImplementation = resolvedOptions.clientImplementation;
                 this.updateSyncStatus({ clientImplementation });
-
-
-                    return null;
-                }
-                else {
-                    await this.requireKeyFormat(true);
-                    return await this.rustSyncIteration(signal, resolvedOptions);
-                }
+                await this.requireKeyFormat(true);
+                return await this.rustSyncIteration(signal, resolvedOptions);
             }
         });
     }
     async receiveSyncLines(data) {
-        const { options, connection
+        const { options, connection } = data;
         const remote = this.options.remote;
         if (connection.connectionMethod == SyncStreamConnectionMethod.HTTP) {
             return await remote.fetchStream(options);
@@ -9593,232 +9419,8 @@ The next upload iteration will be delayed.`);
             return await this.options.remote.socketStreamRaw({
                 ...options,
                 ...{ fetchStrategy: connection.fetchStrategy }
-            }
-        }
-    }
-    async legacyStreamingSyncIteration(signal, resolvedOptions) {
-        const rawTables = resolvedOptions.serializedSchema?.raw_tables;
-        if (rawTables != null && rawTables.length) {
-            this.logger.warn('Raw tables require the Rust-based sync client. The JS client will ignore them.');
-        }
-        if (this.activeStreams.length) {
-            this.logger.error('Sync streams require `clientImplementation: SyncClientImplementation.RUST` when connecting.');
-        }
-        this.logger.debug('Streaming sync iteration started');
-        this.options.adapter.startSession();
-        let [req, bucketMap] = await this.collectLocalBucketState();
-        let targetCheckpoint = null;
-        // A checkpoint that has been validated but not applied (e.g. due to pending local writes)
-        let pendingValidatedCheckpoint = null;
-        const clientId = await this.options.adapter.getClientId();
-        const usingFixedKeyFormat = await this.requireKeyFormat(false);
-        this.logger.debug('Requesting stream from server');
-        const syncOptions = {
-            path: '/sync/stream',
-            abortSignal: signal,
-            data: {
-                buckets: req,
-                include_checksum: true,
-                raw_data: true,
-                parameters: resolvedOptions.params,
-                app_metadata: resolvedOptions.appMetadata,
-                client_id: clientId
-            }
-        };
-        const bson = await this.options.remote.getBSON();
-        const source = await this.receiveSyncLines({
-            options: syncOptions,
-            connection: resolvedOptions,
-            bson
-        });
-        const stream = injectable(map(source, (line) => {
-            if (typeof line == 'string') {
-                return JSON.parse(line);
-            }
-            else {
-                return bson.deserialize(line);
-            }
-        }));
-        this.logger.debug('Stream established. Processing events');
-        this.notifyCompletedUploads = () => {
-            stream.inject({ crud_upload_completed: null });
-        };
-        while (true) {
-            const { value: line, done } = await stream.next();
-            if (done) {
-                // The stream has closed while waiting
-                return;
-            }
-            if ('crud_upload_completed' in line) {
-                if (pendingValidatedCheckpoint != null) {
-                    const { applied, endIteration } = await this.applyCheckpoint(pendingValidatedCheckpoint);
-                    if (applied) {
-                        pendingValidatedCheckpoint = null;
-                    }
-                    else if (endIteration) {
-                        break;
-                    }
-                }
-                continue;
-            }
-            // A connection is active and messages are being received
-            if (!this.syncStatus.connected) {
-                // There is a connection now
-                Promise.resolve().then(() => this.triggerCrudUpload());
-                this.updateSyncStatus({
-                    connected: true
-                });
-            }
-            if (isStreamingSyncCheckpoint(line)) {
-                targetCheckpoint = line.checkpoint;
-                // New checkpoint - existing validated checkpoint is no longer valid
-                pendingValidatedCheckpoint = null;
-                const bucketsToDelete = new Set(bucketMap.keys());
-                const newBuckets = new Map();
-                for (const checksum of line.checkpoint.buckets) {
-                    newBuckets.set(checksum.bucket, {
-                        name: checksum.bucket,
-                        priority: checksum.priority ?? FALLBACK_PRIORITY
-                    });
-                    bucketsToDelete.delete(checksum.bucket);
-                }
-                if (bucketsToDelete.size > 0) {
-                    this.logger.debug('Removing buckets', [...bucketsToDelete]);
-                }
-                bucketMap = newBuckets;
-                await this.options.adapter.removeBuckets([...bucketsToDelete]);
-                await this.options.adapter.setTargetCheckpoint(targetCheckpoint);
-                await this.updateSyncStatusForStartingCheckpoint(targetCheckpoint);
-            }
-            else if (isStreamingSyncCheckpointComplete(line)) {
-                const result = await this.applyCheckpoint(targetCheckpoint);
-                if (result.endIteration) {
-                    return;
-                }
-                else if (!result.applied) {
-                    // "Could not apply checkpoint due to local data". We need to retry after
-                    // finishing uploads.
-                    pendingValidatedCheckpoint = targetCheckpoint;
-                }
-                else {
-                    // Nothing to retry later. This would likely already be null from the last
-                    // checksum or checksum_diff operation, but we make sure.
-                    pendingValidatedCheckpoint = null;
-                }
-            }
-            else if (isStreamingSyncCheckpointPartiallyComplete(line)) {
-                const priority = line.partial_checkpoint_complete.priority;
-                this.logger.debug('Partial checkpoint complete', priority);
-                const result = await this.options.adapter.syncLocalDatabase(targetCheckpoint, priority);
-                if (!result.checkpointValid) {
-                    // This means checksums failed. Start again with a new checkpoint.
-                    // TODO: better back-off
-                    await new Promise((resolve) => setTimeout(resolve, 50));
-                    return;
-                }
-                else if (!result.ready) ;
-                else {
-                    // We'll keep on downloading, but can report that this priority is synced now.
-                    this.logger.debug('partial checkpoint validation succeeded');
-                    // All states with a higher priority can be deleted since this partial sync includes them.
-                    const priorityStates = this.syncStatus.priorityStatusEntries.filter((s) => s.priority <= priority);
-                    priorityStates.push({
-                        priority,
-                        lastSyncedAt: new Date(),
-                        hasSynced: true
-                    });
-                    this.updateSyncStatus({
-                        connected: true,
-                        priorityStatusEntries: priorityStates
-                    });
-                }
-            }
-            else if (isStreamingSyncCheckpointDiff(line)) {
-                // TODO: It may be faster to just keep track of the diff, instead of the entire checkpoint
-                if (targetCheckpoint == null) {
-                    throw new Error('Checkpoint diff without previous checkpoint');
-                }
-                // New checkpoint - existing validated checkpoint is no longer valid
-                pendingValidatedCheckpoint = null;
-                const diff = line.checkpoint_diff;
-                const newBuckets = new Map();
-                for (const checksum of targetCheckpoint.buckets) {
-                    newBuckets.set(checksum.bucket, checksum);
-                }
-                for (const checksum of diff.updated_buckets) {
-                    newBuckets.set(checksum.bucket, checksum);
-                }
-                for (const bucket of diff.removed_buckets) {
-                    newBuckets.delete(bucket);
-                }
-                const newCheckpoint = {
-                    last_op_id: diff.last_op_id,
-                    buckets: [...newBuckets.values()],
-                    write_checkpoint: diff.write_checkpoint
-                };
-                targetCheckpoint = newCheckpoint;
-                await this.updateSyncStatusForStartingCheckpoint(targetCheckpoint);
-                bucketMap = new Map();
-                newBuckets.forEach((checksum, name) => bucketMap.set(name, {
-                    name: checksum.bucket,
-                    priority: checksum.priority ?? FALLBACK_PRIORITY
-                }));
-                const bucketsToDelete = diff.removed_buckets;
-                if (bucketsToDelete.length > 0) {
-                    this.logger.debug('Remove buckets', bucketsToDelete);
-                }
-                await this.options.adapter.removeBuckets(bucketsToDelete);
-                await this.options.adapter.setTargetCheckpoint(targetCheckpoint);
-            }
-            else if (isStreamingSyncData(line)) {
-                const { data } = line;
-                const previousProgress = this.syncStatus.dataFlowStatus.downloadProgress;
-                let updatedProgress = null;
-                if (previousProgress) {
-                    updatedProgress = { ...previousProgress };
-                    const progressForBucket = updatedProgress[data.bucket];
-                    if (progressForBucket) {
-                        updatedProgress[data.bucket] = {
-                            ...progressForBucket,
-                            since_last: progressForBucket.since_last + data.data.length
-                        };
-                    }
-                }
-                this.updateSyncStatus({
-                    dataFlow: {
-                        downloading: true,
-                        downloadProgress: updatedProgress
-                    }
-                });
-                await this.options.adapter.saveSyncData({ buckets: [SyncDataBucket.fromRow(data)] }, usingFixedKeyFormat);
-            }
-            else if (isStreamingKeepalive(line)) {
-                const remaining_seconds = line.token_expires_in;
-                if (remaining_seconds == 0) {
-                    // Connection would be closed automatically right after this
-                    this.logger.debug('Token expiring; reconnect');
-                    /**
-                     * For a rare case where the backend connector does not update the token
-                     * (uses the same one), this should have some delay.
-                     */
-                    await this.delayRetry();
-                    return;
-                }
-                else if (remaining_seconds < 30) {
-                    this.logger.debug('Token will expire soon; reconnect');
-                    // Pre-emptively refresh the token
-                    this.options.remote.invalidateCredentials();
-                    return;
-                }
-                this.triggerCrudUpload();
-            }
-            else {
-                this.logger.debug('Received unknown sync line', line);
-            }
+            });
         }
-        this.logger.debug('Stream input empty');
-        // Connection closed. Likely due to auth issue.
-        return;
     }
     async rustSyncIteration(signal, resolvedOptions) {
         const syncImplementation = this;
@@ -9981,68 +9583,6 @@ The next upload iteration will be delayed.`);
         }
         return { immediateRestart: hideDisconnectOnRestart };
     }
-    async updateSyncStatusForStartingCheckpoint(checkpoint) {
-        const localProgress = await this.options.adapter.getBucketOperationProgress();
-        const progress = {};
-        let invalidated = false;
-        for (const bucket of checkpoint.buckets) {
-            const savedProgress = localProgress[bucket.bucket];
-            const atLast = savedProgress?.atLast ?? 0;
-            const sinceLast = savedProgress?.sinceLast ?? 0;
-            progress[bucket.bucket] = {
-                // The fallback priority doesn't matter here, but 3 is the one newer versions of the sync service
-                // will use by default.
-                priority: bucket.priority ?? 3,
-                at_last: atLast,
-                since_last: sinceLast,
-                target_count: bucket.count ?? 0
-            };
-            if (bucket.count != null && bucket.count < atLast + sinceLast) {
-                // Either due to a defrag / sync rule deploy or a compaction operation, the size
-                // of the bucket shrank so much that the local ops exceed the ops in the updated
-                // bucket. We can't prossibly report progress in this case (it would overshoot 100%).
-                invalidated = true;
-            }
-        }
-        if (invalidated) {
-            for (const bucket in progress) {
-                const bucketProgress = progress[bucket];
-                bucketProgress.at_last = 0;
-                bucketProgress.since_last = 0;
-            }
-        }
-        this.updateSyncStatus({
-            dataFlow: {
-                downloading: true,
-                downloadProgress: progress
-            }
-        });
-    }
-    async applyCheckpoint(checkpoint) {
-        let result = await this.options.adapter.syncLocalDatabase(checkpoint);
-        if (!result.checkpointValid) {
-            this.logger.debug(`Checksum mismatch in checkpoint ${checkpoint.last_op_id}, will reconnect`);
-            // This means checksums failed. Start again with a new checkpoint.
-            // TODO: better back-off
-            await new Promise((resolve) => setTimeout(resolve, 50));
-            return { applied: false, endIteration: true };
-        }
-        else if (!result.ready) {
-            this.logger.debug(`Could not apply checkpoint ${checkpoint.last_op_id} due to local data. We will retry applying the checkpoint after that upload is completed.`);
-            return { applied: false, endIteration: false };
-        }
-        this.logger.debug(`Applied checkpoint ${checkpoint.last_op_id}`, checkpoint);
-        this.updateSyncStatus({
-            connected: true,
-            lastSyncedAt: new Date(),
-            dataFlow: {
-                downloading: false,
-                downloadProgress: null,
-                downloadError: undefined
-            }
-        });
-        return { applied: true, endIteration: false };
-    }
     updateSyncStatus(options) {
         const updatedStatus = new SyncStatus({
             connected: options.connected ?? this.syncStatus.connected,
@@ -11581,14 +11121,12 @@ class SqliteBucketStorage extends BaseObserver {
     db;
     logger;
     tableNames;
-    _hasCompletedSync;
     updateListener;
     _clientId;
     constructor(db, logger = Logger.get('SqliteBucketStorage')) {
         super();
         this.db = db;
         this.logger = logger;
-        this._hasCompletedSync = false;
         this.tableNames = new Set();
         this.updateListener = db.registerListener({
             tablesUpdated: (update) => {
@@ -11600,7 +11138,6 @@ class SqliteBucketStorage extends BaseObserver {
         });
     }
     async init() {
-        this._hasCompletedSync = false;
         const existingTableRows = await this.db.getAll(`SELECT name FROM sqlite_master WHERE type='table' AND name GLOB 'ps_data_*'`);
         for (const row of existingTableRows ?? []) {
             this.tableNames.add(row.name);
@@ -11622,156 +11159,6 @@ class SqliteBucketStorage extends BaseObserver {
     getMaxOpId() {
         return MAX_OP_ID;
     }
-    /**
-     * Reset any caches.
-     */
-    startSession() { }
-    async getBucketStates() {
-        const result = await this.db.getAll("SELECT name as bucket, cast(last_op as TEXT) as op_id FROM ps_buckets WHERE pending_delete = 0 AND name != '$local'");
-        return result;
-    }
-    async getBucketOperationProgress() {
-        const rows = await this.db.getAll('SELECT name, count_at_last, count_since_last FROM ps_buckets');
-        return Object.fromEntries(rows.map((r) => [r.name, { atLast: r.count_at_last, sinceLast: r.count_since_last }]));
-    }
-    async saveSyncData(batch, fixedKeyFormat = false) {
-        await this.writeTransaction(async (tx) => {
-            for (const b of batch.buckets) {
-                await tx.execute('INSERT INTO powersync_operations(op, data) VALUES(?, ?)', [
-                    'save',
-                    JSON.stringify({ buckets: [b.toJSON(fixedKeyFormat)] })
-                ]);
-                this.logger.debug(`Saved batch of data for bucket: ${b.bucket}, operations: ${b.data.length}`);
-            }
-        });
-    }
-    async removeBuckets(buckets) {
-        for (const bucket of buckets) {
-            await this.deleteBucket(bucket);
-        }
-    }
-    /**
-     * Mark a bucket for deletion.
-     */
-    async deleteBucket(bucket) {
-        await this.writeTransaction(async (tx) => {
-            await tx.execute('INSERT INTO powersync_operations(op, data) VALUES(?, ?)', ['delete_bucket', bucket]);
-        });
-        this.logger.debug(`Done deleting bucket ${bucket}`);
-    }
-    async hasCompletedSync() {
-        if (this._hasCompletedSync) {
-            return true;
-        }
-        const r = await this.db.get(`SELECT powersync_last_synced_at() as synced_at`);
-        const completed = r.synced_at != null;
-        if (completed) {
-            this._hasCompletedSync = true;
-        }
-        return completed;
-    }
-    async syncLocalDatabase(checkpoint, priority) {
-        const r = await this.validateChecksums(checkpoint, priority);
-        if (!r.checkpointValid) {
-            this.logger.error('Checksums failed for', r.checkpointFailures);
-            for (const b of r.checkpointFailures ?? []) {
-                await this.deleteBucket(b);
-            }
-            return { ready: false, checkpointValid: false, checkpointFailures: r.checkpointFailures };
-        }
-        if (priority == null) {
-            this.logger.debug(`Validated checksums checkpoint ${checkpoint.last_op_id}`);
-        }
-        else {
-            this.logger.debug(`Validated checksums for partial checkpoint ${checkpoint.last_op_id}, priority ${priority}`);
-        }
-        let buckets = checkpoint.buckets;
-        if (priority !== undefined) {
-            buckets = buckets.filter((b) => hasMatchingPriority(priority, b));
-        }
-        const bucketNames = buckets.map((b) => b.bucket);
-        await this.writeTransaction(async (tx) => {
-            await tx.execute(`UPDATE ps_buckets SET last_op = ? WHERE name IN (SELECT json_each.value FROM json_each(?))`, [
-                checkpoint.last_op_id,
-                JSON.stringify(bucketNames)
-            ]);
-            if (priority == null && checkpoint.write_checkpoint) {
-                await tx.execute("UPDATE ps_buckets SET last_op = ? WHERE name = '$local'", [checkpoint.write_checkpoint]);
-            }
-        });
-        const valid = await this.updateObjectsFromBuckets(checkpoint, priority);
-        if (!valid) {
-            return { ready: false, checkpointValid: true };
-        }
-        return {
-            ready: true,
-            checkpointValid: true
-        };
-    }
-    /**
-     * Atomically update the local state to the current checkpoint.
-     *
-     * This includes creating new tables, dropping old tables, and copying data over from the oplog.
-     */
-    async updateObjectsFromBuckets(checkpoint, priority) {
-        let arg = '';
-        if (priority !== undefined) {
-            const affectedBuckets = [];
-            for (const desc of checkpoint.buckets) {
-                if (hasMatchingPriority(priority, desc)) {
-                    affectedBuckets.push(desc.bucket);
-                }
-            }
-            arg = JSON.stringify({ priority, buckets: affectedBuckets });
-        }
-        return this.writeTransaction(async (tx) => {
-            const { insertId: result } = await tx.execute('INSERT INTO powersync_operations(op, data) VALUES(?, ?)', [
-                'sync_local',
-                arg
-            ]);
-            if (result == 1) {
-                if (priority == null) {
-                    const bucketToCount = Object.fromEntries(checkpoint.buckets.map((b) => [b.bucket, b.count]));
-                    // The two parameters could be replaced with one, but: https://github.com/powersync-ja/better-sqlite3/pull/6
-                    const jsonBucketCount = JSON.stringify(bucketToCount);
-                    await tx.execute("UPDATE ps_buckets SET count_since_last = 0, count_at_last = ?->name WHERE name != '$local' AND ?->name IS NOT NULL", [jsonBucketCount, jsonBucketCount]);
-                }
-                return true;
-            }
-            else {
-                return false;
-            }
-        });
-    }
-    async validateChecksums(checkpoint, priority) {
-        if (priority !== undefined) {
-            // Only validate the buckets within the priority we care about
-            const newBuckets = checkpoint.buckets.filter((cs) => hasMatchingPriority(priority, cs));
-            checkpoint = { ...checkpoint, buckets: newBuckets };
-        }
-        const rs = await this.db.execute('SELECT powersync_validate_checkpoint(?) as result', [
-            JSON.stringify({ ...checkpoint })
-        ]);
-        const resultItem = rs.rows?.item(0);
-        if (!resultItem) {
-            return {
-                checkpointValid: false,
-                ready: false,
-                checkpointFailures: []
-            };
-        }
-        const result = JSON.parse(resultItem['result']);
-        if (result['valid']) {
-            return { ready: true, checkpointValid: true };
-        }
-        else {
-            return {
-                checkpointValid: false,
-                ready: false,
-                checkpointFailures: result['failed_buckets']
-            };
-        }
-    }
     async updateLocalTarget(cb) {
         const rs1 = await this.db.getAll("SELECT target_op FROM ps_buckets WHERE name = '$local' AND target_op = CAST(? as INTEGER)", [MAX_OP_ID]);
         if (!rs1.length) {
@@ -11862,12 +11249,6 @@ class SqliteBucketStorage extends BaseObserver {
     async writeTransaction(callback, options) {
         return this.db.writeTransaction(callback, options);
     }
-    /**
-     * Set a target checkpoint.
-     */
-    async setTargetCheckpoint(checkpoint) {
-        // No-op for now
-    }
     async control(op, payload) {
         return await this.writeTransaction(async (tx) => {
             const [[raw]] = await tx.executeRaw('SELECT powersync_control(?, ?)', [op, payload]);
@@ -11891,20 +11272,6 @@ class SqliteBucketStorage extends BaseObserver {
     }
     static _subkeyMigrationKey = 'powersync_js_migrated_subkeys';
 }
-function hasMatchingPriority(priority, bucket) {
-    return bucket.priority != null && bucket.priority <= priority;
-}
-
-// TODO JSON
-class SyncDataBatch {
-    buckets;
-    static fromJSON(json) {
-        return new SyncDataBatch(json.buckets.map((bucket) => SyncDataBucket.fromRow(bucket)));
-    }
-    constructor(buckets) {
-        this.buckets = buckets;
-    }
-}
 
 /**
  * Thrown when an underlying database connection is closed.
@@ -12162,5 +11529,5 @@ const parseQuery = (query, parameters) => {
     return { sqlStatement, parameters: parameters };
 };
 
-export { ATTACHMENT_TABLE, AbortOperation, AbstractPowerSyncDatabase, AbstractPowerSyncDatabaseOpenFactory, AbstractQueryProcessor, AbstractRemote, AbstractStreamingSyncImplementation, ArrayComparator, AttachmentContext, AttachmentQueue, AttachmentService, AttachmentState, AttachmentTable, BaseObserver, Column, ColumnType, ConnectionClosedError, ConnectionManager, ControlledExecutor, CrudBatch, CrudEntry, CrudTransaction, DBAdapterDefaultMixin, DBGetUtilsDefaultMixin, DEFAULT_CRUD_BATCH_LIMIT, DEFAULT_CRUD_UPLOAD_THROTTLE_MS, DEFAULT_INDEX_COLUMN_OPTIONS, DEFAULT_INDEX_OPTIONS, DEFAULT_LOCK_TIMEOUT_MS, DEFAULT_POWERSYNC_CLOSE_OPTIONS, DEFAULT_POWERSYNC_DB_OPTIONS, DEFAULT_REMOTE_LOGGER, DEFAULT_REMOTE_OPTIONS, DEFAULT_RETRY_DELAY_MS, DEFAULT_ROW_COMPARATOR, DEFAULT_STREAMING_SYNC_OPTIONS, DEFAULT_STREAM_CONNECTION_OPTIONS, DEFAULT_SYNC_CLIENT_IMPLEMENTATION, DEFAULT_TABLE_OPTIONS, DEFAULT_WATCH_QUERY_OPTIONS, DEFAULT_WATCH_THROTTLE_MS, DiffTriggerOperation, DifferentialQueryProcessor, EMPTY_DIFFERENTIAL, EncodingType, FalsyComparator, FetchImplementationProvider, FetchStrategy, GetAllQuery, Index, IndexedColumn, InvalidSQLCharacters, LockType, LogLevel, MAX_AMOUNT_OF_COLUMNS, MAX_OP_ID, MEMORY_TRIGGER_CLAIM_MANAGER, Mutex, OnChangeQueryProcessor,
+export { ATTACHMENT_TABLE, AbortOperation, AbstractPowerSyncDatabase, AbstractPowerSyncDatabaseOpenFactory, AbstractQueryProcessor, AbstractRemote, AbstractStreamingSyncImplementation, ArrayComparator, AttachmentContext, AttachmentQueue, AttachmentService, AttachmentState, AttachmentTable, BaseObserver, Column, ColumnType, ConnectionClosedError, ConnectionManager, ControlledExecutor, CrudBatch, CrudEntry, CrudTransaction, DBAdapterDefaultMixin, DBGetUtilsDefaultMixin, DEFAULT_CRUD_BATCH_LIMIT, DEFAULT_CRUD_UPLOAD_THROTTLE_MS, DEFAULT_INDEX_COLUMN_OPTIONS, DEFAULT_INDEX_OPTIONS, DEFAULT_LOCK_TIMEOUT_MS, DEFAULT_POWERSYNC_CLOSE_OPTIONS, DEFAULT_POWERSYNC_DB_OPTIONS, DEFAULT_REMOTE_LOGGER, DEFAULT_REMOTE_OPTIONS, DEFAULT_RETRY_DELAY_MS, DEFAULT_ROW_COMPARATOR, DEFAULT_STREAMING_SYNC_OPTIONS, DEFAULT_STREAM_CONNECTION_OPTIONS, DEFAULT_SYNC_CLIENT_IMPLEMENTATION, DEFAULT_TABLE_OPTIONS, DEFAULT_WATCH_QUERY_OPTIONS, DEFAULT_WATCH_THROTTLE_MS, DiffTriggerOperation, DifferentialQueryProcessor, EMPTY_DIFFERENTIAL, EncodingType, FalsyComparator, FetchImplementationProvider, FetchStrategy, GetAllQuery, Index, IndexedColumn, InvalidSQLCharacters, LockType, LogLevel, MAX_AMOUNT_OF_COLUMNS, MAX_OP_ID, MEMORY_TRIGGER_CLAIM_MANAGER, Mutex, OnChangeQueryProcessor, PSInternalTable, PowerSyncControlCommand, RowUpdateType, Schema, Semaphore, SqliteBucketStorage, SyncClientImplementation, SyncProgress, SyncStatus, SyncStreamConnectionMethod, SyncingService, Table, TableV2, TriggerManagerImpl, UpdateType, UploadQueueStats, WatchedQueryListenerEvent, attachmentFromSql, column, compilableQueryWatch, createBaseLogger, createLogger, extractTableUpdates, isBatchedUpdateNotification, isDBAdapter, isPowerSyncDatabaseOptionsWithSettings, isSQLOpenFactory, isSQLOpenOptions, parseQuery, runOnSchemaChange, sanitizeSQL, sanitizeUUID, timeoutSignal };
 //# sourceMappingURL=bundle.node.mjs.map