@powersync/common 0.0.0-dev-20260414110516 → 0.0.0-dev-20260504100448
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/bundle.cjs +33 -665
- package/dist/bundle.cjs.map +1 -1
- package/dist/bundle.mjs +34 -654
- package/dist/bundle.mjs.map +1 -1
- package/dist/bundle.node.cjs +33 -665
- package/dist/bundle.node.cjs.map +1 -1
- package/dist/bundle.node.mjs +34 -654
- package/dist/bundle.node.mjs.map +1 -1
- package/dist/index.d.cts +22 -369
- package/legacy/sync_protocol.d.ts +103 -0
- package/lib/client/sync/bucket/BucketStorageAdapter.d.ts +1 -63
- package/lib/client/sync/bucket/BucketStorageAdapter.js.map +1 -1
- package/lib/client/sync/bucket/SqliteBucketStorage.d.ts +1 -28
- package/lib/client/sync/bucket/SqliteBucketStorage.js +0 -162
- package/lib/client/sync/bucket/SqliteBucketStorage.js.map +1 -1
- package/lib/client/sync/stream/AbstractRemote.d.ts +2 -12
- package/lib/client/sync/stream/AbstractRemote.js +3 -13
- package/lib/client/sync/stream/AbstractRemote.js.map +1 -1
- package/lib/client/sync/stream/AbstractStreamingSyncImplementation.d.ts +12 -35
- package/lib/client/sync/stream/AbstractStreamingSyncImplementation.js +29 -337
- package/lib/client/sync/stream/AbstractStreamingSyncImplementation.js.map +1 -1
- package/lib/client/sync/stream/JsonValue.d.ts +7 -0
- package/lib/client/sync/stream/JsonValue.js +2 -0
- package/lib/client/sync/stream/JsonValue.js.map +1 -0
- package/lib/client/sync/stream/core-instruction.d.ts +1 -2
- package/lib/client/sync/stream/core-instruction.js.map +1 -1
- package/lib/db/crud/SyncStatus.d.ts +0 -4
- package/lib/db/crud/SyncStatus.js +0 -4
- package/lib/db/crud/SyncStatus.js.map +1 -1
- package/lib/db/schema/RawTable.d.ts +0 -5
- package/lib/db/schema/Schema.d.ts +0 -2
- package/lib/db/schema/Schema.js +0 -2
- package/lib/db/schema/Schema.js.map +1 -1
- package/lib/index.d.ts +1 -5
- package/lib/index.js +1 -5
- package/lib/index.js.map +1 -1
- package/package.json +7 -4
- package/src/client/sync/bucket/BucketStorageAdapter.ts +1 -70
- package/src/client/sync/bucket/SqliteBucketStorage.ts +1 -197
- package/src/client/sync/stream/AbstractRemote.ts +5 -27
- package/src/client/sync/stream/AbstractStreamingSyncImplementation.ts +41 -407
- package/src/client/sync/stream/JsonValue.ts +8 -0
- package/src/client/sync/stream/core-instruction.ts +1 -2
- package/src/db/crud/SyncStatus.ts +0 -4
- package/src/db/schema/RawTable.ts +0 -5
- package/src/db/schema/Schema.ts +0 -2
- package/src/index.ts +1 -5
- package/lib/client/sync/bucket/OpType.d.ts +0 -16
- package/lib/client/sync/bucket/OpType.js +0 -23
- package/lib/client/sync/bucket/OpType.js.map +0 -1
- package/lib/client/sync/bucket/OplogEntry.d.ts +0 -23
- package/lib/client/sync/bucket/OplogEntry.js +0 -36
- package/lib/client/sync/bucket/OplogEntry.js.map +0 -1
- package/lib/client/sync/bucket/SyncDataBatch.d.ts +0 -6
- package/lib/client/sync/bucket/SyncDataBatch.js +0 -12
- package/lib/client/sync/bucket/SyncDataBatch.js.map +0 -1
- package/lib/client/sync/bucket/SyncDataBucket.d.ts +0 -40
- package/lib/client/sync/bucket/SyncDataBucket.js +0 -40
- package/lib/client/sync/bucket/SyncDataBucket.js.map +0 -1
- package/lib/client/sync/stream/streaming-sync-types.d.ts +0 -143
- package/lib/client/sync/stream/streaming-sync-types.js +0 -26
- package/lib/client/sync/stream/streaming-sync-types.js.map +0 -1
- package/src/client/sync/bucket/OpType.ts +0 -23
- package/src/client/sync/bucket/OplogEntry.ts +0 -50
- package/src/client/sync/bucket/SyncDataBatch.ts +0 -11
- package/src/client/sync/bucket/SyncDataBucket.ts +0 -49
- package/src/client/sync/stream/streaming-sync-types.ts +0 -210
package/dist/bundle.mjs
CHANGED
@@ -2129,16 +2129,12 @@ class SyncStatus {
  *
  * This returns null when the database is currently being opened and we don't have reliable information about all
  * included streams yet.
- *
- * @experimental Sync streams are currently in alpha.
  */
  get syncStreams() {
  return this.options.dataFlow?.internalStreamSubscriptions?.map((core) => new SyncStreamStatusView(this, core));
  }
  /**
  * If the `stream` appears in {@link syncStreams}, returns the current status for that stream.
- *
- * @experimental Sync streams are currently in alpha.
  */
  forStream(stream) {
  const asJson = JSON.stringify(stream.parameters);
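
For readers following the `syncStreams` / `forStream` API touched above, a minimal status-inspection sketch. This is an illustration, not part of the diff: `db` stands for an opened PowerSync database from one of the platform SDKs, and only `connected`, `hasSynced` and the `syncStreams` getter shown in this hunk are assumed to exist on `SyncStatus`.

```js
// Sketch: reading sync status, including the per-stream views returned by the
// syncStreams getter above. `db` is assumed to be an opened PowerSync database.
const status = db.currentStatus;
console.log('connected:', status.connected, 'hasSynced:', status.hasSynced);

// syncStreams is undefined while the database is still being opened (see the doc comment above).
for (const streamStatus of status.syncStreams ?? []) {
  console.log('stream status:', streamStatus);
}
```
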
@@ -3491,103 +3487,6 @@ class AbortOperation extends Error {
  }
  }
  
- var OpTypeEnum;
- (function (OpTypeEnum) {
- OpTypeEnum[OpTypeEnum["CLEAR"] = 1] = "CLEAR";
- OpTypeEnum[OpTypeEnum["MOVE"] = 2] = "MOVE";
- OpTypeEnum[OpTypeEnum["PUT"] = 3] = "PUT";
- OpTypeEnum[OpTypeEnum["REMOVE"] = 4] = "REMOVE";
- })(OpTypeEnum || (OpTypeEnum = {}));
- /**
- * Used internally for sync buckets.
- */
- class OpType {
- value;
- static fromJSON(jsonValue) {
- return new OpType(OpTypeEnum[jsonValue]);
- }
- constructor(value) {
- this.value = value;
- }
- toJSON() {
- return Object.entries(OpTypeEnum).find(([, value]) => value === this.value)[0];
- }
- }
-
- class OplogEntry {
- op_id;
- op;
- checksum;
- subkey;
- object_type;
- object_id;
- data;
- static fromRow(row) {
- return new OplogEntry(row.op_id, OpType.fromJSON(row.op), row.checksum, row.subkey, row.object_type, row.object_id, row.data);
- }
- constructor(op_id, op, checksum, subkey, object_type, object_id, data) {
- this.op_id = op_id;
- this.op = op;
- this.checksum = checksum;
- this.subkey = subkey;
- this.object_type = object_type;
- this.object_id = object_id;
- this.data = data;
- }
- toJSON(fixedKeyEncoding = false) {
- return {
- op_id: this.op_id,
- op: this.op.toJSON(),
- object_type: this.object_type,
- object_id: this.object_id,
- checksum: this.checksum,
- data: this.data,
- // Older versions of the JS SDK used to always JSON.stringify here. That has always been wrong,
- // but we need to migrate gradually to not break existing databases.
- subkey: fixedKeyEncoding ? this.subkey : JSON.stringify(this.subkey)
- };
- }
- }
-
- class SyncDataBucket {
- bucket;
- data;
- has_more;
- after;
- next_after;
- static fromRow(row) {
- return new SyncDataBucket(row.bucket, row.data.map((entry) => OplogEntry.fromRow(entry)), row.has_more ?? false, row.after, row.next_after);
- }
- constructor(bucket, data,
- /**
- * True if the response does not contain all the data for this bucket, and another request must be made.
- */
- has_more,
- /**
- * The `after` specified in the request.
- */
- after,
- /**
- * Use this for the next request.
- */
- next_after) {
- this.bucket = bucket;
- this.data = data;
- this.has_more = has_more;
- this.after = after;
- this.next_after = next_after;
- }
- toJSON(fixedKeyEncoding = false) {
- return {
- bucket: this.bucket,
- has_more: this.has_more,
- after: this.after,
- next_after: this.next_after,
- data: this.data.map((entry) => entry.toJSON(fixedKeyEncoding))
- };
- }
- }
-
  var buffer = {};
  
  var base64Js = {};
@@ -10743,7 +10642,7 @@ function requireDist () {
  
  var distExports = requireDist();
  
- var version = "1.
+ var version = "1.52.0";
  var PACKAGE = {
  version: version};
  
@@ -11300,22 +11199,12 @@ class AbstractRemote {
  * Returns a data stream of sync line data, fetched via RSocket-over-WebSocket.
  *
  * The only mechanism to abort the returned stream is to use the abort signal in {@link SocketSyncStreamOptions}.
- *
- * @param bson A BSON encoder and decoder. When set, the data stream will be requested with a BSON payload
- * (required for compatibility with older sync services).
  */
- async socketStreamRaw(options
+ async socketStreamRaw(options) {
  const { path, fetchStrategy = FetchStrategy.Buffered } = options;
- const mimeType =
+ const mimeType = 'application/json';
  function toBuffer(js) {
-
- if (bson != null) {
- contents = bson.serialize(js);
- }
- else {
- contents = JSON.stringify(js);
- }
- return bufferExports.Buffer.from(contents);
+ return bufferExports.Buffer.from(JSON.stringify(js));
  }
  const syncQueueRequestSize = fetchStrategy == FetchStrategy.Buffered ? 10 : 1;
  const request = await this.buildRequest(path);
@@ -11617,32 +11506,6 @@ function coreStatusToJs(status) {
  };
  }
  
- function isStreamingSyncData(line) {
- return line.data != null;
- }
- function isStreamingKeepalive(line) {
- return line.token_expires_in != null;
- }
- function isStreamingSyncCheckpoint(line) {
- return line.checkpoint != null;
- }
- function isStreamingSyncCheckpointComplete(line) {
- return line.checkpoint_complete != null;
- }
- function isStreamingSyncCheckpointPartiallyComplete(line) {
- return line.partial_checkpoint_complete != null;
- }
- function isStreamingSyncCheckpointDiff(line) {
- return line.checkpoint_diff != null;
- }
- function isContinueCheckpointRequest(request) {
- return (Array.isArray(request.buckets) &&
- typeof request.checkpoint_token == 'string');
- }
- function isSyncNewCheckpointRequest(request) {
- return typeof request.request_checkpoint == 'object';
- }
-
  var LockType;
  (function (LockType) {
  LockType["CRUD"] = "crud";
@@ -11655,35 +11518,21 @@ var SyncStreamConnectionMethod;
  })(SyncStreamConnectionMethod || (SyncStreamConnectionMethod = {}));
  var SyncClientImplementation;
  (function (SyncClientImplementation) {
- /**
- * Decodes and handles sync lines received from the sync service in JavaScript.
- *
- * This is the default option.
- *
- * @deprecated We recommend the {@link RUST} client implementation for all apps. If you have issues with
- * the Rust client, please file an issue or reach out to us. The JavaScript client will be removed in a future
- * version of the PowerSync SDK.
- */
- SyncClientImplementation["JAVASCRIPT"] = "js";
  /**
  * This implementation offloads the sync line decoding and handling into the PowerSync
  * core extension.
  *
- * This
- * recommended client implementation for all apps.
+ * This is the only option, as an older JavaScript client implementation has been removed from the SDK.
  *
  * ## Compatibility warning
  *
  * The Rust sync client stores sync data in a format that is slightly different than the one used
- * by the old
- *
- * Further, the {@link JAVASCRIPT} client in recent versions of the PowerSync JS SDK (starting from
- * the version introducing {@link RUST} as an option) also supports the new format, so you can switch
- * back to {@link JAVASCRIPT} later.
+ * by the old JavaScript client. When adopting the {@link RUST} client on existing databases, the PowerSync SDK will
+ * migrate the format automatically.
  *
- *
- *
- *
+ * SDK versions supporting both the JavaScript and the Rust client support both formats with the JavaScript client
+ * implementaiton. However, downgrading to an SDK version that only supports the JavaScript client would not be
+ * possible anymore. Problematic SDK versions have been released before 2025-06-09.
  */
  SyncClientImplementation["RUST"] = "rust";
  })(SyncClientImplementation || (SyncClientImplementation = {}));
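
The hunk above makes the Rust-based sync client the only implementation shipped by this package. As a hedged usage sketch (not part of the diff): `PowerSyncDatabase` and the backend connector come from the platform SDKs rather than from `@powersync/common`, while `SyncClientImplementation` and `SyncStreamConnectionMethod` appear in this bundle's exports; the exact connect option names are assumed from the wider PowerSync JS SDK.

```js
import { SyncClientImplementation, SyncStreamConnectionMethod } from '@powersync/common';

// Sketch: connecting with the Rust sync client over WebSockets.
// `db` (a PowerSyncDatabase) and `connector` are assumed to exist in the application.
await db.connect(connector, {
  clientImplementation: SyncClientImplementation.RUST,
  connectionMethod: SyncStreamConnectionMethod.WEB_SOCKET
});
```
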
@@ -11706,13 +11555,7 @@ const DEFAULT_STREAM_CONNECTION_OPTIONS = {
  serializedSchema: undefined,
  includeDefaultStreams: true
  };
- // The priority we assume when we receive checkpoint lines where no priority is set.
- // This is the default priority used by the sync service, but can be set to an arbitrary
- // value since sync services without priorities also won't send partial sync completion
- // messages.
- const FALLBACK_PRIORITY = 3;
  class AbstractStreamingSyncImplementation extends BaseObserver {
- _lastSyncedAt;
  options;
  abortController;
  // In rare cases, mostly for tests, uploads can be triggered without being properly connected.
@@ -11722,6 +11565,7 @@ class AbstractStreamingSyncImplementation extends BaseObserver {
  streamingSyncPromise;
  logger;
  activeStreams;
+ connectionMayHaveChanged = false;
  isUploadingCrud = false;
  notifyCompletedUploads;
  handleActiveStreamsChange;
@@ -11801,9 +11645,6 @@ class AbstractStreamingSyncImplementation extends BaseObserver {
  this.crudUpdateListener = undefined;
  this.uploadAbortController?.abort();
  }
- async hasCompletedSync() {
- return this.options.adapter.hasCompletedSync();
- }
  async getWriteCheckpoint() {
  const clientId = await this.options.adapter.getClientId();
  let path = `/write-checkpoint2.json?client_id=${clientId}`;
@@ -11885,7 +11726,7 @@ The next upload iteration will be delayed.`);
  });
  }
  }
- this.uploadAbortController =
+ this.uploadAbortController = undefined;
  }
  });
  }
@@ -12001,6 +11842,11 @@ The next upload iteration will be delayed.`);
  shouldDelayRetry = false;
  // A disconnect was requested, we should not delay since there is no explicit retry
  }
+ else if (this.connectionMayHaveChanged && ex.message?.indexOf('No iteration is active') >= 0) {
+ this.connectionMayHaveChanged = false;
+ this.logger.info('Sync error after changed connection, retrying immediately');
+ shouldDelayRetry = false;
+ }
  else {
  this.logger.error(ex);
  }
@@ -12031,17 +11877,14 @@ The next upload iteration will be delayed.`);
  // Mark as disconnected if here
  this.updateSyncStatus({ connected: false, connecting: false });
  }
-
-
-
-
-
-
-
-
- localDescriptions.set(entry.bucket, null);
- }
- return [req, localDescriptions];
+ markConnectionMayHaveChanged() {
+ // By setting this field, we'll immediately retry if the next sync event causes an error triggered by us not having
+ // an active sync iteration on the connection in use.
+ this.connectionMayHaveChanged = true;
+ // This triggers a `powersync_control` invocation if a sync iteration is currently active. This is a cheap call to
+ // make when no subscriptions have actually changed, we're mainly interested in this immediately throwing if no
+ // iteration is active. That allows us to reconnect ASAP, instead of having to wait for the next sync line.
+ this.handleActiveStreamsChange?.();
  }
  /**
  * Older versions of the JS SDK used to encode subkeys as JSON in {@link OplogEntry.toJSON}.
@@ -12084,19 +11927,13 @@ The next upload iteration will be delayed.`);
  }
  const clientImplementation = resolvedOptions.clientImplementation;
  this.updateSyncStatus({ clientImplementation });
-
-
- return null;
- }
- else {
- await this.requireKeyFormat(true);
- return await this.rustSyncIteration(signal, resolvedOptions);
- }
+ await this.requireKeyFormat(true);
+ return await this.rustSyncIteration(signal, resolvedOptions);
  }
  });
  }
  async receiveSyncLines(data) {
- const { options, connection
+ const { options, connection } = data;
  const remote = this.options.remote;
  if (connection.connectionMethod == SyncStreamConnectionMethod.HTTP) {
  return await remote.fetchStream(options);
@@ -12105,232 +11942,8 @@ The next upload iteration will be delayed.`);
  return await this.options.remote.socketStreamRaw({
  ...options,
  ...{ fetchStrategy: connection.fetchStrategy }
- }
- }
- }
- async legacyStreamingSyncIteration(signal, resolvedOptions) {
- const rawTables = resolvedOptions.serializedSchema?.raw_tables;
- if (rawTables != null && rawTables.length) {
- this.logger.warn('Raw tables require the Rust-based sync client. The JS client will ignore them.');
- }
- if (this.activeStreams.length) {
- this.logger.error('Sync streams require `clientImplementation: SyncClientImplementation.RUST` when connecting.');
- }
- this.logger.debug('Streaming sync iteration started');
- this.options.adapter.startSession();
- let [req, bucketMap] = await this.collectLocalBucketState();
- let targetCheckpoint = null;
- // A checkpoint that has been validated but not applied (e.g. due to pending local writes)
- let pendingValidatedCheckpoint = null;
- const clientId = await this.options.adapter.getClientId();
- const usingFixedKeyFormat = await this.requireKeyFormat(false);
- this.logger.debug('Requesting stream from server');
- const syncOptions = {
- path: '/sync/stream',
- abortSignal: signal,
- data: {
- buckets: req,
- include_checksum: true,
- raw_data: true,
- parameters: resolvedOptions.params,
- app_metadata: resolvedOptions.appMetadata,
- client_id: clientId
- }
- };
- const bson = await this.options.remote.getBSON();
- const source = await this.receiveSyncLines({
- options: syncOptions,
- connection: resolvedOptions,
- bson
- });
- const stream = injectable(map(source, (line) => {
- if (typeof line == 'string') {
- return JSON.parse(line);
- }
- else {
- return bson.deserialize(line);
- }
- }));
- this.logger.debug('Stream established. Processing events');
- this.notifyCompletedUploads = () => {
- stream.inject({ crud_upload_completed: null });
- };
- while (true) {
- const { value: line, done } = await stream.next();
- if (done) {
- // The stream has closed while waiting
- return;
- }
- if ('crud_upload_completed' in line) {
- if (pendingValidatedCheckpoint != null) {
- const { applied, endIteration } = await this.applyCheckpoint(pendingValidatedCheckpoint);
- if (applied) {
- pendingValidatedCheckpoint = null;
- }
- else if (endIteration) {
- break;
- }
- }
- continue;
- }
- // A connection is active and messages are being received
- if (!this.syncStatus.connected) {
- // There is a connection now
- Promise.resolve().then(() => this.triggerCrudUpload());
- this.updateSyncStatus({
- connected: true
- });
- }
- if (isStreamingSyncCheckpoint(line)) {
- targetCheckpoint = line.checkpoint;
- // New checkpoint - existing validated checkpoint is no longer valid
- pendingValidatedCheckpoint = null;
- const bucketsToDelete = new Set(bucketMap.keys());
- const newBuckets = new Map();
- for (const checksum of line.checkpoint.buckets) {
- newBuckets.set(checksum.bucket, {
- name: checksum.bucket,
- priority: checksum.priority ?? FALLBACK_PRIORITY
- });
- bucketsToDelete.delete(checksum.bucket);
- }
- if (bucketsToDelete.size > 0) {
- this.logger.debug('Removing buckets', [...bucketsToDelete]);
- }
- bucketMap = newBuckets;
- await this.options.adapter.removeBuckets([...bucketsToDelete]);
- await this.options.adapter.setTargetCheckpoint(targetCheckpoint);
- await this.updateSyncStatusForStartingCheckpoint(targetCheckpoint);
- }
- else if (isStreamingSyncCheckpointComplete(line)) {
- const result = await this.applyCheckpoint(targetCheckpoint);
- if (result.endIteration) {
- return;
- }
- else if (!result.applied) {
- // "Could not apply checkpoint due to local data". We need to retry after
- // finishing uploads.
- pendingValidatedCheckpoint = targetCheckpoint;
- }
- else {
- // Nothing to retry later. This would likely already be null from the last
- // checksum or checksum_diff operation, but we make sure.
- pendingValidatedCheckpoint = null;
- }
- }
- else if (isStreamingSyncCheckpointPartiallyComplete(line)) {
- const priority = line.partial_checkpoint_complete.priority;
- this.logger.debug('Partial checkpoint complete', priority);
- const result = await this.options.adapter.syncLocalDatabase(targetCheckpoint, priority);
- if (!result.checkpointValid) {
- // This means checksums failed. Start again with a new checkpoint.
- // TODO: better back-off
- await new Promise((resolve) => setTimeout(resolve, 50));
- return;
- }
- else if (!result.ready) ;
- else {
- // We'll keep on downloading, but can report that this priority is synced now.
- this.logger.debug('partial checkpoint validation succeeded');
- // All states with a higher priority can be deleted since this partial sync includes them.
- const priorityStates = this.syncStatus.priorityStatusEntries.filter((s) => s.priority <= priority);
- priorityStates.push({
- priority,
- lastSyncedAt: new Date(),
- hasSynced: true
- });
- this.updateSyncStatus({
- connected: true,
- priorityStatusEntries: priorityStates
- });
- }
- }
- else if (isStreamingSyncCheckpointDiff(line)) {
- // TODO: It may be faster to just keep track of the diff, instead of the entire checkpoint
- if (targetCheckpoint == null) {
- throw new Error('Checkpoint diff without previous checkpoint');
- }
- // New checkpoint - existing validated checkpoint is no longer valid
- pendingValidatedCheckpoint = null;
- const diff = line.checkpoint_diff;
- const newBuckets = new Map();
- for (const checksum of targetCheckpoint.buckets) {
- newBuckets.set(checksum.bucket, checksum);
- }
- for (const checksum of diff.updated_buckets) {
- newBuckets.set(checksum.bucket, checksum);
- }
- for (const bucket of diff.removed_buckets) {
- newBuckets.delete(bucket);
- }
- const newCheckpoint = {
- last_op_id: diff.last_op_id,
- buckets: [...newBuckets.values()],
- write_checkpoint: diff.write_checkpoint
- };
- targetCheckpoint = newCheckpoint;
- await this.updateSyncStatusForStartingCheckpoint(targetCheckpoint);
- bucketMap = new Map();
- newBuckets.forEach((checksum, name) => bucketMap.set(name, {
- name: checksum.bucket,
- priority: checksum.priority ?? FALLBACK_PRIORITY
- }));
- const bucketsToDelete = diff.removed_buckets;
- if (bucketsToDelete.length > 0) {
- this.logger.debug('Remove buckets', bucketsToDelete);
- }
- await this.options.adapter.removeBuckets(bucketsToDelete);
- await this.options.adapter.setTargetCheckpoint(targetCheckpoint);
- }
- else if (isStreamingSyncData(line)) {
- const { data } = line;
- const previousProgress = this.syncStatus.dataFlowStatus.downloadProgress;
- let updatedProgress = null;
- if (previousProgress) {
- updatedProgress = { ...previousProgress };
- const progressForBucket = updatedProgress[data.bucket];
- if (progressForBucket) {
- updatedProgress[data.bucket] = {
- ...progressForBucket,
- since_last: progressForBucket.since_last + data.data.length
- };
- }
- }
- this.updateSyncStatus({
- dataFlow: {
- downloading: true,
- downloadProgress: updatedProgress
- }
- });
- await this.options.adapter.saveSyncData({ buckets: [SyncDataBucket.fromRow(data)] }, usingFixedKeyFormat);
- }
- else if (isStreamingKeepalive(line)) {
- const remaining_seconds = line.token_expires_in;
- if (remaining_seconds == 0) {
- // Connection would be closed automatically right after this
- this.logger.debug('Token expiring; reconnect');
- /**
- * For a rare case where the backend connector does not update the token
- * (uses the same one), this should have some delay.
- */
- await this.delayRetry();
- return;
- }
- else if (remaining_seconds < 30) {
- this.logger.debug('Token will expire soon; reconnect');
- // Pre-emptively refresh the token
- this.options.remote.invalidateCredentials();
- return;
- }
- this.triggerCrudUpload();
- }
- else {
- this.logger.debug('Received unknown sync line', line);
- }
+ });
  }
- this.logger.debug('Stream input empty');
- // Connection closed. Likely due to auth issue.
- return;
  }
  async rustSyncIteration(signal, resolvedOptions) {
  const syncImplementation = this;
@@ -12403,6 +12016,10 @@ The next upload iteration will be delayed.`);
  const rawResponse = await adapter.control(op, payload ?? null);
  const logger = syncImplementation.logger;
  logger.trace('powersync_control', op, payload == null || typeof payload == 'string' ? payload : '<bytes>', rawResponse);
+ if (op != PowerSyncControlCommand.STOP) {
+ // Evidently we have a working connection here, otherwise powersync_control would have failed.
+ syncImplementation.connectionMayHaveChanged = false;
+ }
  await handleInstructions(JSON.parse(rawResponse));
  }
  async function handleInstruction(instruction) {
@@ -12489,68 +12106,6 @@ The next upload iteration will be delayed.`);
  }
  return { immediateRestart: hideDisconnectOnRestart };
  }
- async updateSyncStatusForStartingCheckpoint(checkpoint) {
- const localProgress = await this.options.adapter.getBucketOperationProgress();
- const progress = {};
- let invalidated = false;
- for (const bucket of checkpoint.buckets) {
- const savedProgress = localProgress[bucket.bucket];
- const atLast = savedProgress?.atLast ?? 0;
- const sinceLast = savedProgress?.sinceLast ?? 0;
- progress[bucket.bucket] = {
- // The fallback priority doesn't matter here, but 3 is the one newer versions of the sync service
- // will use by default.
- priority: bucket.priority ?? 3,
- at_last: atLast,
- since_last: sinceLast,
- target_count: bucket.count ?? 0
- };
- if (bucket.count != null && bucket.count < atLast + sinceLast) {
- // Either due to a defrag / sync rule deploy or a compaction operation, the size
- // of the bucket shrank so much that the local ops exceed the ops in the updated
- // bucket. We can't prossibly report progress in this case (it would overshoot 100%).
- invalidated = true;
- }
- }
- if (invalidated) {
- for (const bucket in progress) {
- const bucketProgress = progress[bucket];
- bucketProgress.at_last = 0;
- bucketProgress.since_last = 0;
- }
- }
- this.updateSyncStatus({
- dataFlow: {
- downloading: true,
- downloadProgress: progress
- }
- });
- }
- async applyCheckpoint(checkpoint) {
- let result = await this.options.adapter.syncLocalDatabase(checkpoint);
- if (!result.checkpointValid) {
- this.logger.debug(`Checksum mismatch in checkpoint ${checkpoint.last_op_id}, will reconnect`);
- // This means checksums failed. Start again with a new checkpoint.
- // TODO: better back-off
- await new Promise((resolve) => setTimeout(resolve, 50));
- return { applied: false, endIteration: true };
- }
- else if (!result.ready) {
- this.logger.debug(`Could not apply checkpoint ${checkpoint.last_op_id} due to local data. We will retry applying the checkpoint after that upload is completed.`);
- return { applied: false, endIteration: false };
- }
- this.logger.debug(`Applied checkpoint ${checkpoint.last_op_id}`, checkpoint);
- this.updateSyncStatus({
- connected: true,
- lastSyncedAt: new Date(),
- dataFlow: {
- downloading: false,
- downloadProgress: null,
- downloadError: undefined
- }
- });
- return { applied: true, endIteration: false };
- }
  updateSyncStatus(options) {
  const updatedStatus = new SyncStatus({
  connected: options.connected ?? this.syncStatus.connected,
@@ -14089,14 +13644,12 @@ class SqliteBucketStorage extends BaseObserver {
  db;
  logger;
  tableNames;
- _hasCompletedSync;
  updateListener;
  _clientId;
  constructor(db, logger = Logger.get('SqliteBucketStorage')) {
  super();
  this.db = db;
  this.logger = logger;
- this._hasCompletedSync = false;
  this.tableNames = new Set();
  this.updateListener = db.registerListener({
  tablesUpdated: (update) => {
@@ -14108,7 +13661,6 @@ class SqliteBucketStorage extends BaseObserver {
  });
  }
  async init() {
- this._hasCompletedSync = false;
  const existingTableRows = await this.db.getAll(`SELECT name FROM sqlite_master WHERE type='table' AND name GLOB 'ps_data_*'`);
  for (const row of existingTableRows ?? []) {
  this.tableNames.add(row.name);
@@ -14130,156 +13682,6 @@ class SqliteBucketStorage extends BaseObserver {
  getMaxOpId() {
  return MAX_OP_ID;
  }
- /**
- * Reset any caches.
- */
- startSession() { }
- async getBucketStates() {
- const result = await this.db.getAll("SELECT name as bucket, cast(last_op as TEXT) as op_id FROM ps_buckets WHERE pending_delete = 0 AND name != '$local'");
- return result;
- }
- async getBucketOperationProgress() {
- const rows = await this.db.getAll('SELECT name, count_at_last, count_since_last FROM ps_buckets');
- return Object.fromEntries(rows.map((r) => [r.name, { atLast: r.count_at_last, sinceLast: r.count_since_last }]));
- }
- async saveSyncData(batch, fixedKeyFormat = false) {
- await this.writeTransaction(async (tx) => {
- for (const b of batch.buckets) {
- await tx.execute('INSERT INTO powersync_operations(op, data) VALUES(?, ?)', [
- 'save',
- JSON.stringify({ buckets: [b.toJSON(fixedKeyFormat)] })
- ]);
- this.logger.debug(`Saved batch of data for bucket: ${b.bucket}, operations: ${b.data.length}`);
- }
- });
- }
- async removeBuckets(buckets) {
- for (const bucket of buckets) {
- await this.deleteBucket(bucket);
- }
- }
- /**
- * Mark a bucket for deletion.
- */
- async deleteBucket(bucket) {
- await this.writeTransaction(async (tx) => {
- await tx.execute('INSERT INTO powersync_operations(op, data) VALUES(?, ?)', ['delete_bucket', bucket]);
- });
- this.logger.debug(`Done deleting bucket ${bucket}`);
- }
- async hasCompletedSync() {
- if (this._hasCompletedSync) {
- return true;
- }
- const r = await this.db.get(`SELECT powersync_last_synced_at() as synced_at`);
- const completed = r.synced_at != null;
- if (completed) {
- this._hasCompletedSync = true;
- }
- return completed;
- }
- async syncLocalDatabase(checkpoint, priority) {
- const r = await this.validateChecksums(checkpoint, priority);
- if (!r.checkpointValid) {
- this.logger.error('Checksums failed for', r.checkpointFailures);
- for (const b of r.checkpointFailures ?? []) {
- await this.deleteBucket(b);
- }
- return { ready: false, checkpointValid: false, checkpointFailures: r.checkpointFailures };
- }
- if (priority == null) {
- this.logger.debug(`Validated checksums checkpoint ${checkpoint.last_op_id}`);
- }
- else {
- this.logger.debug(`Validated checksums for partial checkpoint ${checkpoint.last_op_id}, priority ${priority}`);
- }
- let buckets = checkpoint.buckets;
- if (priority !== undefined) {
- buckets = buckets.filter((b) => hasMatchingPriority(priority, b));
- }
- const bucketNames = buckets.map((b) => b.bucket);
- await this.writeTransaction(async (tx) => {
- await tx.execute(`UPDATE ps_buckets SET last_op = ? WHERE name IN (SELECT json_each.value FROM json_each(?))`, [
- checkpoint.last_op_id,
- JSON.stringify(bucketNames)
- ]);
- if (priority == null && checkpoint.write_checkpoint) {
- await tx.execute("UPDATE ps_buckets SET last_op = ? WHERE name = '$local'", [checkpoint.write_checkpoint]);
- }
- });
- const valid = await this.updateObjectsFromBuckets(checkpoint, priority);
- if (!valid) {
- return { ready: false, checkpointValid: true };
- }
- return {
- ready: true,
- checkpointValid: true
- };
- }
- /**
- * Atomically update the local state to the current checkpoint.
- *
- * This includes creating new tables, dropping old tables, and copying data over from the oplog.
- */
- async updateObjectsFromBuckets(checkpoint, priority) {
- let arg = '';
- if (priority !== undefined) {
- const affectedBuckets = [];
- for (const desc of checkpoint.buckets) {
- if (hasMatchingPriority(priority, desc)) {
- affectedBuckets.push(desc.bucket);
- }
- }
- arg = JSON.stringify({ priority, buckets: affectedBuckets });
- }
- return this.writeTransaction(async (tx) => {
- const { insertId: result } = await tx.execute('INSERT INTO powersync_operations(op, data) VALUES(?, ?)', [
- 'sync_local',
- arg
- ]);
- if (result == 1) {
- if (priority == null) {
- const bucketToCount = Object.fromEntries(checkpoint.buckets.map((b) => [b.bucket, b.count]));
- // The two parameters could be replaced with one, but: https://github.com/powersync-ja/better-sqlite3/pull/6
- const jsonBucketCount = JSON.stringify(bucketToCount);
- await tx.execute("UPDATE ps_buckets SET count_since_last = 0, count_at_last = ?->name WHERE name != '$local' AND ?->name IS NOT NULL", [jsonBucketCount, jsonBucketCount]);
- }
- return true;
- }
- else {
- return false;
- }
- });
- }
- async validateChecksums(checkpoint, priority) {
- if (priority !== undefined) {
- // Only validate the buckets within the priority we care about
- const newBuckets = checkpoint.buckets.filter((cs) => hasMatchingPriority(priority, cs));
- checkpoint = { ...checkpoint, buckets: newBuckets };
- }
- const rs = await this.db.execute('SELECT powersync_validate_checkpoint(?) as result', [
- JSON.stringify({ ...checkpoint })
- ]);
- const resultItem = rs.rows?.item(0);
- if (!resultItem) {
- return {
- checkpointValid: false,
- ready: false,
- checkpointFailures: []
- };
- }
- const result = JSON.parse(resultItem['result']);
- if (result['valid']) {
- return { ready: true, checkpointValid: true };
- }
- else {
- return {
- checkpointValid: false,
- ready: false,
- checkpointFailures: result['failed_buckets']
- };
- }
- }
  async updateLocalTarget(cb) {
  const rs1 = await this.db.getAll("SELECT target_op FROM ps_buckets WHERE name = '$local' AND target_op = CAST(? as INTEGER)", [MAX_OP_ID]);
  if (!rs1.length) {
@@ -14370,12 +13772,6 @@ class SqliteBucketStorage extends BaseObserver {
  async writeTransaction(callback, options) {
  return this.db.writeTransaction(callback, options);
  }
- /**
- * Set a target checkpoint.
- */
- async setTargetCheckpoint(checkpoint) {
- // No-op for now
- }
  async control(op, payload) {
  return await this.writeTransaction(async (tx) => {
  const [[raw]] = await tx.executeRaw('SELECT powersync_control(?, ?)', [op, payload]);
@@ -14399,20 +13795,6 @@ class SqliteBucketStorage extends BaseObserver {
  }
  static _subkeyMigrationKey = 'powersync_js_migrated_subkeys';
  }
- function hasMatchingPriority(priority, bucket) {
- return bucket.priority != null && bucket.priority <= priority;
- }
-
- // TODO JSON
- class SyncDataBatch {
- buckets;
- static fromJSON(json) {
- return new SyncDataBatch(json.buckets.map((bucket) => SyncDataBucket.fromRow(bucket)));
- }
- constructor(buckets) {
- this.buckets = buckets;
- }
- }
  
  /**
  * Thrown when an underlying database connection is closed.
@@ -14472,10 +13854,8 @@ class Schema {
  * developer instead of automatically by PowerSync.
  * Since raw tables are not backed by JSON, running complex queries on them may be more efficient. Further, they allow
  * using client-side table and column constraints.
- * Note that raw tables are only supported when using the new `SyncClientImplementation.rust` sync client.
  *
  * @param tables An object of (table name, raw table definition) entries.
- * @experimental Note that the raw tables API is still experimental and may change in the future.
  */
  withRawTables(tables) {
  for (const [name, rawTableDefinition] of Object.entries(tables)) {
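
Related to the `withRawTables` doc change above, a heavily hedged sketch of attaching a raw table definition to a schema. `Schema`, `Table` and `column` appear in this bundle's exports, but the exact shape of the raw table definition object (`put`/`delete` statements with parameter mappings) is an assumption based on the PowerSync raw-tables documentation and may not match this package version exactly.

```js
import { Schema, Table, column } from '@powersync/common';

// Sketch only: the raw table definition shape below is assumed, not taken from this diff.
const schema = new Schema({
  todos: new Table({ description: column.text })
}).withRawTables({
  lists: {
    put: {
      sql: 'INSERT OR REPLACE INTO lists (id, name) VALUES (?, ?)',
      params: ['Id', { Column: 'name' }]
    },
    delete: {
      sql: 'DELETE FROM lists WHERE id = ?',
      params: ['Id']
    }
  }
});
```
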
@@ -14672,5 +14052,5 @@ const parseQuery = (query, parameters) => {
  return { sqlStatement, parameters: parameters };
  };
  
- export { ATTACHMENT_TABLE, AbortOperation, AbstractPowerSyncDatabase, AbstractPowerSyncDatabaseOpenFactory, AbstractQueryProcessor, AbstractRemote, AbstractStreamingSyncImplementation, ArrayComparator, AttachmentContext, AttachmentQueue, AttachmentService, AttachmentState, AttachmentTable, BaseObserver, Column, ColumnType, ConnectionClosedError, ConnectionManager, ControlledExecutor, CrudBatch, CrudEntry, CrudTransaction, DBAdapterDefaultMixin, DBGetUtilsDefaultMixin, DEFAULT_CRUD_BATCH_LIMIT, DEFAULT_CRUD_UPLOAD_THROTTLE_MS, DEFAULT_INDEX_COLUMN_OPTIONS, DEFAULT_INDEX_OPTIONS, DEFAULT_LOCK_TIMEOUT_MS, DEFAULT_POWERSYNC_CLOSE_OPTIONS, DEFAULT_POWERSYNC_DB_OPTIONS, DEFAULT_REMOTE_LOGGER, DEFAULT_REMOTE_OPTIONS, DEFAULT_RETRY_DELAY_MS, DEFAULT_ROW_COMPARATOR, DEFAULT_STREAMING_SYNC_OPTIONS, DEFAULT_STREAM_CONNECTION_OPTIONS, DEFAULT_SYNC_CLIENT_IMPLEMENTATION, DEFAULT_TABLE_OPTIONS, DEFAULT_WATCH_QUERY_OPTIONS, DEFAULT_WATCH_THROTTLE_MS, DiffTriggerOperation, DifferentialQueryProcessor, EMPTY_DIFFERENTIAL, EncodingType, FalsyComparator, FetchImplementationProvider, FetchStrategy, GetAllQuery, Index, IndexedColumn, InvalidSQLCharacters, LockType, LogLevel, MAX_AMOUNT_OF_COLUMNS, MAX_OP_ID, MEMORY_TRIGGER_CLAIM_MANAGER, Mutex, OnChangeQueryProcessor,
+ export { ATTACHMENT_TABLE, AbortOperation, AbstractPowerSyncDatabase, AbstractPowerSyncDatabaseOpenFactory, AbstractQueryProcessor, AbstractRemote, AbstractStreamingSyncImplementation, ArrayComparator, AttachmentContext, AttachmentQueue, AttachmentService, AttachmentState, AttachmentTable, BaseObserver, Column, ColumnType, ConnectionClosedError, ConnectionManager, ControlledExecutor, CrudBatch, CrudEntry, CrudTransaction, DBAdapterDefaultMixin, DBGetUtilsDefaultMixin, DEFAULT_CRUD_BATCH_LIMIT, DEFAULT_CRUD_UPLOAD_THROTTLE_MS, DEFAULT_INDEX_COLUMN_OPTIONS, DEFAULT_INDEX_OPTIONS, DEFAULT_LOCK_TIMEOUT_MS, DEFAULT_POWERSYNC_CLOSE_OPTIONS, DEFAULT_POWERSYNC_DB_OPTIONS, DEFAULT_REMOTE_LOGGER, DEFAULT_REMOTE_OPTIONS, DEFAULT_RETRY_DELAY_MS, DEFAULT_ROW_COMPARATOR, DEFAULT_STREAMING_SYNC_OPTIONS, DEFAULT_STREAM_CONNECTION_OPTIONS, DEFAULT_SYNC_CLIENT_IMPLEMENTATION, DEFAULT_TABLE_OPTIONS, DEFAULT_WATCH_QUERY_OPTIONS, DEFAULT_WATCH_THROTTLE_MS, DiffTriggerOperation, DifferentialQueryProcessor, EMPTY_DIFFERENTIAL, EncodingType, FalsyComparator, FetchImplementationProvider, FetchStrategy, GetAllQuery, Index, IndexedColumn, InvalidSQLCharacters, LockType, LogLevel, MAX_AMOUNT_OF_COLUMNS, MAX_OP_ID, MEMORY_TRIGGER_CLAIM_MANAGER, Mutex, OnChangeQueryProcessor, PSInternalTable, PowerSyncControlCommand, RowUpdateType, Schema, Semaphore, SqliteBucketStorage, SyncClientImplementation, SyncProgress, SyncStatus, SyncStreamConnectionMethod, SyncingService, Table, TableV2, TriggerManagerImpl, UpdateType, UploadQueueStats, WatchedQueryListenerEvent, attachmentFromSql, column, compilableQueryWatch, createBaseLogger, createLogger, extractTableUpdates, isBatchedUpdateNotification, isDBAdapter, isPowerSyncDatabaseOptionsWithSettings, isSQLOpenFactory, isSQLOpenOptions, parseQuery, runOnSchemaChange, sanitizeSQL, sanitizeUUID, timeoutSignal };
  //# sourceMappingURL=bundle.mjs.map