@powersync/common 0.0.0-dev-20260311103504 → 0.0.0-dev-20260503073249
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/bundle.cjs +791 -489
- package/dist/bundle.cjs.map +1 -1
- package/dist/bundle.mjs +785 -485
- package/dist/bundle.mjs.map +1 -1
- package/dist/bundle.node.cjs +789 -488
- package/dist/bundle.node.cjs.map +1 -1
- package/dist/bundle.node.mjs +783 -484
- package/dist/bundle.node.mjs.map +1 -1
- package/dist/index.d.cts +165 -103
- package/lib/attachments/AttachmentQueue.d.ts +10 -4
- package/lib/attachments/AttachmentQueue.js +10 -4
- package/lib/attachments/AttachmentQueue.js.map +1 -1
- package/lib/attachments/AttachmentService.js +2 -3
- package/lib/attachments/AttachmentService.js.map +1 -1
- package/lib/attachments/SyncingService.d.ts +2 -1
- package/lib/attachments/SyncingService.js +4 -5
- package/lib/attachments/SyncingService.js.map +1 -1
- package/lib/client/AbstractPowerSyncDatabase.d.ts +5 -1
- package/lib/client/AbstractPowerSyncDatabase.js +9 -5
- package/lib/client/AbstractPowerSyncDatabase.js.map +1 -1
- package/lib/client/sync/stream/AbstractRemote.d.ts +29 -8
- package/lib/client/sync/stream/AbstractRemote.js +154 -177
- package/lib/client/sync/stream/AbstractRemote.js.map +1 -1
- package/lib/client/sync/stream/AbstractStreamingSyncImplementation.d.ts +4 -0
- package/lib/client/sync/stream/AbstractStreamingSyncImplementation.js +88 -88
- package/lib/client/sync/stream/AbstractStreamingSyncImplementation.js.map +1 -1
- package/lib/db/DBAdapter.d.ts +55 -9
- package/lib/db/DBAdapter.js +126 -0
- package/lib/db/DBAdapter.js.map +1 -1
- package/lib/db/crud/SyncStatus.d.ts +0 -4
- package/lib/db/crud/SyncStatus.js +0 -4
- package/lib/db/crud/SyncStatus.js.map +1 -1
- package/lib/db/schema/RawTable.d.ts +0 -5
- package/lib/db/schema/Schema.d.ts +0 -2
- package/lib/db/schema/Schema.js +0 -2
- package/lib/db/schema/Schema.js.map +1 -1
- package/lib/index.d.ts +1 -1
- package/lib/index.js +0 -1
- package/lib/index.js.map +1 -1
- package/lib/utils/async.d.ts +0 -9
- package/lib/utils/async.js +0 -9
- package/lib/utils/async.js.map +1 -1
- package/lib/utils/mutex.d.ts +47 -5
- package/lib/utils/mutex.js +146 -21
- package/lib/utils/mutex.js.map +1 -1
- package/lib/utils/queue.d.ts +16 -0
- package/lib/utils/queue.js +42 -0
- package/lib/utils/queue.js.map +1 -0
- package/lib/utils/stream_transform.d.ts +39 -0
- package/lib/utils/stream_transform.js +206 -0
- package/lib/utils/stream_transform.js.map +1 -0
- package/package.json +9 -8
- package/src/attachments/AttachmentQueue.ts +10 -4
- package/src/attachments/AttachmentService.ts +2 -3
- package/src/attachments/README.md +6 -4
- package/src/attachments/SyncingService.ts +4 -5
- package/src/client/AbstractPowerSyncDatabase.ts +9 -5
- package/src/client/sync/stream/AbstractRemote.ts +182 -206
- package/src/client/sync/stream/AbstractStreamingSyncImplementation.ts +96 -83
- package/src/db/DBAdapter.ts +167 -9
- package/src/db/crud/SyncStatus.ts +0 -4
- package/src/db/schema/RawTable.ts +0 -5
- package/src/db/schema/Schema.ts +0 -2
- package/src/index.ts +1 -1
- package/src/utils/async.ts +0 -11
- package/src/utils/mutex.ts +184 -26
- package/src/utils/queue.ts +48 -0
- package/src/utils/stream_transform.ts +252 -0
- package/lib/utils/DataStream.d.ts +0 -62
- package/lib/utils/DataStream.js +0 -169
- package/lib/utils/DataStream.js.map +0 -1
- package/src/utils/DataStream.ts +0 -222
package/dist/bundle.cjs
CHANGED
|
@@ -1,7 +1,5 @@
|
|
|
1
1
|
'use strict';
|
|
2
2
|
|
|
3
|
-
var asyncMutex = require('async-mutex');
|
|
4
|
-
|
|
5
3
|
// https://www.sqlite.org/lang_expr.html#castexpr
|
|
6
4
|
exports.ColumnType = void 0;
|
|
7
5
|
(function (ColumnType) {
|
|
@@ -659,7 +657,7 @@ class SyncingService {
|
|
|
659
657
|
updatedAttachments.push(downloaded);
|
|
660
658
|
break;
|
|
661
659
|
case exports.AttachmentState.QUEUED_DELETE:
|
|
662
|
-
const deleted = await this.deleteAttachment(attachment);
|
|
660
|
+
const deleted = await this.deleteAttachment(attachment, context);
|
|
663
661
|
updatedAttachments.push(deleted);
|
|
664
662
|
break;
|
|
665
663
|
}
|
|
@@ -737,17 +735,16 @@ class SyncingService {
|
|
|
737
735
|
* On failure, defers to error handler or archives.
|
|
738
736
|
*
|
|
739
737
|
* @param attachment - The attachment record to delete
|
|
738
|
+
* @param context - Attachment context for database operations
|
|
740
739
|
* @returns Updated attachment record
|
|
741
740
|
*/
|
|
742
|
-
async deleteAttachment(attachment) {
|
|
741
|
+
async deleteAttachment(attachment, context) {
|
|
743
742
|
try {
|
|
744
743
|
await this.remoteStorage.deleteFile(attachment);
|
|
745
744
|
if (attachment.localUri) {
|
|
746
745
|
await this.localStorage.deleteFile(attachment.localUri);
|
|
747
746
|
}
|
|
748
|
-
await
|
|
749
|
-
await ctx.deleteAttachment(attachment.id);
|
|
750
|
-
});
|
|
747
|
+
await context.deleteAttachment(attachment.id);
|
|
751
748
|
return {
|
|
752
749
|
...attachment,
|
|
753
750
|
state: exports.AttachmentState.ARCHIVED
|
|
@@ -785,32 +782,198 @@ class SyncingService {
|
|
|
785
782
|
}
|
|
786
783
|
|
|
787
784
|
/**
|
|
788
|
-
*
|
|
785
|
+
* A simple fixed-capacity queue implementation.
|
|
786
|
+
*
|
|
787
|
+
* Unlike a naive queue implemented by `array.push()` and `array.shift()`, this avoids moving array elements around
|
|
788
|
+
* and is `O(1)` for {@link addLast} and {@link removeFirst}.
|
|
789
789
|
*/
|
|
790
|
-
|
|
791
|
-
|
|
792
|
-
|
|
793
|
-
|
|
794
|
-
|
|
795
|
-
|
|
796
|
-
|
|
797
|
-
|
|
798
|
-
|
|
799
|
-
|
|
800
|
-
|
|
801
|
-
|
|
802
|
-
|
|
803
|
-
|
|
804
|
-
|
|
805
|
-
|
|
806
|
-
|
|
807
|
-
|
|
790
|
+
class Queue {
|
|
791
|
+
table;
|
|
792
|
+
// Index of the first element in the table.
|
|
793
|
+
head;
|
|
794
|
+
// Amount of items currently in the queue.
|
|
795
|
+
_length;
|
|
796
|
+
constructor(initialItems) {
|
|
797
|
+
this.table = [...initialItems];
|
|
798
|
+
this.head = 0;
|
|
799
|
+
this._length = this.table.length;
|
|
800
|
+
}
|
|
801
|
+
get isEmpty() {
|
|
802
|
+
return this.length == 0;
|
|
803
|
+
}
|
|
804
|
+
get length() {
|
|
805
|
+
return this._length;
|
|
806
|
+
}
|
|
807
|
+
removeFirst() {
|
|
808
|
+
if (this.isEmpty) {
|
|
809
|
+
throw new Error('Queue is empty');
|
|
810
|
+
}
|
|
811
|
+
const result = this.table[this.head];
|
|
812
|
+
this._length--;
|
|
813
|
+
this.table[this.head] = undefined;
|
|
814
|
+
this.head = (this.head + 1) % this.table.length;
|
|
815
|
+
return result;
|
|
816
|
+
}
|
|
817
|
+
addLast(element) {
|
|
818
|
+
if (this.length == this.table.length) {
|
|
819
|
+
throw new Error('Queue is full');
|
|
820
|
+
}
|
|
821
|
+
this.table[(this.head + this._length) % this.table.length] = element;
|
|
822
|
+
this._length++;
|
|
823
|
+
}
|
|
824
|
+
}
|
|
825
|
+
|
|
826
|
+
/**
|
|
827
|
+
* An asynchronous semaphore implementation with associated items per lease.
|
|
828
|
+
*
|
|
829
|
+
* @internal This class is meant to be used in PowerSync SDKs only, and is not part of the public API.
|
|
830
|
+
*/
|
|
831
|
+
class Semaphore {
|
|
832
|
+
// Available items that are not currently assigned to a waiter.
|
|
833
|
+
available;
|
|
834
|
+
size;
|
|
835
|
+
// Linked list of waiters. We don't expect the wait list to become particularly large, and this allows removing
|
|
836
|
+
// aborted waiters from the middle of the list efficiently.
|
|
837
|
+
firstWaiter;
|
|
838
|
+
lastWaiter;
|
|
839
|
+
constructor(elements) {
|
|
840
|
+
this.available = new Queue(elements);
|
|
841
|
+
this.size = this.available.length;
|
|
842
|
+
}
|
|
843
|
+
addWaiter(requestedItems, onAcquire) {
|
|
844
|
+
const node = {
|
|
845
|
+
isActive: true,
|
|
846
|
+
acquiredItems: [],
|
|
847
|
+
remainingItems: requestedItems,
|
|
848
|
+
onAcquire,
|
|
849
|
+
prev: this.lastWaiter
|
|
850
|
+
};
|
|
851
|
+
if (this.lastWaiter) {
|
|
852
|
+
this.lastWaiter.next = node;
|
|
853
|
+
this.lastWaiter = node;
|
|
854
|
+
}
|
|
855
|
+
else {
|
|
856
|
+
// First waiter
|
|
857
|
+
this.lastWaiter = this.firstWaiter = node;
|
|
858
|
+
}
|
|
859
|
+
return node;
|
|
860
|
+
}
|
|
861
|
+
deactivateWaiter(waiter) {
|
|
862
|
+
const { prev, next } = waiter;
|
|
863
|
+
waiter.isActive = false;
|
|
864
|
+
if (prev)
|
|
865
|
+
prev.next = next;
|
|
866
|
+
if (next)
|
|
867
|
+
next.prev = prev;
|
|
868
|
+
if (waiter == this.firstWaiter)
|
|
869
|
+
this.firstWaiter = next;
|
|
870
|
+
if (waiter == this.lastWaiter)
|
|
871
|
+
this.lastWaiter = prev;
|
|
872
|
+
}
|
|
873
|
+
requestPermits(amount, abort) {
|
|
874
|
+
if (amount <= 0 || amount > this.size) {
|
|
875
|
+
throw new Error(`Invalid amount of items requested (${amount}), must be between 1 and ${this.size}`);
|
|
876
|
+
}
|
|
877
|
+
return new Promise((resolve, reject) => {
|
|
878
|
+
function rejectAborted() {
|
|
879
|
+
reject(abort?.reason ?? new Error('Semaphore acquire aborted'));
|
|
880
|
+
}
|
|
881
|
+
if (abort?.aborted) {
|
|
882
|
+
return rejectAborted();
|
|
883
|
+
}
|
|
884
|
+
let waiter;
|
|
885
|
+
const markCompleted = () => {
|
|
886
|
+
const items = waiter.acquiredItems;
|
|
887
|
+
waiter.acquiredItems = []; // Avoid releasing items twice.
|
|
888
|
+
for (const element of items) {
|
|
889
|
+
// Give to next waiter, if possible.
|
|
890
|
+
const nextWaiter = this.firstWaiter;
|
|
891
|
+
if (nextWaiter) {
|
|
892
|
+
nextWaiter.acquiredItems.push(element);
|
|
893
|
+
nextWaiter.remainingItems--;
|
|
894
|
+
if (nextWaiter.remainingItems == 0) {
|
|
895
|
+
nextWaiter.onAcquire();
|
|
896
|
+
}
|
|
897
|
+
}
|
|
898
|
+
else {
|
|
899
|
+
// No pending waiter, return lease into pool.
|
|
900
|
+
this.available.addLast(element);
|
|
901
|
+
}
|
|
902
|
+
}
|
|
903
|
+
};
|
|
904
|
+
const onAbort = () => {
|
|
905
|
+
abort?.removeEventListener('abort', onAbort);
|
|
906
|
+
if (waiter.isActive) {
|
|
907
|
+
this.deactivateWaiter(waiter);
|
|
908
|
+
rejectAborted();
|
|
909
|
+
}
|
|
910
|
+
};
|
|
911
|
+
const resolvePromise = () => {
|
|
912
|
+
this.deactivateWaiter(waiter);
|
|
913
|
+
abort?.removeEventListener('abort', onAbort);
|
|
914
|
+
const items = waiter.acquiredItems;
|
|
915
|
+
resolve({ items, release: markCompleted });
|
|
916
|
+
};
|
|
917
|
+
waiter = this.addWaiter(amount, resolvePromise);
|
|
918
|
+
// If there are items in the pool that haven't been assigned, we can pull them into this waiter. Note that this is
|
|
919
|
+
// only the case if we're the first waiter (otherwise, items would have been assigned to an earlier waiter).
|
|
920
|
+
while (!this.available.isEmpty && waiter.remainingItems > 0) {
|
|
921
|
+
waiter.acquiredItems.push(this.available.removeFirst());
|
|
922
|
+
waiter.remainingItems--;
|
|
808
923
|
}
|
|
809
|
-
|
|
810
|
-
|
|
924
|
+
if (waiter.remainingItems == 0) {
|
|
925
|
+
return resolvePromise();
|
|
811
926
|
}
|
|
927
|
+
abort?.addEventListener('abort', onAbort);
|
|
812
928
|
});
|
|
813
|
-
}
|
|
929
|
+
}
|
|
930
|
+
/**
|
|
931
|
+
* Requests a single item from the pool.
|
|
932
|
+
*
|
|
933
|
+
* The returned `release` callback must be invoked to return the item into the pool.
|
|
934
|
+
*/
|
|
935
|
+
async requestOne(abort) {
|
|
936
|
+
const { items, release } = await this.requestPermits(1, abort);
|
|
937
|
+
return { release, item: items[0] };
|
|
938
|
+
}
|
|
939
|
+
/**
|
|
940
|
+
* Requests access to all items from the pool.
|
|
941
|
+
*
|
|
942
|
+
* The returned `release` callback must be invoked to return items into the pool.
|
|
943
|
+
*/
|
|
944
|
+
requestAll(abort) {
|
|
945
|
+
return this.requestPermits(this.size, abort);
|
|
946
|
+
}
|
|
947
|
+
}
|
|
948
|
+
/**
|
|
949
|
+
* An asynchronous mutex implementation.
|
|
950
|
+
*
|
|
951
|
+
* @internal This class is meant to be used in PowerSync SDKs only, and is not part of the public API.
|
|
952
|
+
*/
|
|
953
|
+
class Mutex {
|
|
954
|
+
inner = new Semaphore([null]);
|
|
955
|
+
async acquire(abort) {
|
|
956
|
+
const { release } = await this.inner.requestOne(abort);
|
|
957
|
+
return release;
|
|
958
|
+
}
|
|
959
|
+
async runExclusive(fn, abort) {
|
|
960
|
+
const returnMutex = await this.acquire(abort);
|
|
961
|
+
try {
|
|
962
|
+
return await fn();
|
|
963
|
+
}
|
|
964
|
+
finally {
|
|
965
|
+
returnMutex();
|
|
966
|
+
}
|
|
967
|
+
}
|
|
968
|
+
}
|
|
969
|
+
function timeoutSignal(timeout) {
|
|
970
|
+
if (timeout == null)
|
|
971
|
+
return;
|
|
972
|
+
if ('timeout' in AbortSignal)
|
|
973
|
+
return AbortSignal.timeout(timeout);
|
|
974
|
+
const controller = new AbortController();
|
|
975
|
+
setTimeout(() => controller.abort(new Error('Timeout waiting for lock')), timeout);
|
|
976
|
+
return controller.signal;
|
|
814
977
|
}
|
|
815
978
|
|
|
816
979
|
/**
|
|
@@ -822,7 +985,7 @@ class AttachmentService {
|
|
|
822
985
|
db;
|
|
823
986
|
logger;
|
|
824
987
|
tableName;
|
|
825
|
-
mutex = new
|
|
988
|
+
mutex = new Mutex();
|
|
826
989
|
context;
|
|
827
990
|
constructor(db, logger, tableName = 'attachments', archivedCacheLimit = 100) {
|
|
828
991
|
this.db = db;
|
|
@@ -859,7 +1022,7 @@ class AttachmentService {
|
|
|
859
1022
|
* Executes a callback with exclusive access to the attachment context.
|
|
860
1023
|
*/
|
|
861
1024
|
async withContext(callback) {
|
|
862
|
-
return
|
|
1025
|
+
return this.mutex.runExclusive(async () => {
|
|
863
1026
|
return callback(this.context);
|
|
864
1027
|
});
|
|
865
1028
|
}
|
|
@@ -895,9 +1058,15 @@ class AttachmentQueue {
|
|
|
895
1058
|
tableName;
|
|
896
1059
|
/** Logger instance for diagnostic information */
|
|
897
1060
|
logger;
|
|
898
|
-
/** Interval in milliseconds between periodic sync operations.
|
|
1061
|
+
/** Interval in milliseconds between periodic sync operations. Acts as a polling timer to retry
|
|
1062
|
+
* failed uploads/downloads, especially after the app goes offline. Default: 30000 (30 seconds) */
|
|
899
1063
|
syncIntervalMs = 30 * 1000;
|
|
900
|
-
/**
|
|
1064
|
+
/** Throttle duration in milliseconds for the reactive watch query on the attachments table.
|
|
1065
|
+
* When attachment records change, a watch query detects the change and triggers a sync.
|
|
1066
|
+
* This throttle prevents the sync from firing too rapidly when many changes happen in
|
|
1067
|
+
* quick succession (e.g., bulk inserts). This is distinct from syncIntervalMs — it controls
|
|
1068
|
+
* how quickly the queue reacts to changes, while syncIntervalMs controls how often it polls
|
|
1069
|
+
* for retries. Default: 30 (from DEFAULT_WATCH_THROTTLE_MS) */
|
|
901
1070
|
syncThrottleDuration;
|
|
902
1071
|
/** Whether to automatically download remote attachments. Default: true */
|
|
903
1072
|
downloadAttachments = true;
|
|
@@ -921,8 +1090,8 @@ class AttachmentQueue {
|
|
|
921
1090
|
* @param options.watchAttachments - Callback for monitoring attachment changes in your data model
|
|
922
1091
|
* @param options.tableName - Name of the table to store attachment records. Default: 'ps_attachment_queue'
|
|
923
1092
|
* @param options.logger - Logger instance. Defaults to db.logger
|
|
924
|
-
* @param options.syncIntervalMs -
|
|
925
|
-
* @param options.syncThrottleDuration - Throttle duration for
|
|
1093
|
+
* @param options.syncIntervalMs - Periodic polling interval in milliseconds for retrying failed uploads/downloads. Default: 30000
|
|
1094
|
+
* @param options.syncThrottleDuration - Throttle duration in milliseconds for the reactive watch query that detects attachment changes. Prevents rapid-fire syncs during bulk changes. Default: 30
|
|
926
1095
|
* @param options.downloadAttachments - Whether to automatically download remote attachments. Default: true
|
|
927
1096
|
* @param options.archivedCacheLimit - Maximum archived attachments before cleanup. Default: 100
|
|
928
1097
|
*/
|
|
@@ -1229,6 +1398,8 @@ exports.EncodingType = void 0;
|
|
|
1229
1398
|
EncodingType["Base64"] = "base64";
|
|
1230
1399
|
})(exports.EncodingType || (exports.EncodingType = {}));
|
|
1231
1400
|
|
|
1401
|
+
const symbolAsyncIterator = Symbol.asyncIterator ?? Symbol.for('Symbol.asyncIterator');
|
|
1402
|
+
|
|
1232
1403
|
function getDefaultExportFromCjs (x) {
|
|
1233
1404
|
return x && x.__esModule && Object.prototype.hasOwnProperty.call(x, 'default') ? x['default'] : x;
|
|
1234
1405
|
}
|
|
@@ -1309,7 +1480,7 @@ function requireEventIterator () {
|
|
|
1309
1480
|
this.removeCallback();
|
|
1310
1481
|
});
|
|
1311
1482
|
}
|
|
1312
|
-
[
|
|
1483
|
+
[symbolAsyncIterator]() {
|
|
1313
1484
|
return {
|
|
1314
1485
|
next: (value) => {
|
|
1315
1486
|
const result = this.pushQueue.shift();
|
|
@@ -1356,7 +1527,7 @@ function requireEventIterator () {
|
|
|
1356
1527
|
queue.eventHandlers[event] = fn;
|
|
1357
1528
|
},
|
|
1358
1529
|
}) || (() => { });
|
|
1359
|
-
this[
|
|
1530
|
+
this[symbolAsyncIterator] = () => queue[symbolAsyncIterator]();
|
|
1360
1531
|
Object.freeze(this);
|
|
1361
1532
|
}
|
|
1362
1533
|
}
|
|
@@ -1683,6 +1854,49 @@ var Logger = /*@__PURE__*/getDefaultExportFromCjs(loggerExports);
|
|
|
1683
1854
|
* Set of generic interfaces to allow PowerSync compatibility with
|
|
1684
1855
|
* different SQLite DB implementations.
|
|
1685
1856
|
*/
|
|
1857
|
+
/**
|
|
1858
|
+
* Implements {@link DBGetUtils} on a {@link SqlRunner}.
|
|
1859
|
+
*/
|
|
1860
|
+
function DBGetUtilsDefaultMixin(Base) {
|
|
1861
|
+
return class extends Base {
|
|
1862
|
+
async getAll(sql, parameters) {
|
|
1863
|
+
const res = await this.execute(sql, parameters);
|
|
1864
|
+
return res.rows?._array ?? [];
|
|
1865
|
+
}
|
|
1866
|
+
async getOptional(sql, parameters) {
|
|
1867
|
+
const res = await this.execute(sql, parameters);
|
|
1868
|
+
return res.rows?.item(0) ?? null;
|
|
1869
|
+
}
|
|
1870
|
+
async get(sql, parameters) {
|
|
1871
|
+
const res = await this.execute(sql, parameters);
|
|
1872
|
+
const first = res.rows?.item(0);
|
|
1873
|
+
if (!first) {
|
|
1874
|
+
throw new Error('Result set is empty');
|
|
1875
|
+
}
|
|
1876
|
+
return first;
|
|
1877
|
+
}
|
|
1878
|
+
async executeBatch(query, params = []) {
|
|
1879
|
+
// If this context can run batch statements natively, use that.
|
|
1880
|
+
// @ts-ignore
|
|
1881
|
+
if (super.executeBatch) {
|
|
1882
|
+
// @ts-ignore
|
|
1883
|
+
return super.executeBatch(query, params);
|
|
1884
|
+
}
|
|
1885
|
+
// Emulate executeBatch by running statements individually.
|
|
1886
|
+
let lastInsertId;
|
|
1887
|
+
let rowsAffected = 0;
|
|
1888
|
+
for (const set of params) {
|
|
1889
|
+
const result = await this.execute(query, set);
|
|
1890
|
+
lastInsertId = result.insertId;
|
|
1891
|
+
rowsAffected += result.rowsAffected;
|
|
1892
|
+
}
|
|
1893
|
+
return {
|
|
1894
|
+
rowsAffected,
|
|
1895
|
+
insertId: lastInsertId
|
|
1896
|
+
};
|
|
1897
|
+
}
|
|
1898
|
+
};
|
|
1899
|
+
}
|
|
1686
1900
|
/**
|
|
1687
1901
|
* Update table operation numbers from SQLite
|
|
1688
1902
|
*/
|
|
@@ -1692,6 +1906,89 @@ exports.RowUpdateType = void 0;
|
|
|
1692
1906
|
RowUpdateType[RowUpdateType["SQLITE_DELETE"] = 9] = "SQLITE_DELETE";
|
|
1693
1907
|
RowUpdateType[RowUpdateType["SQLITE_UPDATE"] = 23] = "SQLITE_UPDATE";
|
|
1694
1908
|
})(exports.RowUpdateType || (exports.RowUpdateType = {}));
|
|
1909
|
+
/**
|
|
1910
|
+
* A mixin to implement {@link DBAdapter} by delegating to {@link ConnectionPool.readLock} and
|
|
1911
|
+
* {@link ConnectionPool.writeLock}.
|
|
1912
|
+
*/
|
|
1913
|
+
function DBAdapterDefaultMixin(Base) {
|
|
1914
|
+
return class extends Base {
|
|
1915
|
+
readTransaction(fn, options) {
|
|
1916
|
+
return this.readLock((ctx) => TransactionImplementation.runWith(ctx, fn), options);
|
|
1917
|
+
}
|
|
1918
|
+
writeTransaction(fn, options) {
|
|
1919
|
+
return this.writeLock((ctx) => TransactionImplementation.runWith(ctx, fn), options);
|
|
1920
|
+
}
|
|
1921
|
+
getAll(sql, parameters) {
|
|
1922
|
+
return this.readLock((ctx) => ctx.getAll(sql, parameters));
|
|
1923
|
+
}
|
|
1924
|
+
getOptional(sql, parameters) {
|
|
1925
|
+
return this.readLock((ctx) => ctx.getOptional(sql, parameters));
|
|
1926
|
+
}
|
|
1927
|
+
get(sql, parameters) {
|
|
1928
|
+
return this.readLock((ctx) => ctx.get(sql, parameters));
|
|
1929
|
+
}
|
|
1930
|
+
execute(query, params) {
|
|
1931
|
+
return this.writeLock((ctx) => ctx.execute(query, params));
|
|
1932
|
+
}
|
|
1933
|
+
executeRaw(query, params) {
|
|
1934
|
+
return this.writeLock((ctx) => ctx.executeRaw(query, params));
|
|
1935
|
+
}
|
|
1936
|
+
executeBatch(query, params) {
|
|
1937
|
+
return this.writeTransaction((tx) => tx.executeBatch(query, params));
|
|
1938
|
+
}
|
|
1939
|
+
};
|
|
1940
|
+
}
|
|
1941
|
+
class BaseTransaction {
|
|
1942
|
+
inner;
|
|
1943
|
+
finalized = false;
|
|
1944
|
+
constructor(inner) {
|
|
1945
|
+
this.inner = inner;
|
|
1946
|
+
}
|
|
1947
|
+
async commit() {
|
|
1948
|
+
if (this.finalized) {
|
|
1949
|
+
return { rowsAffected: 0 };
|
|
1950
|
+
}
|
|
1951
|
+
this.finalized = true;
|
|
1952
|
+
return this.inner.execute('COMMIT');
|
|
1953
|
+
}
|
|
1954
|
+
async rollback() {
|
|
1955
|
+
if (this.finalized) {
|
|
1956
|
+
return { rowsAffected: 0 };
|
|
1957
|
+
}
|
|
1958
|
+
this.finalized = true;
|
|
1959
|
+
return this.inner.execute('ROLLBACK');
|
|
1960
|
+
}
|
|
1961
|
+
execute(query, params) {
|
|
1962
|
+
return this.inner.execute(query, params);
|
|
1963
|
+
}
|
|
1964
|
+
executeRaw(query, params) {
|
|
1965
|
+
return this.inner.executeRaw(query, params);
|
|
1966
|
+
}
|
|
1967
|
+
executeBatch(query, params) {
|
|
1968
|
+
return this.inner.executeBatch(query, params);
|
|
1969
|
+
}
|
|
1970
|
+
}
|
|
1971
|
+
class TransactionImplementation extends DBGetUtilsDefaultMixin(BaseTransaction) {
|
|
1972
|
+
static async runWith(ctx, fn) {
|
|
1973
|
+
let tx = new TransactionImplementation(ctx);
|
|
1974
|
+
try {
|
|
1975
|
+
await ctx.execute('BEGIN IMMEDIATE');
|
|
1976
|
+
const result = await fn(tx);
|
|
1977
|
+
await tx.commit();
|
|
1978
|
+
return result;
|
|
1979
|
+
}
|
|
1980
|
+
catch (ex) {
|
|
1981
|
+
try {
|
|
1982
|
+
await tx.rollback();
|
|
1983
|
+
}
|
|
1984
|
+
catch (ex2) {
|
|
1985
|
+
// In rare cases, a rollback may fail.
|
|
1986
|
+
// Safe to ignore.
|
|
1987
|
+
}
|
|
1988
|
+
throw ex;
|
|
1989
|
+
}
|
|
1990
|
+
}
|
|
1991
|
+
}
|
|
1695
1992
|
function isBatchedUpdateNotification(update) {
|
|
1696
1993
|
return 'tables' in update;
|
|
1697
1994
|
}
|
|
@@ -1834,16 +2131,12 @@ class SyncStatus {
|
|
|
1834
2131
|
*
|
|
1835
2132
|
* This returns null when the database is currently being opened and we don't have reliable information about all
|
|
1836
2133
|
* included streams yet.
|
|
1837
|
-
*
|
|
1838
|
-
* @experimental Sync streams are currently in alpha.
|
|
1839
2134
|
*/
|
|
1840
2135
|
get syncStreams() {
|
|
1841
2136
|
return this.options.dataFlow?.internalStreamSubscriptions?.map((core) => new SyncStreamStatusView(this, core));
|
|
1842
2137
|
}
|
|
1843
2138
|
/**
|
|
1844
2139
|
* If the `stream` appears in {@link syncStreams}, returns the current status for that stream.
|
|
1845
|
-
*
|
|
1846
|
-
* @experimental Sync streams are currently in alpha.
|
|
1847
2140
|
*/
|
|
1848
2141
|
forStream(stream) {
|
|
1849
2142
|
const asJson = JSON.stringify(stream.parameters);
|
|
@@ -2112,15 +2405,6 @@ class ControlledExecutor {
|
|
|
2112
2405
|
}
|
|
2113
2406
|
}
|
|
2114
2407
|
|
|
2115
|
-
/**
|
|
2116
|
-
* A ponyfill for `Symbol.asyncIterator` that is compatible with the
|
|
2117
|
-
* [recommended polyfill](https://github.com/Azure/azure-sdk-for-js/blob/%40azure/core-asynciterator-polyfill_1.0.2/sdk/core/core-asynciterator-polyfill/src/index.ts#L4-L6)
|
|
2118
|
-
* we recommend for React Native.
|
|
2119
|
-
*
|
|
2120
|
-
* As long as we use this symbol (instead of `for await` and `async *`) in this package, we can be compatible with async
|
|
2121
|
-
* iterators without requiring them.
|
|
2122
|
-
*/
|
|
2123
|
-
const symbolAsyncIterator = Symbol.asyncIterator ?? Symbol.for('Symbol.asyncIterator');
|
|
2124
2408
|
/**
|
|
2125
2409
|
* Throttle a function to be called at most once every "wait" milliseconds,
|
|
2126
2410
|
* on the trailing edge.
|
|
@@ -10457,177 +10741,10 @@ function requireDist () {
|
|
|
10457
10741
|
|
|
10458
10742
|
var distExports = requireDist();
|
|
10459
10743
|
|
|
10460
|
-
var version = "1.
|
|
10744
|
+
var version = "1.52.0";
|
|
10461
10745
|
var PACKAGE = {
|
|
10462
10746
|
version: version};
|
|
10463
10747
|
|
|
10464
|
-
const DEFAULT_PRESSURE_LIMITS = {
|
|
10465
|
-
highWater: 10,
|
|
10466
|
-
lowWater: 0
|
|
10467
|
-
};
|
|
10468
|
-
/**
|
|
10469
|
-
* A very basic implementation of a data stream with backpressure support which does not use
|
|
10470
|
-
* native JS streams or async iterators.
|
|
10471
|
-
* This is handy for environments such as React Native which need polyfills for the above.
|
|
10472
|
-
*/
|
|
10473
|
-
class DataStream extends BaseObserver {
|
|
10474
|
-
options;
|
|
10475
|
-
dataQueue;
|
|
10476
|
-
isClosed;
|
|
10477
|
-
processingPromise;
|
|
10478
|
-
notifyDataAdded;
|
|
10479
|
-
logger;
|
|
10480
|
-
mapLine;
|
|
10481
|
-
constructor(options) {
|
|
10482
|
-
super();
|
|
10483
|
-
this.options = options;
|
|
10484
|
-
this.processingPromise = null;
|
|
10485
|
-
this.isClosed = false;
|
|
10486
|
-
this.dataQueue = [];
|
|
10487
|
-
this.mapLine = options?.mapLine ?? ((line) => line);
|
|
10488
|
-
this.logger = options?.logger ?? Logger.get('DataStream');
|
|
10489
|
-
if (options?.closeOnError) {
|
|
10490
|
-
const l = this.registerListener({
|
|
10491
|
-
error: (ex) => {
|
|
10492
|
-
l?.();
|
|
10493
|
-
this.close();
|
|
10494
|
-
}
|
|
10495
|
-
});
|
|
10496
|
-
}
|
|
10497
|
-
}
|
|
10498
|
-
get highWatermark() {
|
|
10499
|
-
return this.options?.pressure?.highWaterMark ?? DEFAULT_PRESSURE_LIMITS.highWater;
|
|
10500
|
-
}
|
|
10501
|
-
get lowWatermark() {
|
|
10502
|
-
return this.options?.pressure?.lowWaterMark ?? DEFAULT_PRESSURE_LIMITS.lowWater;
|
|
10503
|
-
}
|
|
10504
|
-
get closed() {
|
|
10505
|
-
return this.isClosed;
|
|
10506
|
-
}
|
|
10507
|
-
async close() {
|
|
10508
|
-
this.isClosed = true;
|
|
10509
|
-
await this.processingPromise;
|
|
10510
|
-
this.iterateListeners((l) => l.closed?.());
|
|
10511
|
-
// Discard any data in the queue
|
|
10512
|
-
this.dataQueue = [];
|
|
10513
|
-
this.listeners.clear();
|
|
10514
|
-
}
|
|
10515
|
-
/**
|
|
10516
|
-
* Enqueues data for the consumers to read
|
|
10517
|
-
*/
|
|
10518
|
-
enqueueData(data) {
|
|
10519
|
-
if (this.isClosed) {
|
|
10520
|
-
throw new Error('Cannot enqueue data into closed stream.');
|
|
10521
|
-
}
|
|
10522
|
-
this.dataQueue.push(data);
|
|
10523
|
-
this.notifyDataAdded?.();
|
|
10524
|
-
this.processQueue();
|
|
10525
|
-
}
|
|
10526
|
-
/**
|
|
10527
|
-
* Reads data once from the data stream
|
|
10528
|
-
* @returns a Data payload or Null if the stream closed.
|
|
10529
|
-
*/
|
|
10530
|
-
async read() {
|
|
10531
|
-
if (this.closed) {
|
|
10532
|
-
return null;
|
|
10533
|
-
}
|
|
10534
|
-
// Wait for any pending processing to complete first.
|
|
10535
|
-
// This ensures we register our listener before calling processQueue(),
|
|
10536
|
-
// avoiding a race where processQueue() sees no reader and returns early.
|
|
10537
|
-
if (this.processingPromise) {
|
|
10538
|
-
await this.processingPromise;
|
|
10539
|
-
}
|
|
10540
|
-
// Re-check after await - stream may have closed while we were waiting
|
|
10541
|
-
if (this.closed) {
|
|
10542
|
-
return null;
|
|
10543
|
-
}
|
|
10544
|
-
return new Promise((resolve, reject) => {
|
|
10545
|
-
const l = this.registerListener({
|
|
10546
|
-
data: async (data) => {
|
|
10547
|
-
resolve(data);
|
|
10548
|
-
// Remove the listener
|
|
10549
|
-
l?.();
|
|
10550
|
-
},
|
|
10551
|
-
closed: () => {
|
|
10552
|
-
resolve(null);
|
|
10553
|
-
l?.();
|
|
10554
|
-
},
|
|
10555
|
-
error: (ex) => {
|
|
10556
|
-
reject(ex);
|
|
10557
|
-
l?.();
|
|
10558
|
-
}
|
|
10559
|
-
});
|
|
10560
|
-
this.processQueue();
|
|
10561
|
-
});
|
|
10562
|
-
}
|
|
10563
|
-
/**
|
|
10564
|
-
* Executes a callback for each data item in the stream
|
|
10565
|
-
*/
|
|
10566
|
-
forEach(callback) {
|
|
10567
|
-
if (this.dataQueue.length <= this.lowWatermark) {
|
|
10568
|
-
this.iterateAsyncErrored(async (l) => l.lowWater?.());
|
|
10569
|
-
}
|
|
10570
|
-
return this.registerListener({
|
|
10571
|
-
data: callback
|
|
10572
|
-
});
|
|
10573
|
-
}
|
|
10574
|
-
processQueue() {
|
|
10575
|
-
if (this.processingPromise) {
|
|
10576
|
-
return;
|
|
10577
|
-
}
|
|
10578
|
-
const promise = (this.processingPromise = this._processQueue());
|
|
10579
|
-
promise.finally(() => {
|
|
10580
|
-
this.processingPromise = null;
|
|
10581
|
-
});
|
|
10582
|
-
return promise;
|
|
10583
|
-
}
|
|
10584
|
-
hasDataReader() {
|
|
10585
|
-
return Array.from(this.listeners.values()).some((l) => !!l.data);
|
|
10586
|
-
}
|
|
10587
|
-
async _processQueue() {
|
|
10588
|
-
/**
|
|
10589
|
-
* Allow listeners to mutate the queue before processing.
|
|
10590
|
-
* This allows for operations such as dropping or compressing data
|
|
10591
|
-
* on high water or requesting more data on low water.
|
|
10592
|
-
*/
|
|
10593
|
-
if (this.dataQueue.length >= this.highWatermark) {
|
|
10594
|
-
await this.iterateAsyncErrored(async (l) => l.highWater?.());
|
|
10595
|
-
}
|
|
10596
|
-
if (this.isClosed || !this.hasDataReader()) {
|
|
10597
|
-
return;
|
|
10598
|
-
}
|
|
10599
|
-
if (this.dataQueue.length) {
|
|
10600
|
-
const data = this.dataQueue.shift();
|
|
10601
|
-
const mapped = this.mapLine(data);
|
|
10602
|
-
await this.iterateAsyncErrored(async (l) => l.data?.(mapped));
|
|
10603
|
-
}
|
|
10604
|
-
if (this.dataQueue.length <= this.lowWatermark) {
|
|
10605
|
-
const dataAdded = new Promise((resolve) => {
|
|
10606
|
-
this.notifyDataAdded = resolve;
|
|
10607
|
-
});
|
|
10608
|
-
await Promise.race([this.iterateAsyncErrored(async (l) => l.lowWater?.()), dataAdded]);
|
|
10609
|
-
this.notifyDataAdded = null;
|
|
10610
|
-
}
|
|
10611
|
-
if (this.dataQueue.length > 0) {
|
|
10612
|
-
setTimeout(() => this.processQueue());
|
|
10613
|
-
}
|
|
10614
|
-
}
|
|
10615
|
-
async iterateAsyncErrored(cb) {
|
|
10616
|
-
// Important: We need to copy the listeners, as calling a listener could result in adding another
|
|
10617
|
-
// listener, resulting in infinite loops.
|
|
10618
|
-
const listeners = Array.from(this.listeners.values());
|
|
10619
|
-
for (let i of listeners) {
|
|
10620
|
-
try {
|
|
10621
|
-
await cb(i);
|
|
10622
|
-
}
|
|
10623
|
-
catch (ex) {
|
|
10624
|
-
this.logger.error(ex);
|
|
10625
|
-
this.iterateListeners((l) => l.error?.(ex));
|
|
10626
|
-
}
|
|
10627
|
-
}
|
|
10628
|
-
}
|
|
10629
|
-
}
|
|
10630
|
-
|
|
10631
10748
|
var WebsocketDuplexConnection = {};
|
|
10632
10749
|
|
|
10633
10750
|
var hasRequiredWebsocketDuplexConnection;
|
|
@@ -10790,8 +10907,215 @@ class WebsocketClientTransport {
|
|
|
10790
10907
|
}
|
|
10791
10908
|
}
|
|
10792
10909
|
|
|
10910
|
+
const doneResult = { done: true, value: undefined };
|
|
10911
|
+
function valueResult(value) {
|
|
10912
|
+
return { done: false, value };
|
|
10913
|
+
}
|
|
10914
|
+
/**
|
|
10915
|
+
* A variant of {@link Array.map} for async iterators.
|
|
10916
|
+
*/
|
|
10917
|
+
function map(source, map) {
|
|
10918
|
+
return {
|
|
10919
|
+
next: async () => {
|
|
10920
|
+
const value = await source.next();
|
|
10921
|
+
if (value.done) {
|
|
10922
|
+
return value;
|
|
10923
|
+
}
|
|
10924
|
+
else {
|
|
10925
|
+
return { value: map(value.value) };
|
|
10926
|
+
}
|
|
10927
|
+
}
|
|
10928
|
+
};
|
|
10929
|
+
}
|
|
10930
|
+
/**
 * Expands a source async iterator by allowing to inject events asynchronously.
 *
 * The resulting iterator will emit all events from its source. Additionally though, events can be injected. These
 * events are dropped once the main iterator completes, but are otherwise forwarded.
 *
 * The iterator completes when its source completes, and it supports backpressure by only calling `next()` on the source
 * in response to a `next()` call from downstream if no pending injected events can be dispatched.
 */
function injectable(source) {
    let sourceIsDone = false;
    let waiter = undefined; // An active, waiting next() call.
    // A pending upstream event that couldn't be dispatched because inject() has been called before it was resolved.
    let pendingSourceEvent = null;
    // Injected events that arrived while no next() call was waiting; drained before fetching from the source.
    let pendingInjectedEvents = [];
    // Takes ownership of the waiting next() call (if any), clearing the slot so it can only be resolved once.
    const consumeWaiter = () => {
        const pending = waiter;
        waiter = undefined;
        return pending;
    };
    // Requests the next event from the source. When it resolves, the outcome is routed either to the currently
    // waiting next() call or - if that waiter was satisfied by inject() in the meantime - parked in
    // pendingSourceEvent for the following next() call.
    const fetchFromSource = () => {
        const resolveWaiter = (propagate) => {
            const active = consumeWaiter();
            if (active) {
                propagate(active);
            }
            else {
                pendingSourceEvent = propagate;
            }
        };
        const nextFromSource = source.next();
        nextFromSource.then((value) => {
            sourceIsDone = value.done == true;
            resolveWaiter((w) => w.resolve(value));
        }, (error) => {
            resolveWaiter((w) => w.reject(error));
        });
    };
    return {
        next: () => {
            return new Promise((resolve, reject) => {
                // First priority: Dispatch ready upstream events.
                if (sourceIsDone) {
                    return resolve(doneResult);
                }
                if (pendingSourceEvent) {
                    pendingSourceEvent({ resolve, reject });
                    pendingSourceEvent = null;
                    return;
                }
                // Second priority: Dispatch injected events
                if (pendingInjectedEvents.length) {
                    return resolve(valueResult(pendingInjectedEvents.shift()));
                }
                // Nothing pending? Fetch from source
                waiter = { resolve, reject };
                return fetchFromSource();
            });
        },
        inject: (event) => {
            // If a next() call is currently waiting, satisfy it directly; otherwise queue the event until the
            // downstream consumer asks for it.
            const pending = consumeWaiter();
            if (pending != null) {
                pending.resolve(valueResult(event));
            }
            else {
                pendingInjectedEvents.push(event);
            }
        }
    };
}
|
|
11000
|
+
/**
 * Splits a byte stream at line endings, emitting each non-empty, trimmed line as a string.
 *
 * Bytes are decoded incrementally (multi-byte characters may span chunk boundaries), and any trailing text left in
 * the buffer when the source completes is emitted as a final line.
 */
function extractJsonLines(source, decoder) {
    let carry = '';
    const ready = [];
    let emittedFinalLine = false;
    // Decode a chunk, split on newlines and queue the complete, non-empty lines. The trailing partial line is kept
    // in `carry` until more data (or the end of the stream) arrives.
    const collectLines = (text) => {
        carry += text;
        const parts = carry.split('\n');
        carry = parts.pop();
        for (const part of parts) {
            const trimmed = part.trim();
            if (trimmed.length > 0) {
                ready.push(trimmed);
            }
        }
    };
    return {
        next: async () => {
            for (;;) {
                if (emittedFinalLine) {
                    return { done: true, value: undefined };
                }
                const queued = ready.shift();
                if (queued !== undefined) {
                    return { done: false, value: queued };
                }
                const { done, value } = await source.next();
                if (done) {
                    const tail = carry.trim();
                    if (tail.length !== 0) {
                        // Emit the final unterminated line once, then report completion.
                        emittedFinalLine = true;
                        return { done: false, value: tail };
                    }
                    return { done: true, value: undefined };
                }
                collectLines(decoder.decode(value, { stream: true }));
            }
        }
    };
}
|
|
11042
|
+
/**
 * Splits a concatenated stream of BSON objects by emitting individual objects.
 *
 * Each BSON document starts with a 4-byte little-endian int32 giving the total document size (including the length
 * prefix itself). This iterator re-frames arbitrary byte chunks from `source` into one Uint8Array per document,
 * with the length prefix re-written into the emitted buffer.
 */
function extractBsonObjects(source) {
    // Fully read but not emitted yet.
    const completedObjects = [];
    // Whether source has returned { done: true }. We do the same once completed objects have been emitted.
    let isDone = false;
    // Scratch buffer collecting the 4-byte length prefix, which may itself span chunk boundaries.
    const lengthBuffer = new DataView(new ArrayBuffer(4));
    // The document currently being assembled; null while we're still reading its length header.
    let objectBody = null;
    // If we're parsing the length field, a number between 1 and 4 (inclusive) describing remaining bytes in the header.
    // If we're consuming a document, the bytes remaining.
    let remainingLength = 4;
    return {
        async next() {
            while (true) {
                // Before fetching new data from upstream, return completed objects.
                if (completedObjects.length) {
                    return valueResult(completedObjects.shift());
                }
                if (isDone) {
                    return doneResult;
                }
                const upstreamEvent = await source.next();
                if (upstreamEvent.done) {
                    isDone = true;
                    // A partially-read header or body at end of stream means the input was truncated.
                    if (objectBody || remainingLength != 4) {
                        throw new Error('illegal end of stream in BSON object');
                    }
                    return doneResult;
                }
                const chunk = upstreamEvent.value;
                for (let i = 0; i < chunk.length;) {
                    const availableInData = chunk.length - i;
                    if (objectBody) {
                        // We're in the middle of reading a BSON document.
                        const bytesToRead = Math.min(availableInData, remainingLength);
                        const copySource = new Uint8Array(chunk.buffer, chunk.byteOffset + i, bytesToRead);
                        objectBody.set(copySource, objectBody.length - remainingLength);
                        i += bytesToRead;
                        remainingLength -= bytesToRead;
                        if (remainingLength == 0) {
                            completedObjects.push(objectBody);
                            // Prepare to read another document, starting with its length
                            objectBody = null;
                            remainingLength = 4;
                        }
                    }
                    else {
                        // Copy up to 4 bytes into lengthBuffer, depending on how many we still need.
                        const bytesToRead = Math.min(availableInData, remainingLength);
                        for (let j = 0; j < bytesToRead; j++) {
                            lengthBuffer.setUint8(4 - remainingLength + j, chunk[i + j]);
                        }
                        i += bytesToRead;
                        remainingLength -= bytesToRead;
                        if (remainingLength == 0) {
                            // Transition from reading length header to reading document. Subtracting 4 because the length of the
                            // header is included in length.
                            const length = lengthBuffer.getInt32(0, true /* little endian */);
                            remainingLength = length - 4;
                            if (remainingLength < 1) {
                                throw new Error(`invalid length for bson: ${length}`);
                            }
                            objectBody = new Uint8Array(length);
                            // Re-write the length prefix so the emitted buffer is a complete, parseable document.
                            new DataView(objectBody.buffer).setInt32(0, length, true);
                        }
                    }
                }
            }
        }
    };
}
|
|
11115
|
+
|
|
10793
11116
|
// Matches one or more trailing slashes (used to normalise endpoint URLs).
const POWERSYNC_TRAILING_SLASH_MATCH = /\/+$/;
// Library version string, sourced from the bundled PACKAGE metadata.
const POWERSYNC_JS_VERSION = PACKAGE.version;
// Backpressure bounds for the queued sync events: stop requesting more once the buffered count reaches the high
// water mark, resume once it drains to the low water mark.
const SYNC_QUEUE_REQUEST_HIGH_WATER = 10;
const SYNC_QUEUE_REQUEST_LOW_WATER = 5;
// Keep alive message is sent every period
const KEEP_ALIVE_MS = 20_000;
|
|
@@ -10971,13 +11295,14 @@ class AbstractRemote {
|
|
|
10971
11295
|
return new WebSocket(url);
|
|
10972
11296
|
}
|
|
10973
11297
|
/**
|
|
10974
|
-
* Returns a data stream of sync line data.
|
|
11298
|
+
* Returns a data stream of sync line data, fetched via RSocket-over-WebSocket.
|
|
11299
|
+
*
|
|
11300
|
+
* The only mechanism to abort the returned stream is to use the abort signal in {@link SocketSyncStreamOptions}.
|
|
10975
11301
|
*
|
|
10976
|
-
* @param map Maps received payload frames to the typed event value.
|
|
10977
11302
|
* @param bson A BSON encoder and decoder. When set, the data stream will be requested with a BSON payload
|
|
10978
11303
|
* (required for compatibility with older sync services).
|
|
10979
11304
|
*/
|
|
10980
|
-
async socketStreamRaw(options,
|
|
11305
|
+
async socketStreamRaw(options, bson) {
|
|
10981
11306
|
const { path, fetchStrategy = exports.FetchStrategy.Buffered } = options;
|
|
10982
11307
|
const mimeType = bson == null ? 'application/json' : 'application/bson';
|
|
10983
11308
|
function toBuffer(js) {
|
|
@@ -10992,52 +11317,55 @@ class AbstractRemote {
|
|
|
10992
11317
|
}
|
|
10993
11318
|
const syncQueueRequestSize = fetchStrategy == exports.FetchStrategy.Buffered ? 10 : 1;
|
|
10994
11319
|
const request = await this.buildRequest(path);
|
|
11320
|
+
const url = this.options.socketUrlTransformer(request.url);
|
|
10995
11321
|
// Add the user agent in the setup payload - we can't set custom
|
|
10996
11322
|
// headers with websockets on web. The browser userAgent is however added
|
|
10997
11323
|
// automatically as a header.
|
|
10998
11324
|
const userAgent = this.getUserAgent();
|
|
10999
|
-
|
|
11000
|
-
|
|
11001
|
-
|
|
11002
|
-
|
|
11003
|
-
|
|
11004
|
-
|
|
11005
|
-
|
|
11325
|
+
// While we're connecting (a process that can't be aborted in RSocket), the WebSocket instance to close if we wanted
|
|
11326
|
+
// to abort the connection.
|
|
11327
|
+
let pendingSocket = null;
|
|
11328
|
+
let keepAliveTimeout;
|
|
11329
|
+
let rsocket = null;
|
|
11330
|
+
let queue = null;
|
|
11331
|
+
let didClose = false;
|
|
11332
|
+
const abortRequest = () => {
|
|
11333
|
+
if (didClose) {
|
|
11334
|
+
return;
|
|
11335
|
+
}
|
|
11336
|
+
didClose = true;
|
|
11337
|
+
clearTimeout(keepAliveTimeout);
|
|
11338
|
+
if (pendingSocket) {
|
|
11339
|
+
pendingSocket.close();
|
|
11340
|
+
}
|
|
11341
|
+
if (rsocket) {
|
|
11342
|
+
rsocket.close();
|
|
11343
|
+
}
|
|
11344
|
+
if (queue) {
|
|
11345
|
+
queue.stop();
|
|
11346
|
+
}
|
|
11347
|
+
};
|
|
11006
11348
|
// Handle upstream abort
|
|
11007
|
-
if (options.abortSignal
|
|
11349
|
+
if (options.abortSignal.aborted) {
|
|
11008
11350
|
throw new AbortOperation('Connection request aborted');
|
|
11009
11351
|
}
|
|
11010
11352
|
else {
|
|
11011
|
-
options.abortSignal
|
|
11012
|
-
stream.close();
|
|
11013
|
-
}, { once: true });
|
|
11353
|
+
options.abortSignal.addEventListener('abort', abortRequest);
|
|
11014
11354
|
}
|
|
11015
|
-
let keepAliveTimeout;
|
|
11016
11355
|
const resetTimeout = () => {
|
|
11017
11356
|
clearTimeout(keepAliveTimeout);
|
|
11018
11357
|
keepAliveTimeout = setTimeout(() => {
|
|
11019
11358
|
this.logger.error(`No data received on WebSocket in ${SOCKET_TIMEOUT_MS}ms, closing connection.`);
|
|
11020
|
-
|
|
11359
|
+
abortRequest();
|
|
11021
11360
|
}, SOCKET_TIMEOUT_MS);
|
|
11022
11361
|
};
|
|
11023
11362
|
resetTimeout();
|
|
11024
|
-
// Typescript complains about this being `never` if it's not assigned here.
|
|
11025
|
-
// This is assigned in `wsCreator`.
|
|
11026
|
-
let disposeSocketConnectionTimeout = () => { };
|
|
11027
|
-
const url = this.options.socketUrlTransformer(request.url);
|
|
11028
11363
|
const connector = new distExports.RSocketConnector({
|
|
11029
11364
|
transport: new WebsocketClientTransport({
|
|
11030
11365
|
url,
|
|
11031
11366
|
wsCreator: (url) => {
|
|
11032
|
-
const socket = this.createSocket(url);
|
|
11033
|
-
|
|
11034
|
-
closed: () => {
|
|
11035
|
-
// Allow closing the underlying WebSocket if the stream was closed before the
|
|
11036
|
-
// RSocket connect completed. This should effectively abort the request.
|
|
11037
|
-
socket.close();
|
|
11038
|
-
}
|
|
11039
|
-
});
|
|
11040
|
-
socket.addEventListener('message', (event) => {
|
|
11367
|
+
const socket = (pendingSocket = this.createSocket(url));
|
|
11368
|
+
socket.addEventListener('message', () => {
|
|
11041
11369
|
resetTimeout();
|
|
11042
11370
|
});
|
|
11043
11371
|
return socket;
|
|
@@ -11057,43 +11385,40 @@ class AbstractRemote {
|
|
|
11057
11385
|
}
|
|
11058
11386
|
}
|
|
11059
11387
|
});
|
|
11060
|
-
let rsocket;
|
|
11061
11388
|
try {
|
|
11062
11389
|
rsocket = await connector.connect();
|
|
11063
11390
|
// The connection is established, we no longer need to monitor the initial timeout
|
|
11064
|
-
|
|
11391
|
+
pendingSocket = null;
|
|
11065
11392
|
}
|
|
11066
11393
|
catch (ex) {
|
|
11067
11394
|
this.logger.error(`Failed to connect WebSocket`, ex);
|
|
11068
|
-
|
|
11069
|
-
if (!stream.closed) {
|
|
11070
|
-
await stream.close();
|
|
11071
|
-
}
|
|
11395
|
+
abortRequest();
|
|
11072
11396
|
throw ex;
|
|
11073
11397
|
}
|
|
11074
11398
|
resetTimeout();
|
|
11075
|
-
let socketIsClosed = false;
|
|
11076
|
-
const closeSocket = () => {
|
|
11077
|
-
clearTimeout(keepAliveTimeout);
|
|
11078
|
-
if (socketIsClosed) {
|
|
11079
|
-
return;
|
|
11080
|
-
}
|
|
11081
|
-
socketIsClosed = true;
|
|
11082
|
-
rsocket.close();
|
|
11083
|
-
};
|
|
11084
11399
|
// Helps to prevent double close scenarios
|
|
11085
|
-
rsocket.onClose(() => (
|
|
11086
|
-
|
|
11087
|
-
let pendingEventsCount = syncQueueRequestSize;
|
|
11088
|
-
const disposeClosedListener = stream.registerListener({
|
|
11089
|
-
closed: () => {
|
|
11090
|
-
closeSocket();
|
|
11091
|
-
disposeClosedListener();
|
|
11092
|
-
}
|
|
11093
|
-
});
|
|
11094
|
-
const socket = await new Promise((resolve, reject) => {
|
|
11400
|
+
rsocket.onClose(() => (rsocket = null));
|
|
11401
|
+
return await new Promise((resolve, reject) => {
|
|
11095
11402
|
let connectionEstablished = false;
|
|
11096
|
-
|
|
11403
|
+
let pendingEventsCount = syncQueueRequestSize;
|
|
11404
|
+
let paused = false;
|
|
11405
|
+
let res = null;
|
|
11406
|
+
function requestMore() {
|
|
11407
|
+
const delta = syncQueueRequestSize - pendingEventsCount;
|
|
11408
|
+
if (!paused && delta > 0) {
|
|
11409
|
+
res?.request(delta);
|
|
11410
|
+
pendingEventsCount = syncQueueRequestSize;
|
|
11411
|
+
}
|
|
11412
|
+
}
|
|
11413
|
+
const events = new domExports.EventIterator((q) => {
|
|
11414
|
+
queue = q;
|
|
11415
|
+
q.on('highWater', () => (paused = true));
|
|
11416
|
+
q.on('lowWater', () => {
|
|
11417
|
+
paused = false;
|
|
11418
|
+
requestMore();
|
|
11419
|
+
});
|
|
11420
|
+
}, { highWaterMark: SYNC_QUEUE_REQUEST_HIGH_WATER, lowWaterMark: SYNC_QUEUE_REQUEST_LOW_WATER })[symbolAsyncIterator]();
|
|
11421
|
+
res = rsocket.requestStream({
|
|
11097
11422
|
data: toBuffer(options.data),
|
|
11098
11423
|
metadata: toBuffer({
|
|
11099
11424
|
path
|
|
@@ -11118,7 +11443,7 @@ class AbstractRemote {
|
|
|
11118
11443
|
}
|
|
11119
11444
|
// RSocket will close the RSocket stream automatically
|
|
11120
11445
|
// Close the downstream stream as well - this will close the RSocket connection and WebSocket
|
|
11121
|
-
|
|
11446
|
+
abortRequest();
|
|
11122
11447
|
// Handles cases where the connection failed e.g. auth error or connection error
|
|
11123
11448
|
if (!connectionEstablished) {
|
|
11124
11449
|
reject(e);
|
|
@@ -11128,41 +11453,40 @@ class AbstractRemote {
|
|
|
11128
11453
|
// The connection is active
|
|
11129
11454
|
if (!connectionEstablished) {
|
|
11130
11455
|
connectionEstablished = true;
|
|
11131
|
-
resolve(
|
|
11456
|
+
resolve(events);
|
|
11132
11457
|
}
|
|
11133
11458
|
const { data } = payload;
|
|
11459
|
+
if (data) {
|
|
11460
|
+
queue.push(data);
|
|
11461
|
+
}
|
|
11134
11462
|
// Less events are now pending
|
|
11135
11463
|
pendingEventsCount--;
|
|
11136
|
-
|
|
11137
|
-
|
|
11138
|
-
}
|
|
11139
|
-
stream.enqueueData(data);
|
|
11464
|
+
// Request another event (unless the downstream consumer is paused).
|
|
11465
|
+
requestMore();
|
|
11140
11466
|
},
|
|
11141
11467
|
onComplete: () => {
|
|
11142
|
-
|
|
11468
|
+
abortRequest(); // this will also emit a done event
|
|
11143
11469
|
},
|
|
11144
11470
|
onExtension: () => { }
|
|
11145
11471
|
});
|
|
11146
11472
|
});
|
|
11147
|
-
const l = stream.registerListener({
|
|
11148
|
-
lowWater: async () => {
|
|
11149
|
-
// Request to fill up the queue
|
|
11150
|
-
const required = syncQueueRequestSize - pendingEventsCount;
|
|
11151
|
-
if (required > 0) {
|
|
11152
|
-
socket.request(syncQueueRequestSize - pendingEventsCount);
|
|
11153
|
-
pendingEventsCount = syncQueueRequestSize;
|
|
11154
|
-
}
|
|
11155
|
-
},
|
|
11156
|
-
closed: () => {
|
|
11157
|
-
l();
|
|
11158
|
-
}
|
|
11159
|
-
});
|
|
11160
|
-
return stream;
|
|
11161
11473
|
}
|
|
11162
11474
|
/**
|
|
11163
|
-
*
|
|
11475
|
+
* @returns Whether the HTTP implementation on this platform can receive streamed binary responses. This is true on
|
|
11476
|
+
* all platforms except React Native (who would have guessed...), where we must not request BSON responses.
|
|
11477
|
+
*
|
|
11478
|
+
* @see https://github.com/react-native-community/fetch?tab=readme-ov-file#motivation
|
|
11164
11479
|
*/
|
|
11165
|
-
|
|
11480
|
+
get supportsStreamingBinaryResponses() {
|
|
11481
|
+
return true;
|
|
11482
|
+
}
|
|
11483
|
+
/**
|
|
11484
|
+
* Posts a `/sync/stream` request, asserts that it completes successfully and returns the streaming response as an
|
|
11485
|
+
* async iterator of byte blobs.
|
|
11486
|
+
*
|
|
11487
|
+
* To cancel the async iterator, use the abort signal from {@link SyncStreamOptions} passed to this method.
|
|
11488
|
+
*/
|
|
11489
|
+
async fetchStreamRaw(options) {
|
|
11166
11490
|
const { data, path, headers, abortSignal } = options;
|
|
11167
11491
|
const request = await this.buildRequest(path);
|
|
11168
11492
|
/**
|
|
@@ -11174,119 +11498,94 @@ class AbstractRemote {
|
|
|
11174
11498
|
* Aborting the active fetch request while it is being consumed seems to throw
|
|
11175
11499
|
* an unhandled exception on the window level.
|
|
11176
11500
|
*/
|
|
11177
|
-
if (abortSignal
|
|
11178
|
-
throw new AbortOperation('Abort request received before making
|
|
11501
|
+
if (abortSignal.aborted) {
|
|
11502
|
+
throw new AbortOperation('Abort request received before making fetchStreamRaw request');
|
|
11179
11503
|
}
|
|
11180
11504
|
const controller = new AbortController();
|
|
11181
|
-
let
|
|
11182
|
-
abortSignal
|
|
11183
|
-
|
|
11505
|
+
let reader = null;
|
|
11506
|
+
abortSignal.addEventListener('abort', () => {
|
|
11507
|
+
const reason = abortSignal.reason ??
|
|
11508
|
+
new AbortOperation('Cancelling network request before it resolves. Abort signal has been received.');
|
|
11509
|
+
if (reader == null) {
|
|
11184
11510
|
// Only abort via the abort controller if the request has not resolved yet
|
|
11185
|
-
controller.abort(
|
|
11186
|
-
|
|
11511
|
+
controller.abort(reason);
|
|
11512
|
+
}
|
|
11513
|
+
else {
|
|
11514
|
+
reader.cancel(reason).catch(() => {
|
|
11515
|
+
// Cancelling the reader might rethrow an exception we would have handled by throwing in next(). So we can
|
|
11516
|
+
// ignore it here.
|
|
11517
|
+
});
|
|
11187
11518
|
}
|
|
11188
11519
|
});
|
|
11189
|
-
|
|
11190
|
-
|
|
11191
|
-
|
|
11192
|
-
|
|
11193
|
-
|
|
11194
|
-
|
|
11195
|
-
|
|
11196
|
-
|
|
11197
|
-
|
|
11520
|
+
let res;
|
|
11521
|
+
let responseIsBson = false;
|
|
11522
|
+
try {
|
|
11523
|
+
const ndJson = 'application/x-ndjson';
|
|
11524
|
+
const bson = 'application/vnd.powersync.bson-stream';
|
|
11525
|
+
res = await this.fetch(request.url, {
|
|
11526
|
+
method: 'POST',
|
|
11527
|
+
headers: {
|
|
11528
|
+
...headers,
|
|
11529
|
+
...request.headers,
|
|
11530
|
+
accept: this.supportsStreamingBinaryResponses ? `${bson};q=0.9,${ndJson};q=0.8` : ndJson
|
|
11531
|
+
},
|
|
11532
|
+
body: JSON.stringify(data),
|
|
11533
|
+
signal: controller.signal,
|
|
11534
|
+
cache: 'no-store',
|
|
11535
|
+
...(this.options.fetchOptions ?? {}),
|
|
11536
|
+
...options.fetchOptions
|
|
11537
|
+
});
|
|
11538
|
+
if (!res.ok || !res.body) {
|
|
11539
|
+
const text = await res.text();
|
|
11540
|
+
this.logger.error(`Could not POST streaming to ${path} - ${res.status} - ${res.statusText}: ${text}`);
|
|
11541
|
+
const error = new Error(`HTTP ${res.statusText}: ${text}`);
|
|
11542
|
+
error.status = res.status;
|
|
11543
|
+
throw error;
|
|
11544
|
+
}
|
|
11545
|
+
const contentType = res.headers.get('content-type');
|
|
11546
|
+
responseIsBson = contentType == bson;
|
|
11547
|
+
}
|
|
11548
|
+
catch (ex) {
|
|
11198
11549
|
if (ex.name == 'AbortError') {
|
|
11199
11550
|
throw new AbortOperation(`Pending fetch request to ${request.url} has been aborted.`);
|
|
11200
11551
|
}
|
|
11201
11552
|
throw ex;
|
|
11202
|
-
});
|
|
11203
|
-
if (!res) {
|
|
11204
|
-
throw new Error('Fetch request was aborted');
|
|
11205
|
-
}
|
|
11206
|
-
requestResolved = true;
|
|
11207
|
-
if (!res.ok || !res.body) {
|
|
11208
|
-
const text = await res.text();
|
|
11209
|
-
this.logger.error(`Could not POST streaming to ${path} - ${res.status} - ${res.statusText}: ${text}`);
|
|
11210
|
-
const error = new Error(`HTTP ${res.statusText}: ${text}`);
|
|
11211
|
-
error.status = res.status;
|
|
11212
|
-
throw error;
|
|
11213
11553
|
}
|
|
11214
|
-
|
|
11215
|
-
|
|
11216
|
-
|
|
11217
|
-
|
|
11218
|
-
|
|
11219
|
-
const closeReader = async () => {
|
|
11220
|
-
try {
|
|
11221
|
-
readerReleased = true;
|
|
11222
|
-
await reader.cancel();
|
|
11223
|
-
}
|
|
11224
|
-
catch (ex) {
|
|
11225
|
-
// an error will throw if the reader hasn't been used yet
|
|
11226
|
-
}
|
|
11227
|
-
reader.releaseLock();
|
|
11228
|
-
};
|
|
11229
|
-
const stream = new DataStream({
|
|
11230
|
-
logger: this.logger,
|
|
11231
|
-
mapLine: mapLine,
|
|
11232
|
-
pressure: {
|
|
11233
|
-
highWaterMark: 20,
|
|
11234
|
-
lowWaterMark: 10
|
|
11235
|
-
}
|
|
11236
|
-
});
|
|
11237
|
-
abortSignal?.addEventListener('abort', () => {
|
|
11238
|
-
closeReader();
|
|
11239
|
-
stream.close();
|
|
11240
|
-
});
|
|
11241
|
-
const decoder = this.createTextDecoder();
|
|
11242
|
-
let buffer = '';
|
|
11243
|
-
const consumeStream = async () => {
|
|
11244
|
-
while (!stream.closed && !abortSignal?.aborted && !readerReleased) {
|
|
11245
|
-
const { done, value } = await reader.read();
|
|
11246
|
-
if (done) {
|
|
11247
|
-
const remaining = buffer.trim();
|
|
11248
|
-
if (remaining.length != 0) {
|
|
11249
|
-
stream.enqueueData(remaining);
|
|
11250
|
-
}
|
|
11251
|
-
stream.close();
|
|
11252
|
-
await closeReader();
|
|
11253
|
-
return;
|
|
11554
|
+
reader = res.body.getReader();
|
|
11555
|
+
const stream = {
|
|
11556
|
+
next: async () => {
|
|
11557
|
+
if (controller.signal.aborted) {
|
|
11558
|
+
return doneResult;
|
|
11254
11559
|
}
|
|
11255
|
-
|
|
11256
|
-
|
|
11257
|
-
const lines = buffer.split('\n');
|
|
11258
|
-
for (var i = 0; i < lines.length - 1; i++) {
|
|
11259
|
-
var l = lines[i].trim();
|
|
11260
|
-
if (l.length > 0) {
|
|
11261
|
-
stream.enqueueData(l);
|
|
11262
|
-
}
|
|
11560
|
+
try {
|
|
11561
|
+
return await reader.read();
|
|
11263
11562
|
}
|
|
11264
|
-
|
|
11265
|
-
|
|
11266
|
-
|
|
11267
|
-
|
|
11268
|
-
|
|
11269
|
-
|
|
11270
|
-
|
|
11271
|
-
dispose();
|
|
11272
|
-
},
|
|
11273
|
-
closed: () => {
|
|
11274
|
-
resolve();
|
|
11275
|
-
dispose();
|
|
11276
|
-
}
|
|
11277
|
-
});
|
|
11278
|
-
});
|
|
11563
|
+
catch (ex) {
|
|
11564
|
+
if (controller.signal.aborted) {
|
|
11565
|
+
// .read() completes with an error if we cancel the reader, which we do to disconnect. So this is just
|
|
11566
|
+
// things working as intended, we can return a done event and consider the exception handled.
|
|
11567
|
+
return doneResult;
|
|
11568
|
+
}
|
|
11569
|
+
throw ex;
|
|
11279
11570
|
}
|
|
11280
11571
|
}
|
|
11281
11572
|
};
|
|
11282
|
-
|
|
11283
|
-
|
|
11284
|
-
|
|
11285
|
-
|
|
11286
|
-
|
|
11287
|
-
|
|
11288
|
-
|
|
11289
|
-
|
|
11573
|
+
return { isBson: responseIsBson, stream };
|
|
11574
|
+
}
|
|
11575
|
+
/**
|
|
11576
|
+
* Posts a `/sync/stream` request.
|
|
11577
|
+
*
|
|
11578
|
+
* Depending on the `Content-Type` of the response, this returns strings for sync lines or encoded BSON documents as
|
|
11579
|
+
* {@link Uint8Array}s.
|
|
11580
|
+
*/
|
|
11581
|
+
async fetchStream(options) {
|
|
11582
|
+
const { isBson, stream } = await this.fetchStreamRaw(options);
|
|
11583
|
+
if (isBson) {
|
|
11584
|
+
return extractBsonObjects(stream);
|
|
11585
|
+
}
|
|
11586
|
+
else {
|
|
11587
|
+
return extractJsonLines(stream, this.createTextDecoder());
|
|
11588
|
+
}
|
|
11290
11589
|
}
|
|
11291
11590
|
}
|
|
11292
11591
|
|
|
@@ -11421,6 +11720,7 @@ class AbstractStreamingSyncImplementation extends BaseObserver {
|
|
|
11421
11720
|
streamingSyncPromise;
|
|
11422
11721
|
logger;
|
|
11423
11722
|
activeStreams;
|
|
11723
|
+
connectionMayHaveChanged = false;
|
|
11424
11724
|
isUploadingCrud = false;
|
|
11425
11725
|
notifyCompletedUploads;
|
|
11426
11726
|
handleActiveStreamsChange;
|
|
@@ -11700,6 +12000,11 @@ The next upload iteration will be delayed.`);
|
|
|
11700
12000
|
shouldDelayRetry = false;
|
|
11701
12001
|
// A disconnect was requested, we should not delay since there is no explicit retry
|
|
11702
12002
|
}
|
|
12003
|
+
else if (this.connectionMayHaveChanged && ex.message?.indexOf('No iteration is active') >= 0) {
|
|
12004
|
+
this.connectionMayHaveChanged = false;
|
|
12005
|
+
this.logger.info('Sync error after changed connection, retrying immediately');
|
|
12006
|
+
shouldDelayRetry = false;
|
|
12007
|
+
}
|
|
11703
12008
|
else {
|
|
11704
12009
|
this.logger.error(ex);
|
|
11705
12010
|
}
|
|
@@ -11730,6 +12035,15 @@ The next upload iteration will be delayed.`);
|
|
|
11730
12035
|
// Mark as disconnected if here
|
|
11731
12036
|
this.updateSyncStatus({ connected: false, connecting: false });
|
|
11732
12037
|
}
|
|
12038
|
+
/**
 * Flags that the sync connection (or the set of active streams) may have changed, so that a subsequent
 * "No iteration is active" sync error triggers an immediate reconnect instead of a delayed retry.
 */
markConnectionMayHaveChanged() {
    // By setting this field, we'll immediately retry if the next sync event causes an error triggered by us not having
    // an active sync iteration on the connection in use.
    this.connectionMayHaveChanged = true;
    // This triggers a `powersync_control` invocation if a sync iteration is currently active. This is a cheap call to
    // make when no subscriptions have actually changed, we're mainly interested in this immediately throwing if no
    // iteration is active. That allows us to reconnect ASAP, instead of having to wait for the next sync line.
    this.handleActiveStreamsChange?.();
}
|
|
11733
12047
|
async collectLocalBucketState() {
|
|
11734
12048
|
const bucketEntries = await this.options.adapter.getBucketStates();
|
|
11735
12049
|
const req = bucketEntries.map((entry) => ({
|
|
@@ -11794,6 +12108,19 @@ The next upload iteration will be delayed.`);
|
|
|
11794
12108
|
}
|
|
11795
12109
|
});
|
|
11796
12110
|
}
|
|
12111
|
+
async receiveSyncLines(data) {
|
|
12112
|
+
const { options, connection, bson } = data;
|
|
12113
|
+
const remote = this.options.remote;
|
|
12114
|
+
if (connection.connectionMethod == exports.SyncStreamConnectionMethod.HTTP) {
|
|
12115
|
+
return await remote.fetchStream(options);
|
|
12116
|
+
}
|
|
12117
|
+
else {
|
|
12118
|
+
return await this.options.remote.socketStreamRaw({
|
|
12119
|
+
...options,
|
|
12120
|
+
...{ fetchStrategy: connection.fetchStrategy }
|
|
12121
|
+
}, bson);
|
|
12122
|
+
}
|
|
12123
|
+
}
|
|
11797
12124
|
async legacyStreamingSyncIteration(signal, resolvedOptions) {
|
|
11798
12125
|
const rawTables = resolvedOptions.serializedSchema?.raw_tables;
|
|
11799
12126
|
if (rawTables != null && rawTables.length) {
|
|
@@ -11823,42 +12150,27 @@ The next upload iteration will be delayed.`);
|
|
|
11823
12150
|
client_id: clientId
|
|
11824
12151
|
}
|
|
11825
12152
|
};
|
|
11826
|
-
|
|
11827
|
-
|
|
11828
|
-
|
|
11829
|
-
|
|
11830
|
-
|
|
11831
|
-
|
|
11832
|
-
|
|
11833
|
-
|
|
11834
|
-
|
|
11835
|
-
|
|
11836
|
-
|
|
11837
|
-
|
|
11838
|
-
|
|
11839
|
-
|
|
11840
|
-
stream = await this.options.remote.socketStreamRaw({
|
|
11841
|
-
...syncOptions,
|
|
11842
|
-
...{ fetchStrategy: resolvedOptions.fetchStrategy }
|
|
11843
|
-
}, (payload) => {
|
|
11844
|
-
if (payload instanceof Uint8Array) {
|
|
11845
|
-
return bson.deserialize(payload);
|
|
11846
|
-
}
|
|
11847
|
-
else {
|
|
11848
|
-
// Directly enqueued by us
|
|
11849
|
-
return payload;
|
|
11850
|
-
}
|
|
11851
|
-
}, bson);
|
|
11852
|
-
}
|
|
12153
|
+
const bson = await this.options.remote.getBSON();
|
|
12154
|
+
const source = await this.receiveSyncLines({
|
|
12155
|
+
options: syncOptions,
|
|
12156
|
+
connection: resolvedOptions,
|
|
12157
|
+
bson
|
|
12158
|
+
});
|
|
12159
|
+
const stream = injectable(map(source, (line) => {
|
|
12160
|
+
if (typeof line == 'string') {
|
|
12161
|
+
return JSON.parse(line);
|
|
12162
|
+
}
|
|
12163
|
+
else {
|
|
12164
|
+
return bson.deserialize(line);
|
|
12165
|
+
}
|
|
12166
|
+
}));
|
|
11853
12167
|
this.logger.debug('Stream established. Processing events');
|
|
11854
12168
|
this.notifyCompletedUploads = () => {
|
|
11855
|
-
|
|
11856
|
-
stream.enqueueData({ crud_upload_completed: null });
|
|
11857
|
-
}
|
|
12169
|
+
stream.inject({ crud_upload_completed: null });
|
|
11858
12170
|
};
|
|
11859
|
-
while (
|
|
11860
|
-
const line = await stream.
|
|
11861
|
-
if (
|
|
12171
|
+
while (true) {
|
|
12172
|
+
const { value: line, done } = await stream.next();
|
|
12173
|
+
if (done) {
|
|
11862
12174
|
// The stream has closed while waiting
|
|
11863
12175
|
return;
|
|
11864
12176
|
}
|
|
@@ -12037,14 +12349,17 @@ The next upload iteration will be delayed.`);
|
|
|
12037
12349
|
const syncImplementation = this;
|
|
12038
12350
|
const adapter = this.options.adapter;
|
|
12039
12351
|
const remote = this.options.remote;
|
|
12352
|
+
const controller = new AbortController();
|
|
12353
|
+
const abort = () => {
|
|
12354
|
+
return controller.abort(signal.reason);
|
|
12355
|
+
};
|
|
12356
|
+
signal.addEventListener('abort', abort);
|
|
12040
12357
|
let receivingLines = null;
|
|
12041
12358
|
let hadSyncLine = false;
|
|
12042
12359
|
let hideDisconnectOnRestart = false;
|
|
12043
12360
|
if (signal.aborted) {
|
|
12044
12361
|
throw new AbortOperation('Connection request has been aborted');
|
|
12045
12362
|
}
|
|
12046
|
-
const abortController = new AbortController();
|
|
12047
|
-
signal.addEventListener('abort', () => abortController.abort());
|
|
12048
12363
|
// Pending sync lines received from the service, as well as local events that trigger a powersync_control
|
|
12049
12364
|
// invocation (local events include refreshed tokens and completed uploads).
|
|
12050
12365
|
// This is a single data stream so that we can handle all control calls from a single place.
|
|
@@ -12052,49 +12367,36 @@ The next upload iteration will be delayed.`);
|
|
|
12052
12367
|
async function connect(instr) {
|
|
12053
12368
|
const syncOptions = {
|
|
12054
12369
|
path: '/sync/stream',
|
|
12055
|
-
abortSignal:
|
|
12370
|
+
abortSignal: controller.signal,
|
|
12056
12371
|
data: instr.request
|
|
12057
12372
|
};
|
|
12058
|
-
|
|
12059
|
-
|
|
12060
|
-
|
|
12061
|
-
|
|
12062
|
-
|
|
12063
|
-
|
|
12064
|
-
|
|
12065
|
-
|
|
12066
|
-
|
|
12067
|
-
|
|
12068
|
-
|
|
12069
|
-
|
|
12070
|
-
|
|
12071
|
-
|
|
12072
|
-
|
|
12073
|
-
|
|
12074
|
-
|
|
12075
|
-
fetchStrategy: resolvedOptions.fetchStrategy
|
|
12076
|
-
}, (payload) => {
|
|
12077
|
-
if (payload instanceof Uint8Array) {
|
|
12078
|
-
return {
|
|
12079
|
-
command: exports.PowerSyncControlCommand.PROCESS_BSON_LINE,
|
|
12080
|
-
payload: payload
|
|
12081
|
-
};
|
|
12082
|
-
}
|
|
12083
|
-
else {
|
|
12084
|
-
// Directly enqueued by us
|
|
12085
|
-
return payload;
|
|
12086
|
-
}
|
|
12087
|
-
});
|
|
12088
|
-
}
|
|
12373
|
+
controlInvocations = injectable(map(await syncImplementation.receiveSyncLines({
|
|
12374
|
+
options: syncOptions,
|
|
12375
|
+
connection: resolvedOptions
|
|
12376
|
+
}), (line) => {
|
|
12377
|
+
if (typeof line == 'string') {
|
|
12378
|
+
return {
|
|
12379
|
+
command: exports.PowerSyncControlCommand.PROCESS_TEXT_LINE,
|
|
12380
|
+
payload: line
|
|
12381
|
+
};
|
|
12382
|
+
}
|
|
12383
|
+
else {
|
|
12384
|
+
return {
|
|
12385
|
+
command: exports.PowerSyncControlCommand.PROCESS_BSON_LINE,
|
|
12386
|
+
payload: line
|
|
12387
|
+
};
|
|
12388
|
+
}
|
|
12389
|
+
}));
|
|
12089
12390
|
// The rust client will set connected: true after the first sync line because that's when it gets invoked, but
|
|
12090
12391
|
// we're already connected here and can report that.
|
|
12091
12392
|
syncImplementation.updateSyncStatus({ connected: true });
|
|
12092
12393
|
try {
|
|
12093
|
-
while (
|
|
12094
|
-
|
|
12095
|
-
if (
|
|
12096
|
-
|
|
12394
|
+
while (true) {
|
|
12395
|
+
let event = await controlInvocations.next();
|
|
12396
|
+
if (event.done) {
|
|
12397
|
+
break;
|
|
12097
12398
|
}
|
|
12399
|
+
const line = event.value;
|
|
12098
12400
|
await control(line.command, line.payload);
|
|
12099
12401
|
if (!hadSyncLine) {
|
|
12100
12402
|
syncImplementation.triggerCrudUpload();
|
|
@@ -12103,12 +12405,8 @@ The next upload iteration will be delayed.`);
|
|
|
12103
12405
|
}
|
|
12104
12406
|
}
|
|
12105
12407
|
finally {
|
|
12106
|
-
|
|
12107
|
-
|
|
12108
|
-
// refreshed. That would throw after closing (and we can't handle those events either way), so set this back
|
|
12109
|
-
// to null.
|
|
12110
|
-
controlInvocations = null;
|
|
12111
|
-
await activeInstructions.close();
|
|
12408
|
+
abort();
|
|
12409
|
+
signal.removeEventListener('abort', abort);
|
|
12112
12410
|
}
|
|
12113
12411
|
}
|
|
12114
12412
|
async function stop() {
|
|
@@ -12118,6 +12416,10 @@ The next upload iteration will be delayed.`);
|
|
|
12118
12416
|
const rawResponse = await adapter.control(op, payload ?? null);
|
|
12119
12417
|
const logger = syncImplementation.logger;
|
|
12120
12418
|
logger.trace('powersync_control', op, payload == null || typeof payload == 'string' ? payload : '<bytes>', rawResponse);
|
|
12419
|
+
if (op != exports.PowerSyncControlCommand.STOP) {
|
|
12420
|
+
// Evidently we have a working connection here, otherwise powersync_control would have failed.
|
|
12421
|
+
syncImplementation.connectionMayHaveChanged = false;
|
|
12422
|
+
}
|
|
12121
12423
|
await handleInstructions(JSON.parse(rawResponse));
|
|
12122
12424
|
}
|
|
12123
12425
|
async function handleInstruction(instruction) {
|
|
@@ -12152,14 +12454,14 @@ The next upload iteration will be delayed.`);
|
|
|
12152
12454
|
remote.invalidateCredentials();
|
|
12153
12455
|
// Restart iteration after the credentials have been refreshed.
|
|
12154
12456
|
remote.fetchCredentials().then((_) => {
|
|
12155
|
-
controlInvocations?.
|
|
12457
|
+
controlInvocations?.inject({ command: exports.PowerSyncControlCommand.NOTIFY_TOKEN_REFRESHED });
|
|
12156
12458
|
}, (err) => {
|
|
12157
12459
|
syncImplementation.logger.warn('Could not prefetch credentials', err);
|
|
12158
12460
|
});
|
|
12159
12461
|
}
|
|
12160
12462
|
}
|
|
12161
12463
|
else if ('CloseSyncStream' in instruction) {
|
|
12162
|
-
|
|
12464
|
+
controller.abort();
|
|
12163
12465
|
hideDisconnectOnRestart = instruction.CloseSyncStream.hide_disconnect;
|
|
12164
12466
|
}
|
|
12165
12467
|
else if ('FlushFileSystem' in instruction) ;
|
|
@@ -12188,17 +12490,13 @@ The next upload iteration will be delayed.`);
|
|
|
12188
12490
|
}
|
|
12189
12491
|
await control(exports.PowerSyncControlCommand.START, JSON.stringify(options));
|
|
12190
12492
|
this.notifyCompletedUploads = () => {
|
|
12191
|
-
|
|
12192
|
-
controlInvocations.enqueueData({ command: exports.PowerSyncControlCommand.NOTIFY_CRUD_UPLOAD_COMPLETED });
|
|
12193
|
-
}
|
|
12493
|
+
controlInvocations?.inject({ command: exports.PowerSyncControlCommand.NOTIFY_CRUD_UPLOAD_COMPLETED });
|
|
12194
12494
|
};
|
|
12195
12495
|
this.handleActiveStreamsChange = () => {
|
|
12196
|
-
|
|
12197
|
-
|
|
12198
|
-
|
|
12199
|
-
|
|
12200
|
-
});
|
|
12201
|
-
}
|
|
12496
|
+
controlInvocations?.inject({
|
|
12497
|
+
command: exports.PowerSyncControlCommand.UPDATE_SUBSCRIPTIONS,
|
|
12498
|
+
payload: JSON.stringify(this.activeStreams)
|
|
12499
|
+
});
|
|
12202
12500
|
};
|
|
12203
12501
|
await receivingLines;
|
|
12204
12502
|
}
|
|
@@ -12849,7 +13147,7 @@ class AbstractPowerSyncDatabase extends BaseObserver {
|
|
|
12849
13147
|
this._schema = schema;
|
|
12850
13148
|
this.ready = false;
|
|
12851
13149
|
this.sdkVersion = '';
|
|
12852
|
-
this.runExclusiveMutex = new
|
|
13150
|
+
this.runExclusiveMutex = new Mutex();
|
|
12853
13151
|
// Start async init
|
|
12854
13152
|
this.subscriptions = {
|
|
12855
13153
|
firstStatusMatching: (predicate, abort) => this.waitForStatus(predicate, abort),
|
|
@@ -13314,6 +13612,10 @@ SELECT * FROM crud_entries;
|
|
|
13314
13612
|
* Execute a SQL write (INSERT/UPDATE/DELETE) query
|
|
13315
13613
|
* and optionally return results.
|
|
13316
13614
|
*
|
|
13615
|
+
* When using the default client-side [JSON-based view system](https://docs.powersync.com/architecture/client-architecture#client-side-schema-and-sqlite-database-structure),
|
|
13616
|
+
* the returned result's `rowsAffected` may be `0` for successful `UPDATE` and `DELETE` statements.
|
|
13617
|
+
* Use a `RETURNING` clause and inspect `result.rows` when you need to confirm which rows changed.
|
|
13618
|
+
*
|
|
13317
13619
|
* @param sql The SQL query to execute
|
|
13318
13620
|
* @param parameters Optional array of parameters to bind to the query
|
|
13319
13621
|
* @returns The query result as an object with structured key-value pairs
|
|
@@ -13410,7 +13712,7 @@ SELECT * FROM crud_entries;
|
|
|
13410
13712
|
async readTransaction(callback, lockTimeout = DEFAULT_LOCK_TIMEOUT_MS) {
|
|
13411
13713
|
await this.waitForReady();
|
|
13412
13714
|
return this.database.readTransaction(async (tx) => {
|
|
13413
|
-
const res = await callback(
|
|
13715
|
+
const res = await callback(tx);
|
|
13414
13716
|
await tx.rollback();
|
|
13415
13717
|
return res;
|
|
13416
13718
|
}, { timeoutMs: lockTimeout });
|
|
@@ -14187,10 +14489,8 @@ class Schema {
|
|
|
14187
14489
|
* developer instead of automatically by PowerSync.
|
|
14188
14490
|
* Since raw tables are not backed by JSON, running complex queries on them may be more efficient. Further, they allow
|
|
14189
14491
|
* using client-side table and column constraints.
|
|
14190
|
-
* Note that raw tables are only supported when using the new `SyncClientImplementation.rust` sync client.
|
|
14191
14492
|
*
|
|
14192
14493
|
* @param tables An object of (table name, raw table definition) entries.
|
|
14193
|
-
* @experimental Note that the raw tables API is still experimental and may change in the future.
|
|
14194
14494
|
*/
|
|
14195
14495
|
withRawTables(tables) {
|
|
14196
14496
|
for (const [name, rawTableDefinition] of Object.entries(tables)) {
|
|
@@ -14407,6 +14707,8 @@ exports.ControlledExecutor = ControlledExecutor;
|
|
|
14407
14707
|
exports.CrudBatch = CrudBatch;
|
|
14408
14708
|
exports.CrudEntry = CrudEntry;
|
|
14409
14709
|
exports.CrudTransaction = CrudTransaction;
|
|
14710
|
+
exports.DBAdapterDefaultMixin = DBAdapterDefaultMixin;
|
|
14711
|
+
exports.DBGetUtilsDefaultMixin = DBGetUtilsDefaultMixin;
|
|
14410
14712
|
exports.DEFAULT_CRUD_BATCH_LIMIT = DEFAULT_CRUD_BATCH_LIMIT;
|
|
14411
14713
|
exports.DEFAULT_CRUD_UPLOAD_THROTTLE_MS = DEFAULT_CRUD_UPLOAD_THROTTLE_MS;
|
|
14412
14714
|
exports.DEFAULT_INDEX_COLUMN_OPTIONS = DEFAULT_INDEX_COLUMN_OPTIONS;
|
|
@@ -14414,7 +14716,6 @@ exports.DEFAULT_INDEX_OPTIONS = DEFAULT_INDEX_OPTIONS;
|
|
|
14414
14716
|
exports.DEFAULT_LOCK_TIMEOUT_MS = DEFAULT_LOCK_TIMEOUT_MS;
|
|
14415
14717
|
exports.DEFAULT_POWERSYNC_CLOSE_OPTIONS = DEFAULT_POWERSYNC_CLOSE_OPTIONS;
|
|
14416
14718
|
exports.DEFAULT_POWERSYNC_DB_OPTIONS = DEFAULT_POWERSYNC_DB_OPTIONS;
|
|
14417
|
-
exports.DEFAULT_PRESSURE_LIMITS = DEFAULT_PRESSURE_LIMITS;
|
|
14418
14719
|
exports.DEFAULT_REMOTE_LOGGER = DEFAULT_REMOTE_LOGGER;
|
|
14419
14720
|
exports.DEFAULT_REMOTE_OPTIONS = DEFAULT_REMOTE_OPTIONS;
|
|
14420
14721
|
exports.DEFAULT_RETRY_DELAY_MS = DEFAULT_RETRY_DELAY_MS;
|
|
@@ -14425,7 +14726,6 @@ exports.DEFAULT_SYNC_CLIENT_IMPLEMENTATION = DEFAULT_SYNC_CLIENT_IMPLEMENTATION;
|
|
|
14425
14726
|
exports.DEFAULT_TABLE_OPTIONS = DEFAULT_TABLE_OPTIONS;
|
|
14426
14727
|
exports.DEFAULT_WATCH_QUERY_OPTIONS = DEFAULT_WATCH_QUERY_OPTIONS;
|
|
14427
14728
|
exports.DEFAULT_WATCH_THROTTLE_MS = DEFAULT_WATCH_THROTTLE_MS;
|
|
14428
|
-
exports.DataStream = DataStream;
|
|
14429
14729
|
exports.DifferentialQueryProcessor = DifferentialQueryProcessor;
|
|
14430
14730
|
exports.EMPTY_DIFFERENTIAL = EMPTY_DIFFERENTIAL;
|
|
14431
14731
|
exports.FalsyComparator = FalsyComparator;
|
|
@@ -14438,10 +14738,12 @@ exports.LogLevel = LogLevel;
|
|
|
14438
14738
|
exports.MAX_AMOUNT_OF_COLUMNS = MAX_AMOUNT_OF_COLUMNS;
|
|
14439
14739
|
exports.MAX_OP_ID = MAX_OP_ID;
|
|
14440
14740
|
exports.MEMORY_TRIGGER_CLAIM_MANAGER = MEMORY_TRIGGER_CLAIM_MANAGER;
|
|
14741
|
+
exports.Mutex = Mutex;
|
|
14441
14742
|
exports.OnChangeQueryProcessor = OnChangeQueryProcessor;
|
|
14442
14743
|
exports.OpType = OpType;
|
|
14443
14744
|
exports.OplogEntry = OplogEntry;
|
|
14444
14745
|
exports.Schema = Schema;
|
|
14746
|
+
exports.Semaphore = Semaphore;
|
|
14445
14747
|
exports.SqliteBucketStorage = SqliteBucketStorage;
|
|
14446
14748
|
exports.SyncDataBatch = SyncDataBatch;
|
|
14447
14749
|
exports.SyncDataBucket = SyncDataBucket;
|
|
@@ -14471,9 +14773,9 @@ exports.isStreamingSyncCheckpointDiff = isStreamingSyncCheckpointDiff;
|
|
|
14471
14773
|
exports.isStreamingSyncCheckpointPartiallyComplete = isStreamingSyncCheckpointPartiallyComplete;
|
|
14472
14774
|
exports.isStreamingSyncData = isStreamingSyncData;
|
|
14473
14775
|
exports.isSyncNewCheckpointRequest = isSyncNewCheckpointRequest;
|
|
14474
|
-
exports.mutexRunExclusive = mutexRunExclusive;
|
|
14475
14776
|
exports.parseQuery = parseQuery;
|
|
14476
14777
|
exports.runOnSchemaChange = runOnSchemaChange;
|
|
14477
14778
|
exports.sanitizeSQL = sanitizeSQL;
|
|
14478
14779
|
exports.sanitizeUUID = sanitizeUUID;
|
|
14780
|
+
exports.timeoutSignal = timeoutSignal;
|
|
14479
14781
|
//# sourceMappingURL=bundle.cjs.map
|