@powersync/common 0.0.0-dev-20260311081226 → 0.0.0-dev-20260414110516
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/bundle.cjs +775 -485
- package/dist/bundle.cjs.map +1 -1
- package/dist/bundle.mjs +769 -481
- package/dist/bundle.mjs.map +1 -1
- package/dist/bundle.node.cjs +773 -484
- package/dist/bundle.node.cjs.map +1 -1
- package/dist/bundle.node.mjs +767 -480
- package/dist/bundle.node.mjs.map +1 -1
- package/dist/index.d.cts +175 -94
- package/lib/attachments/AttachmentQueue.d.ts +10 -4
- package/lib/attachments/AttachmentQueue.js +10 -4
- package/lib/attachments/AttachmentQueue.js.map +1 -1
- package/lib/attachments/AttachmentService.js +2 -3
- package/lib/attachments/AttachmentService.js.map +1 -1
- package/lib/attachments/SyncingService.d.ts +2 -1
- package/lib/attachments/SyncingService.js +4 -5
- package/lib/attachments/SyncingService.js.map +1 -1
- package/lib/client/AbstractPowerSyncDatabase.d.ts +5 -1
- package/lib/client/AbstractPowerSyncDatabase.js +9 -5
- package/lib/client/AbstractPowerSyncDatabase.js.map +1 -1
- package/lib/client/sync/stream/AbstractRemote.d.ts +29 -8
- package/lib/client/sync/stream/AbstractRemote.js +154 -177
- package/lib/client/sync/stream/AbstractRemote.js.map +1 -1
- package/lib/client/sync/stream/AbstractStreamingSyncImplementation.d.ts +1 -0
- package/lib/client/sync/stream/AbstractStreamingSyncImplementation.js +69 -88
- package/lib/client/sync/stream/AbstractStreamingSyncImplementation.js.map +1 -1
- package/lib/client/triggers/TriggerManager.d.ts +12 -1
- package/lib/client/triggers/TriggerManagerImpl.d.ts +2 -2
- package/lib/client/triggers/TriggerManagerImpl.js +3 -2
- package/lib/client/triggers/TriggerManagerImpl.js.map +1 -1
- package/lib/db/DBAdapter.d.ts +55 -9
- package/lib/db/DBAdapter.js +126 -0
- package/lib/db/DBAdapter.js.map +1 -1
- package/lib/index.d.ts +1 -1
- package/lib/index.js +0 -1
- package/lib/index.js.map +1 -1
- package/lib/utils/async.d.ts +0 -9
- package/lib/utils/async.js +0 -9
- package/lib/utils/async.js.map +1 -1
- package/lib/utils/mutex.d.ts +47 -5
- package/lib/utils/mutex.js +146 -21
- package/lib/utils/mutex.js.map +1 -1
- package/lib/utils/queue.d.ts +16 -0
- package/lib/utils/queue.js +42 -0
- package/lib/utils/queue.js.map +1 -0
- package/lib/utils/stream_transform.d.ts +39 -0
- package/lib/utils/stream_transform.js +206 -0
- package/lib/utils/stream_transform.js.map +1 -0
- package/package.json +9 -8
- package/src/attachments/AttachmentQueue.ts +10 -4
- package/src/attachments/AttachmentService.ts +2 -3
- package/src/attachments/README.md +6 -4
- package/src/attachments/SyncingService.ts +4 -5
- package/src/client/AbstractPowerSyncDatabase.ts +9 -5
- package/src/client/sync/stream/AbstractRemote.ts +182 -206
- package/src/client/sync/stream/AbstractStreamingSyncImplementation.ts +82 -83
- package/src/client/triggers/TriggerManager.ts +13 -1
- package/src/client/triggers/TriggerManagerImpl.ts +4 -2
- package/src/db/DBAdapter.ts +167 -9
- package/src/index.ts +1 -1
- package/src/utils/async.ts +0 -11
- package/src/utils/mutex.ts +184 -26
- package/src/utils/queue.ts +48 -0
- package/src/utils/stream_transform.ts +252 -0
- package/lib/utils/DataStream.d.ts +0 -62
- package/lib/utils/DataStream.js +0 -169
- package/lib/utils/DataStream.js.map +0 -1
- package/src/utils/DataStream.ts +0 -222
package/dist/bundle.mjs
CHANGED
|
@@ -1,5 +1,3 @@
|
|
|
1
|
-
import { Mutex } from 'async-mutex';
|
|
2
|
-
|
|
3
1
|
// https://www.sqlite.org/lang_expr.html#castexpr
|
|
4
2
|
var ColumnType;
|
|
5
3
|
(function (ColumnType) {
|
|
@@ -657,7 +655,7 @@ class SyncingService {
|
|
|
657
655
|
updatedAttachments.push(downloaded);
|
|
658
656
|
break;
|
|
659
657
|
case AttachmentState.QUEUED_DELETE:
|
|
660
|
-
const deleted = await this.deleteAttachment(attachment);
|
|
658
|
+
const deleted = await this.deleteAttachment(attachment, context);
|
|
661
659
|
updatedAttachments.push(deleted);
|
|
662
660
|
break;
|
|
663
661
|
}
|
|
@@ -735,17 +733,16 @@ class SyncingService {
|
|
|
735
733
|
* On failure, defers to error handler or archives.
|
|
736
734
|
*
|
|
737
735
|
* @param attachment - The attachment record to delete
|
|
736
|
+
* @param context - Attachment context for database operations
|
|
738
737
|
* @returns Updated attachment record
|
|
739
738
|
*/
|
|
740
|
-
async deleteAttachment(attachment) {
|
|
739
|
+
async deleteAttachment(attachment, context) {
|
|
741
740
|
try {
|
|
742
741
|
await this.remoteStorage.deleteFile(attachment);
|
|
743
742
|
if (attachment.localUri) {
|
|
744
743
|
await this.localStorage.deleteFile(attachment.localUri);
|
|
745
744
|
}
|
|
746
|
-
await
|
|
747
|
-
await ctx.deleteAttachment(attachment.id);
|
|
748
|
-
});
|
|
745
|
+
await context.deleteAttachment(attachment.id);
|
|
749
746
|
return {
|
|
750
747
|
...attachment,
|
|
751
748
|
state: AttachmentState.ARCHIVED
|
|
@@ -783,32 +780,198 @@ class SyncingService {
|
|
|
783
780
|
}
|
|
784
781
|
|
|
785
782
|
/**
|
|
786
|
-
*
|
|
783
|
+
* A simple fixed-capacity queue implementation.
|
|
784
|
+
*
|
|
785
|
+
* Unlike a naive queue implemented by `array.push()` and `array.shift()`, this avoids moving array elements around
|
|
786
|
+
* and is `O(1)` for {@link addLast} and {@link removeFirst}.
|
|
787
787
|
*/
|
|
788
|
-
|
|
789
|
-
|
|
790
|
-
|
|
791
|
-
|
|
792
|
-
|
|
793
|
-
|
|
794
|
-
|
|
795
|
-
|
|
796
|
-
|
|
797
|
-
|
|
798
|
-
|
|
799
|
-
|
|
800
|
-
|
|
801
|
-
|
|
802
|
-
|
|
803
|
-
|
|
804
|
-
|
|
805
|
-
|
|
788
|
+
class Queue {
|
|
789
|
+
table;
|
|
790
|
+
// Index of the first element in the table.
|
|
791
|
+
head;
|
|
792
|
+
// Amount of items currently in the queue.
|
|
793
|
+
_length;
|
|
794
|
+
constructor(initialItems) {
|
|
795
|
+
this.table = [...initialItems];
|
|
796
|
+
this.head = 0;
|
|
797
|
+
this._length = this.table.length;
|
|
798
|
+
}
|
|
799
|
+
get isEmpty() {
|
|
800
|
+
return this.length == 0;
|
|
801
|
+
}
|
|
802
|
+
get length() {
|
|
803
|
+
return this._length;
|
|
804
|
+
}
|
|
805
|
+
removeFirst() {
|
|
806
|
+
if (this.isEmpty) {
|
|
807
|
+
throw new Error('Queue is empty');
|
|
808
|
+
}
|
|
809
|
+
const result = this.table[this.head];
|
|
810
|
+
this._length--;
|
|
811
|
+
this.table[this.head] = undefined;
|
|
812
|
+
this.head = (this.head + 1) % this.table.length;
|
|
813
|
+
return result;
|
|
814
|
+
}
|
|
815
|
+
addLast(element) {
|
|
816
|
+
if (this.length == this.table.length) {
|
|
817
|
+
throw new Error('Queue is full');
|
|
818
|
+
}
|
|
819
|
+
this.table[(this.head + this._length) % this.table.length] = element;
|
|
820
|
+
this._length++;
|
|
821
|
+
}
|
|
822
|
+
}
|
|
823
|
+
|
|
824
|
+
/**
|
|
825
|
+
* An asynchronous semaphore implementation with associated items per lease.
|
|
826
|
+
*
|
|
827
|
+
* @internal This class is meant to be used in PowerSync SDKs only, and is not part of the public API.
|
|
828
|
+
*/
|
|
829
|
+
class Semaphore {
|
|
830
|
+
// Available items that are not currently assigned to a waiter.
|
|
831
|
+
available;
|
|
832
|
+
size;
|
|
833
|
+
// Linked list of waiters. We don't expect the wait list to become particularly large, and this allows removing
|
|
834
|
+
// aborted waiters from the middle of the list efficiently.
|
|
835
|
+
firstWaiter;
|
|
836
|
+
lastWaiter;
|
|
837
|
+
constructor(elements) {
|
|
838
|
+
this.available = new Queue(elements);
|
|
839
|
+
this.size = this.available.length;
|
|
840
|
+
}
|
|
841
|
+
addWaiter(requestedItems, onAcquire) {
|
|
842
|
+
const node = {
|
|
843
|
+
isActive: true,
|
|
844
|
+
acquiredItems: [],
|
|
845
|
+
remainingItems: requestedItems,
|
|
846
|
+
onAcquire,
|
|
847
|
+
prev: this.lastWaiter
|
|
848
|
+
};
|
|
849
|
+
if (this.lastWaiter) {
|
|
850
|
+
this.lastWaiter.next = node;
|
|
851
|
+
this.lastWaiter = node;
|
|
852
|
+
}
|
|
853
|
+
else {
|
|
854
|
+
// First waiter
|
|
855
|
+
this.lastWaiter = this.firstWaiter = node;
|
|
856
|
+
}
|
|
857
|
+
return node;
|
|
858
|
+
}
|
|
859
|
+
deactivateWaiter(waiter) {
|
|
860
|
+
const { prev, next } = waiter;
|
|
861
|
+
waiter.isActive = false;
|
|
862
|
+
if (prev)
|
|
863
|
+
prev.next = next;
|
|
864
|
+
if (next)
|
|
865
|
+
next.prev = prev;
|
|
866
|
+
if (waiter == this.firstWaiter)
|
|
867
|
+
this.firstWaiter = next;
|
|
868
|
+
if (waiter == this.lastWaiter)
|
|
869
|
+
this.lastWaiter = prev;
|
|
870
|
+
}
|
|
871
|
+
requestPermits(amount, abort) {
|
|
872
|
+
if (amount <= 0 || amount > this.size) {
|
|
873
|
+
throw new Error(`Invalid amount of items requested (${amount}), must be between 1 and ${this.size}`);
|
|
874
|
+
}
|
|
875
|
+
return new Promise((resolve, reject) => {
|
|
876
|
+
function rejectAborted() {
|
|
877
|
+
reject(abort?.reason ?? new Error('Semaphore acquire aborted'));
|
|
878
|
+
}
|
|
879
|
+
if (abort?.aborted) {
|
|
880
|
+
return rejectAborted();
|
|
881
|
+
}
|
|
882
|
+
let waiter;
|
|
883
|
+
const markCompleted = () => {
|
|
884
|
+
const items = waiter.acquiredItems;
|
|
885
|
+
waiter.acquiredItems = []; // Avoid releasing items twice.
|
|
886
|
+
for (const element of items) {
|
|
887
|
+
// Give to next waiter, if possible.
|
|
888
|
+
const nextWaiter = this.firstWaiter;
|
|
889
|
+
if (nextWaiter) {
|
|
890
|
+
nextWaiter.acquiredItems.push(element);
|
|
891
|
+
nextWaiter.remainingItems--;
|
|
892
|
+
if (nextWaiter.remainingItems == 0) {
|
|
893
|
+
nextWaiter.onAcquire();
|
|
894
|
+
}
|
|
895
|
+
}
|
|
896
|
+
else {
|
|
897
|
+
// No pending waiter, return lease into pool.
|
|
898
|
+
this.available.addLast(element);
|
|
899
|
+
}
|
|
900
|
+
}
|
|
901
|
+
};
|
|
902
|
+
const onAbort = () => {
|
|
903
|
+
abort?.removeEventListener('abort', onAbort);
|
|
904
|
+
if (waiter.isActive) {
|
|
905
|
+
this.deactivateWaiter(waiter);
|
|
906
|
+
rejectAborted();
|
|
907
|
+
}
|
|
908
|
+
};
|
|
909
|
+
const resolvePromise = () => {
|
|
910
|
+
this.deactivateWaiter(waiter);
|
|
911
|
+
abort?.removeEventListener('abort', onAbort);
|
|
912
|
+
const items = waiter.acquiredItems;
|
|
913
|
+
resolve({ items, release: markCompleted });
|
|
914
|
+
};
|
|
915
|
+
waiter = this.addWaiter(amount, resolvePromise);
|
|
916
|
+
// If there are items in the pool that haven't been assigned, we can pull them into this waiter. Note that this is
|
|
917
|
+
// only the case if we're the first waiter (otherwise, items would have been assigned to an earlier waiter).
|
|
918
|
+
while (!this.available.isEmpty && waiter.remainingItems > 0) {
|
|
919
|
+
waiter.acquiredItems.push(this.available.removeFirst());
|
|
920
|
+
waiter.remainingItems--;
|
|
806
921
|
}
|
|
807
|
-
|
|
808
|
-
|
|
922
|
+
if (waiter.remainingItems == 0) {
|
|
923
|
+
return resolvePromise();
|
|
809
924
|
}
|
|
925
|
+
abort?.addEventListener('abort', onAbort);
|
|
810
926
|
});
|
|
811
|
-
}
|
|
927
|
+
}
|
|
928
|
+
/**
|
|
929
|
+
* Requests a single item from the pool.
|
|
930
|
+
*
|
|
931
|
+
* The returned `release` callback must be invoked to return the item into the pool.
|
|
932
|
+
*/
|
|
933
|
+
async requestOne(abort) {
|
|
934
|
+
const { items, release } = await this.requestPermits(1, abort);
|
|
935
|
+
return { release, item: items[0] };
|
|
936
|
+
}
|
|
937
|
+
/**
|
|
938
|
+
* Requests access to all items from the pool.
|
|
939
|
+
*
|
|
940
|
+
* The returned `release` callback must be invoked to return items into the pool.
|
|
941
|
+
*/
|
|
942
|
+
requestAll(abort) {
|
|
943
|
+
return this.requestPermits(this.size, abort);
|
|
944
|
+
}
|
|
945
|
+
}
|
|
946
|
+
/**
|
|
947
|
+
* An asynchronous mutex implementation.
|
|
948
|
+
*
|
|
949
|
+
* @internal This class is meant to be used in PowerSync SDKs only, and is not part of the public API.
|
|
950
|
+
*/
|
|
951
|
+
class Mutex {
|
|
952
|
+
inner = new Semaphore([null]);
|
|
953
|
+
async acquire(abort) {
|
|
954
|
+
const { release } = await this.inner.requestOne(abort);
|
|
955
|
+
return release;
|
|
956
|
+
}
|
|
957
|
+
async runExclusive(fn, abort) {
|
|
958
|
+
const returnMutex = await this.acquire(abort);
|
|
959
|
+
try {
|
|
960
|
+
return await fn();
|
|
961
|
+
}
|
|
962
|
+
finally {
|
|
963
|
+
returnMutex();
|
|
964
|
+
}
|
|
965
|
+
}
|
|
966
|
+
}
|
|
967
|
+
function timeoutSignal(timeout) {
|
|
968
|
+
if (timeout == null)
|
|
969
|
+
return;
|
|
970
|
+
if ('timeout' in AbortSignal)
|
|
971
|
+
return AbortSignal.timeout(timeout);
|
|
972
|
+
const controller = new AbortController();
|
|
973
|
+
setTimeout(() => controller.abort(new Error('Timeout waiting for lock')), timeout);
|
|
974
|
+
return controller.signal;
|
|
812
975
|
}
|
|
813
976
|
|
|
814
977
|
/**
|
|
@@ -857,7 +1020,7 @@ class AttachmentService {
|
|
|
857
1020
|
* Executes a callback with exclusive access to the attachment context.
|
|
858
1021
|
*/
|
|
859
1022
|
async withContext(callback) {
|
|
860
|
-
return
|
|
1023
|
+
return this.mutex.runExclusive(async () => {
|
|
861
1024
|
return callback(this.context);
|
|
862
1025
|
});
|
|
863
1026
|
}
|
|
@@ -893,9 +1056,15 @@ class AttachmentQueue {
|
|
|
893
1056
|
tableName;
|
|
894
1057
|
/** Logger instance for diagnostic information */
|
|
895
1058
|
logger;
|
|
896
|
-
/** Interval in milliseconds between periodic sync operations.
|
|
1059
|
+
/** Interval in milliseconds between periodic sync operations. Acts as a polling timer to retry
|
|
1060
|
+
* failed uploads/downloads, especially after the app goes offline. Default: 30000 (30 seconds) */
|
|
897
1061
|
syncIntervalMs = 30 * 1000;
|
|
898
|
-
/**
|
|
1062
|
+
/** Throttle duration in milliseconds for the reactive watch query on the attachments table.
|
|
1063
|
+
* When attachment records change, a watch query detects the change and triggers a sync.
|
|
1064
|
+
* This throttle prevents the sync from firing too rapidly when many changes happen in
|
|
1065
|
+
* quick succession (e.g., bulk inserts). This is distinct from syncIntervalMs — it controls
|
|
1066
|
+
* how quickly the queue reacts to changes, while syncIntervalMs controls how often it polls
|
|
1067
|
+
* for retries. Default: 30 (from DEFAULT_WATCH_THROTTLE_MS) */
|
|
899
1068
|
syncThrottleDuration;
|
|
900
1069
|
/** Whether to automatically download remote attachments. Default: true */
|
|
901
1070
|
downloadAttachments = true;
|
|
@@ -919,8 +1088,8 @@ class AttachmentQueue {
|
|
|
919
1088
|
* @param options.watchAttachments - Callback for monitoring attachment changes in your data model
|
|
920
1089
|
* @param options.tableName - Name of the table to store attachment records. Default: 'ps_attachment_queue'
|
|
921
1090
|
* @param options.logger - Logger instance. Defaults to db.logger
|
|
922
|
-
* @param options.syncIntervalMs -
|
|
923
|
-
* @param options.syncThrottleDuration - Throttle duration for
|
|
1091
|
+
* @param options.syncIntervalMs - Periodic polling interval in milliseconds for retrying failed uploads/downloads. Default: 30000
|
|
1092
|
+
* @param options.syncThrottleDuration - Throttle duration in milliseconds for the reactive watch query that detects attachment changes. Prevents rapid-fire syncs during bulk changes. Default: 30
|
|
924
1093
|
* @param options.downloadAttachments - Whether to automatically download remote attachments. Default: true
|
|
925
1094
|
* @param options.archivedCacheLimit - Maximum archived attachments before cleanup. Default: 100
|
|
926
1095
|
*/
|
|
@@ -1227,6 +1396,8 @@ var EncodingType;
|
|
|
1227
1396
|
EncodingType["Base64"] = "base64";
|
|
1228
1397
|
})(EncodingType || (EncodingType = {}));
|
|
1229
1398
|
|
|
1399
|
+
const symbolAsyncIterator = Symbol.asyncIterator ?? Symbol.for('Symbol.asyncIterator');
|
|
1400
|
+
|
|
1230
1401
|
function getDefaultExportFromCjs (x) {
|
|
1231
1402
|
return x && x.__esModule && Object.prototype.hasOwnProperty.call(x, 'default') ? x['default'] : x;
|
|
1232
1403
|
}
|
|
@@ -1307,7 +1478,7 @@ function requireEventIterator () {
|
|
|
1307
1478
|
this.removeCallback();
|
|
1308
1479
|
});
|
|
1309
1480
|
}
|
|
1310
|
-
[
|
|
1481
|
+
[symbolAsyncIterator]() {
|
|
1311
1482
|
return {
|
|
1312
1483
|
next: (value) => {
|
|
1313
1484
|
const result = this.pushQueue.shift();
|
|
@@ -1354,7 +1525,7 @@ function requireEventIterator () {
|
|
|
1354
1525
|
queue.eventHandlers[event] = fn;
|
|
1355
1526
|
},
|
|
1356
1527
|
}) || (() => { });
|
|
1357
|
-
this[
|
|
1528
|
+
this[symbolAsyncIterator] = () => queue[symbolAsyncIterator]();
|
|
1358
1529
|
Object.freeze(this);
|
|
1359
1530
|
}
|
|
1360
1531
|
}
|
|
@@ -1681,6 +1852,49 @@ var Logger = /*@__PURE__*/getDefaultExportFromCjs(loggerExports);
|
|
|
1681
1852
|
* Set of generic interfaces to allow PowerSync compatibility with
|
|
1682
1853
|
* different SQLite DB implementations.
|
|
1683
1854
|
*/
|
|
1855
|
+
/**
|
|
1856
|
+
* Implements {@link DBGetUtils} on a {@link SqlRunner}.
|
|
1857
|
+
*/
|
|
1858
|
+
function DBGetUtilsDefaultMixin(Base) {
|
|
1859
|
+
return class extends Base {
|
|
1860
|
+
async getAll(sql, parameters) {
|
|
1861
|
+
const res = await this.execute(sql, parameters);
|
|
1862
|
+
return res.rows?._array ?? [];
|
|
1863
|
+
}
|
|
1864
|
+
async getOptional(sql, parameters) {
|
|
1865
|
+
const res = await this.execute(sql, parameters);
|
|
1866
|
+
return res.rows?.item(0) ?? null;
|
|
1867
|
+
}
|
|
1868
|
+
async get(sql, parameters) {
|
|
1869
|
+
const res = await this.execute(sql, parameters);
|
|
1870
|
+
const first = res.rows?.item(0);
|
|
1871
|
+
if (!first) {
|
|
1872
|
+
throw new Error('Result set is empty');
|
|
1873
|
+
}
|
|
1874
|
+
return first;
|
|
1875
|
+
}
|
|
1876
|
+
async executeBatch(query, params = []) {
|
|
1877
|
+
// If this context can run batch statements natively, use that.
|
|
1878
|
+
// @ts-ignore
|
|
1879
|
+
if (super.executeBatch) {
|
|
1880
|
+
// @ts-ignore
|
|
1881
|
+
return super.executeBatch(query, params);
|
|
1882
|
+
}
|
|
1883
|
+
// Emulate executeBatch by running statements individually.
|
|
1884
|
+
let lastInsertId;
|
|
1885
|
+
let rowsAffected = 0;
|
|
1886
|
+
for (const set of params) {
|
|
1887
|
+
const result = await this.execute(query, set);
|
|
1888
|
+
lastInsertId = result.insertId;
|
|
1889
|
+
rowsAffected += result.rowsAffected;
|
|
1890
|
+
}
|
|
1891
|
+
return {
|
|
1892
|
+
rowsAffected,
|
|
1893
|
+
insertId: lastInsertId
|
|
1894
|
+
};
|
|
1895
|
+
}
|
|
1896
|
+
};
|
|
1897
|
+
}
|
|
1684
1898
|
/**
|
|
1685
1899
|
* Update table operation numbers from SQLite
|
|
1686
1900
|
*/
|
|
@@ -1690,6 +1904,89 @@ var RowUpdateType;
|
|
|
1690
1904
|
RowUpdateType[RowUpdateType["SQLITE_DELETE"] = 9] = "SQLITE_DELETE";
|
|
1691
1905
|
RowUpdateType[RowUpdateType["SQLITE_UPDATE"] = 23] = "SQLITE_UPDATE";
|
|
1692
1906
|
})(RowUpdateType || (RowUpdateType = {}));
|
|
1907
|
+
/**
|
|
1908
|
+
* A mixin to implement {@link DBAdapter} by delegating to {@link ConnectionPool.readLock} and
|
|
1909
|
+
* {@link ConnectionPool.writeLock}.
|
|
1910
|
+
*/
|
|
1911
|
+
function DBAdapterDefaultMixin(Base) {
|
|
1912
|
+
return class extends Base {
|
|
1913
|
+
readTransaction(fn, options) {
|
|
1914
|
+
return this.readLock((ctx) => TransactionImplementation.runWith(ctx, fn), options);
|
|
1915
|
+
}
|
|
1916
|
+
writeTransaction(fn, options) {
|
|
1917
|
+
return this.writeLock((ctx) => TransactionImplementation.runWith(ctx, fn), options);
|
|
1918
|
+
}
|
|
1919
|
+
getAll(sql, parameters) {
|
|
1920
|
+
return this.readLock((ctx) => ctx.getAll(sql, parameters));
|
|
1921
|
+
}
|
|
1922
|
+
getOptional(sql, parameters) {
|
|
1923
|
+
return this.readLock((ctx) => ctx.getOptional(sql, parameters));
|
|
1924
|
+
}
|
|
1925
|
+
get(sql, parameters) {
|
|
1926
|
+
return this.readLock((ctx) => ctx.get(sql, parameters));
|
|
1927
|
+
}
|
|
1928
|
+
execute(query, params) {
|
|
1929
|
+
return this.writeLock((ctx) => ctx.execute(query, params));
|
|
1930
|
+
}
|
|
1931
|
+
executeRaw(query, params) {
|
|
1932
|
+
return this.writeLock((ctx) => ctx.executeRaw(query, params));
|
|
1933
|
+
}
|
|
1934
|
+
executeBatch(query, params) {
|
|
1935
|
+
return this.writeTransaction((tx) => tx.executeBatch(query, params));
|
|
1936
|
+
}
|
|
1937
|
+
};
|
|
1938
|
+
}
|
|
1939
|
+
class BaseTransaction {
|
|
1940
|
+
inner;
|
|
1941
|
+
finalized = false;
|
|
1942
|
+
constructor(inner) {
|
|
1943
|
+
this.inner = inner;
|
|
1944
|
+
}
|
|
1945
|
+
async commit() {
|
|
1946
|
+
if (this.finalized) {
|
|
1947
|
+
return { rowsAffected: 0 };
|
|
1948
|
+
}
|
|
1949
|
+
this.finalized = true;
|
|
1950
|
+
return this.inner.execute('COMMIT');
|
|
1951
|
+
}
|
|
1952
|
+
async rollback() {
|
|
1953
|
+
if (this.finalized) {
|
|
1954
|
+
return { rowsAffected: 0 };
|
|
1955
|
+
}
|
|
1956
|
+
this.finalized = true;
|
|
1957
|
+
return this.inner.execute('ROLLBACK');
|
|
1958
|
+
}
|
|
1959
|
+
execute(query, params) {
|
|
1960
|
+
return this.inner.execute(query, params);
|
|
1961
|
+
}
|
|
1962
|
+
executeRaw(query, params) {
|
|
1963
|
+
return this.inner.executeRaw(query, params);
|
|
1964
|
+
}
|
|
1965
|
+
executeBatch(query, params) {
|
|
1966
|
+
return this.inner.executeBatch(query, params);
|
|
1967
|
+
}
|
|
1968
|
+
}
|
|
1969
|
+
class TransactionImplementation extends DBGetUtilsDefaultMixin(BaseTransaction) {
|
|
1970
|
+
static async runWith(ctx, fn) {
|
|
1971
|
+
let tx = new TransactionImplementation(ctx);
|
|
1972
|
+
try {
|
|
1973
|
+
await ctx.execute('BEGIN IMMEDIATE');
|
|
1974
|
+
const result = await fn(tx);
|
|
1975
|
+
await tx.commit();
|
|
1976
|
+
return result;
|
|
1977
|
+
}
|
|
1978
|
+
catch (ex) {
|
|
1979
|
+
try {
|
|
1980
|
+
await tx.rollback();
|
|
1981
|
+
}
|
|
1982
|
+
catch (ex2) {
|
|
1983
|
+
// In rare cases, a rollback may fail.
|
|
1984
|
+
// Safe to ignore.
|
|
1985
|
+
}
|
|
1986
|
+
throw ex;
|
|
1987
|
+
}
|
|
1988
|
+
}
|
|
1989
|
+
}
|
|
1693
1990
|
function isBatchedUpdateNotification(update) {
|
|
1694
1991
|
return 'tables' in update;
|
|
1695
1992
|
}
|
|
@@ -2110,15 +2407,6 @@ class ControlledExecutor {
|
|
|
2110
2407
|
}
|
|
2111
2408
|
}
|
|
2112
2409
|
|
|
2113
|
-
/**
|
|
2114
|
-
* A ponyfill for `Symbol.asyncIterator` that is compatible with the
|
|
2115
|
-
* [recommended polyfill](https://github.com/Azure/azure-sdk-for-js/blob/%40azure/core-asynciterator-polyfill_1.0.2/sdk/core/core-asynciterator-polyfill/src/index.ts#L4-L6)
|
|
2116
|
-
* we recommend for React Native.
|
|
2117
|
-
*
|
|
2118
|
-
* As long as we use this symbol (instead of `for await` and `async *`) in this package, we can be compatible with async
|
|
2119
|
-
* iterators without requiring them.
|
|
2120
|
-
*/
|
|
2121
|
-
const symbolAsyncIterator = Symbol.asyncIterator ?? Symbol.for('Symbol.asyncIterator');
|
|
2122
2410
|
/**
|
|
2123
2411
|
* Throttle a function to be called at most once every "wait" milliseconds,
|
|
2124
2412
|
* on the trailing edge.
|
|
@@ -10455,177 +10743,10 @@ function requireDist () {
|
|
|
10455
10743
|
|
|
10456
10744
|
var distExports = requireDist();
|
|
10457
10745
|
|
|
10458
|
-
var version = "1.
|
|
10746
|
+
var version = "1.51.0";
|
|
10459
10747
|
var PACKAGE = {
|
|
10460
10748
|
version: version};
|
|
10461
10749
|
|
|
10462
|
-
const DEFAULT_PRESSURE_LIMITS = {
|
|
10463
|
-
highWater: 10,
|
|
10464
|
-
lowWater: 0
|
|
10465
|
-
};
|
|
10466
|
-
/**
|
|
10467
|
-
* A very basic implementation of a data stream with backpressure support which does not use
|
|
10468
|
-
* native JS streams or async iterators.
|
|
10469
|
-
* This is handy for environments such as React Native which need polyfills for the above.
|
|
10470
|
-
*/
|
|
10471
|
-
class DataStream extends BaseObserver {
|
|
10472
|
-
options;
|
|
10473
|
-
dataQueue;
|
|
10474
|
-
isClosed;
|
|
10475
|
-
processingPromise;
|
|
10476
|
-
notifyDataAdded;
|
|
10477
|
-
logger;
|
|
10478
|
-
mapLine;
|
|
10479
|
-
constructor(options) {
|
|
10480
|
-
super();
|
|
10481
|
-
this.options = options;
|
|
10482
|
-
this.processingPromise = null;
|
|
10483
|
-
this.isClosed = false;
|
|
10484
|
-
this.dataQueue = [];
|
|
10485
|
-
this.mapLine = options?.mapLine ?? ((line) => line);
|
|
10486
|
-
this.logger = options?.logger ?? Logger.get('DataStream');
|
|
10487
|
-
if (options?.closeOnError) {
|
|
10488
|
-
const l = this.registerListener({
|
|
10489
|
-
error: (ex) => {
|
|
10490
|
-
l?.();
|
|
10491
|
-
this.close();
|
|
10492
|
-
}
|
|
10493
|
-
});
|
|
10494
|
-
}
|
|
10495
|
-
}
|
|
10496
|
-
get highWatermark() {
|
|
10497
|
-
return this.options?.pressure?.highWaterMark ?? DEFAULT_PRESSURE_LIMITS.highWater;
|
|
10498
|
-
}
|
|
10499
|
-
get lowWatermark() {
|
|
10500
|
-
return this.options?.pressure?.lowWaterMark ?? DEFAULT_PRESSURE_LIMITS.lowWater;
|
|
10501
|
-
}
|
|
10502
|
-
get closed() {
|
|
10503
|
-
return this.isClosed;
|
|
10504
|
-
}
|
|
10505
|
-
async close() {
|
|
10506
|
-
this.isClosed = true;
|
|
10507
|
-
await this.processingPromise;
|
|
10508
|
-
this.iterateListeners((l) => l.closed?.());
|
|
10509
|
-
// Discard any data in the queue
|
|
10510
|
-
this.dataQueue = [];
|
|
10511
|
-
this.listeners.clear();
|
|
10512
|
-
}
|
|
10513
|
-
/**
|
|
10514
|
-
* Enqueues data for the consumers to read
|
|
10515
|
-
*/
|
|
10516
|
-
enqueueData(data) {
|
|
10517
|
-
if (this.isClosed) {
|
|
10518
|
-
throw new Error('Cannot enqueue data into closed stream.');
|
|
10519
|
-
}
|
|
10520
|
-
this.dataQueue.push(data);
|
|
10521
|
-
this.notifyDataAdded?.();
|
|
10522
|
-
this.processQueue();
|
|
10523
|
-
}
|
|
10524
|
-
/**
|
|
10525
|
-
* Reads data once from the data stream
|
|
10526
|
-
* @returns a Data payload or Null if the stream closed.
|
|
10527
|
-
*/
|
|
10528
|
-
async read() {
|
|
10529
|
-
if (this.closed) {
|
|
10530
|
-
return null;
|
|
10531
|
-
}
|
|
10532
|
-
// Wait for any pending processing to complete first.
|
|
10533
|
-
// This ensures we register our listener before calling processQueue(),
|
|
10534
|
-
// avoiding a race where processQueue() sees no reader and returns early.
|
|
10535
|
-
if (this.processingPromise) {
|
|
10536
|
-
await this.processingPromise;
|
|
10537
|
-
}
|
|
10538
|
-
// Re-check after await - stream may have closed while we were waiting
|
|
10539
|
-
if (this.closed) {
|
|
10540
|
-
return null;
|
|
10541
|
-
}
|
|
10542
|
-
return new Promise((resolve, reject) => {
|
|
10543
|
-
const l = this.registerListener({
|
|
10544
|
-
data: async (data) => {
|
|
10545
|
-
resolve(data);
|
|
10546
|
-
// Remove the listener
|
|
10547
|
-
l?.();
|
|
10548
|
-
},
|
|
10549
|
-
closed: () => {
|
|
10550
|
-
resolve(null);
|
|
10551
|
-
l?.();
|
|
10552
|
-
},
|
|
10553
|
-
error: (ex) => {
|
|
10554
|
-
reject(ex);
|
|
10555
|
-
l?.();
|
|
10556
|
-
}
|
|
10557
|
-
});
|
|
10558
|
-
this.processQueue();
|
|
10559
|
-
});
|
|
10560
|
-
}
|
|
10561
|
-
/**
|
|
10562
|
-
* Executes a callback for each data item in the stream
|
|
10563
|
-
*/
|
|
10564
|
-
forEach(callback) {
|
|
10565
|
-
if (this.dataQueue.length <= this.lowWatermark) {
|
|
10566
|
-
this.iterateAsyncErrored(async (l) => l.lowWater?.());
|
|
10567
|
-
}
|
|
10568
|
-
return this.registerListener({
|
|
10569
|
-
data: callback
|
|
10570
|
-
});
|
|
10571
|
-
}
|
|
10572
|
-
processQueue() {
|
|
10573
|
-
if (this.processingPromise) {
|
|
10574
|
-
return;
|
|
10575
|
-
}
|
|
10576
|
-
const promise = (this.processingPromise = this._processQueue());
|
|
10577
|
-
promise.finally(() => {
|
|
10578
|
-
this.processingPromise = null;
|
|
10579
|
-
});
|
|
10580
|
-
return promise;
|
|
10581
|
-
}
|
|
10582
|
-
hasDataReader() {
|
|
10583
|
-
return Array.from(this.listeners.values()).some((l) => !!l.data);
|
|
10584
|
-
}
|
|
10585
|
-
async _processQueue() {
|
|
10586
|
-
/**
|
|
10587
|
-
* Allow listeners to mutate the queue before processing.
|
|
10588
|
-
* This allows for operations such as dropping or compressing data
|
|
10589
|
-
* on high water or requesting more data on low water.
|
|
10590
|
-
*/
|
|
10591
|
-
if (this.dataQueue.length >= this.highWatermark) {
|
|
10592
|
-
await this.iterateAsyncErrored(async (l) => l.highWater?.());
|
|
10593
|
-
}
|
|
10594
|
-
if (this.isClosed || !this.hasDataReader()) {
|
|
10595
|
-
return;
|
|
10596
|
-
}
|
|
10597
|
-
if (this.dataQueue.length) {
|
|
10598
|
-
const data = this.dataQueue.shift();
|
|
10599
|
-
const mapped = this.mapLine(data);
|
|
10600
|
-
await this.iterateAsyncErrored(async (l) => l.data?.(mapped));
|
|
10601
|
-
}
|
|
10602
|
-
if (this.dataQueue.length <= this.lowWatermark) {
|
|
10603
|
-
const dataAdded = new Promise((resolve) => {
|
|
10604
|
-
this.notifyDataAdded = resolve;
|
|
10605
|
-
});
|
|
10606
|
-
await Promise.race([this.iterateAsyncErrored(async (l) => l.lowWater?.()), dataAdded]);
|
|
10607
|
-
this.notifyDataAdded = null;
|
|
10608
|
-
}
|
|
10609
|
-
if (this.dataQueue.length > 0) {
|
|
10610
|
-
setTimeout(() => this.processQueue());
|
|
10611
|
-
}
|
|
10612
|
-
}
|
|
10613
|
-
async iterateAsyncErrored(cb) {
|
|
10614
|
-
// Important: We need to copy the listeners, as calling a listener could result in adding another
|
|
10615
|
-
// listener, resulting in infinite loops.
|
|
10616
|
-
const listeners = Array.from(this.listeners.values());
|
|
10617
|
-
for (let i of listeners) {
|
|
10618
|
-
try {
|
|
10619
|
-
await cb(i);
|
|
10620
|
-
}
|
|
10621
|
-
catch (ex) {
|
|
10622
|
-
this.logger.error(ex);
|
|
10623
|
-
this.iterateListeners((l) => l.error?.(ex));
|
|
10624
|
-
}
|
|
10625
|
-
}
|
|
10626
|
-
}
|
|
10627
|
-
}
|
|
10628
|
-
|
|
10629
10750
|
var WebsocketDuplexConnection = {};
|
|
10630
10751
|
|
|
10631
10752
|
var hasRequiredWebsocketDuplexConnection;
|
|
@@ -10788,8 +10909,215 @@ class WebsocketClientTransport {
|
|
|
10788
10909
|
}
|
|
10789
10910
|
}
|
|
10790
10911
|
|
|
10912
|
+
const doneResult = { done: true, value: undefined };
|
|
10913
|
+
function valueResult(value) {
|
|
10914
|
+
return { done: false, value };
|
|
10915
|
+
}
|
|
10916
|
+
/**
|
|
10917
|
+
* A variant of {@link Array.map} for async iterators.
|
|
10918
|
+
*/
|
|
10919
|
+
function map(source, map) {
|
|
10920
|
+
return {
|
|
10921
|
+
next: async () => {
|
|
10922
|
+
const value = await source.next();
|
|
10923
|
+
if (value.done) {
|
|
10924
|
+
return value;
|
|
10925
|
+
}
|
|
10926
|
+
else {
|
|
10927
|
+
return { value: map(value.value) };
|
|
10928
|
+
}
|
|
10929
|
+
}
|
|
10930
|
+
};
|
|
10931
|
+
}
|
|
10932
|
+
/**
 * Expands a source async iterator by allowing to inject events asynchronously.
 *
 * The resulting iterator will emit all events from its source. Additionally though, events can be injected. These
 * events are dropped once the main iterator completes, but are otherwise forwarded.
 *
 * The iterator completes when its source completes, and it supports backpressure by only calling `next()` on the source
 * in response to a `next()` call from downstream if no pending injected events can be dispatched.
 *
 * @param source The async iterator supplying the primary event stream.
 * @returns An object with `next()` (async-iterator protocol) and `inject(event)`.
 */
function injectable(source) {
    let sourceIsDone = false;
    let waiter = undefined; // An active, waiting next() call.
    // A pending upstream event that couldn't be dispatched because inject() has been called before it was resolved.
    let pendingSourceEvent = null;
    // Injected events queued while no next() call was waiting.
    let pendingInjectedEvents = [];
    // Atomically take the current waiter (if any), leaving none registered.
    const consumeWaiter = () => {
        const pending = waiter;
        waiter = undefined;
        return pending;
    };
    // Start a single source.next() call and route its outcome either to the
    // currently-registered waiter or, if that waiter was already satisfied by
    // an injected event in the meantime, stash it as pendingSourceEvent.
    const fetchFromSource = () => {
        const resolveWaiter = (propagate) => {
            const active = consumeWaiter();
            if (active) {
                propagate(active);
            }
            else {
                // No waiter anymore (inject() consumed it first): remember the
                // outcome so the next downstream next() call picks it up.
                pendingSourceEvent = propagate;
            }
        };
        const nextFromSource = source.next();
        nextFromSource.then((value) => {
            sourceIsDone = value.done == true;
            resolveWaiter((w) => w.resolve(value));
        }, (error) => {
            resolveWaiter((w) => w.reject(error));
        });
    };
    return {
        next: () => {
            return new Promise((resolve, reject) => {
                // First priority: Dispatch ready upstream events.
                if (sourceIsDone) {
                    // Note: injected events still queued at this point are dropped by design.
                    return resolve(doneResult);
                }
                if (pendingSourceEvent) {
                    pendingSourceEvent({ resolve, reject });
                    pendingSourceEvent = null;
                    return;
                }
                // Second priority: Dispatch injected events
                if (pendingInjectedEvents.length) {
                    return resolve(valueResult(pendingInjectedEvents.shift()));
                }
                // Nothing pending? Fetch from source
                waiter = { resolve, reject };
                return fetchFromSource();
            });
        },
        inject: (event) => {
            // If a next() call is currently waiting, satisfy it immediately;
            // otherwise queue the event for a future next() call.
            const pending = consumeWaiter();
            if (pending != null) {
                pending.resolve(valueResult(event));
            }
            else {
                pendingInjectedEvents.push(event);
            }
        }
    };
}
|
|
11002
|
+
/**
 * Splits a byte stream at line endings, emitting each non-empty, trimmed line
 * as a string (used for newline-delimited JSON responses).
 *
 * @param source Async iterator over byte chunks.
 * @param decoder A TextDecoder used incrementally across chunk boundaries.
 * @returns An async iterator over complete text lines.
 */
function extractJsonLines(source, decoder) {
    // Partial text carried over until the next newline arrives.
    let tail = '';
    // Fully split lines not yet handed to the consumer.
    const readyLines = [];
    // Set once the trailing buffer has been flushed after the source ended.
    let flushedTrailer = false;
    const next = async () => {
        for (;;) {
            if (flushedTrailer) {
                return doneResult;
            }
            const queued = readyLines.shift();
            if (queued) {
                return { done: false, value: queued };
            }
            const event = await source.next();
            if (event.done) {
                const trailer = tail.trim();
                if (trailer.length != 0) {
                    // Emit the unterminated final line once, then report completion.
                    flushedTrailer = true;
                    return { done: false, value: trailer };
                }
                return doneResult;
            }
            // stream: true lets multi-byte characters span chunk boundaries.
            tail += decoder.decode(event.value, { stream: true });
            const pieces = tail.split('\n');
            tail = pieces.pop();
            for (const piece of pieces) {
                const trimmed = piece.trim();
                if (trimmed.length > 0) {
                    readyLines.push(trimmed);
                }
            }
        }
    };
    return { next };
}
|
|
11044
|
+
/**
 * Splits a concatenated stream of BSON objects by emitting individual objects.
 *
 * Each BSON document starts with a 4-byte little-endian int32 total length
 * (which includes the 4 header bytes themselves); this iterator re-assembles
 * documents that may be split across or share upstream chunks.
 *
 * @param source Async iterator over Uint8Array chunks.
 * @returns An async iterator emitting one Uint8Array per complete BSON document
 *          (including its length header).
 * @throws Error if the stream ends mid-document or a document declares an
 *         invalid length.
 */
function extractBsonObjects(source) {
    // Fully read but not emitted yet.
    const completedObjects = [];
    // Whether source has returned { done: true }. We do the same once completed objects have been emitted.
    let isDone = false;
    // Scratch buffer for assembling the 4-byte length header.
    const lengthBuffer = new DataView(new ArrayBuffer(4));
    // Non-null while we're copying bytes into the current document.
    let objectBody = null;
    // If we're parsing the length field, a number between 1 and 4 (inclusive) describing remaining bytes in the header.
    // If we're consuming a document, the bytes remaining.
    let remainingLength = 4;
    return {
        async next() {
            while (true) {
                // Before fetching new data from upstream, return completed objects.
                if (completedObjects.length) {
                    return valueResult(completedObjects.shift());
                }
                if (isDone) {
                    return doneResult;
                }
                const upstreamEvent = await source.next();
                if (upstreamEvent.done) {
                    isDone = true;
                    // A partially-read header or body at end of stream is a protocol error.
                    if (objectBody || remainingLength != 4) {
                        throw new Error('illegal end of stream in BSON object');
                    }
                    return doneResult;
                }
                const chunk = upstreamEvent.value;
                for (let i = 0; i < chunk.length;) {
                    const availableInData = chunk.length - i;
                    if (objectBody) {
                        // We're in the middle of reading a BSON document.
                        const bytesToRead = Math.min(availableInData, remainingLength);
                        // View into the chunk (respecting byteOffset for views over shared buffers).
                        const copySource = new Uint8Array(chunk.buffer, chunk.byteOffset + i, bytesToRead);
                        // Destination offset: total length minus what's still missing.
                        objectBody.set(copySource, objectBody.length - remainingLength);
                        i += bytesToRead;
                        remainingLength -= bytesToRead;
                        if (remainingLength == 0) {
                            completedObjects.push(objectBody);
                            // Prepare to read another document, starting with its length
                            objectBody = null;
                            remainingLength = 4;
                        }
                    }
                    else {
                        // Copy up to 4 bytes into lengthBuffer, depending on how many we still need.
                        const bytesToRead = Math.min(availableInData, remainingLength);
                        for (let j = 0; j < bytesToRead; j++) {
                            lengthBuffer.setUint8(4 - remainingLength + j, chunk[i + j]);
                        }
                        i += bytesToRead;
                        remainingLength -= bytesToRead;
                        if (remainingLength == 0) {
                            // Transition from reading length header to reading document. Subtracting 4 because the length of the
                            // header is included in length.
                            const length = lengthBuffer.getInt32(0, true /* little endian */);
                            remainingLength = length - 4;
                            if (remainingLength < 1) {
                                throw new Error(`invalid length for bson: ${length}`);
                            }
                            objectBody = new Uint8Array(length);
                            // Re-write the header into the output so each emitted
                            // document is a complete, standalone BSON byte sequence.
                            new DataView(objectBody.buffer).setInt32(0, length, true);
                        }
                    }
                }
            }
        }
    };
}
|
|
11117
|
+
|
|
10791
11118
|
// Matches one or more trailing slashes — presumably used to normalize endpoint URLs; confirm at call sites.
const POWERSYNC_TRAILING_SLASH_MATCH = /\/+$/;
// SDK version string, reported to the sync service (e.g. in the user agent).
const POWERSYNC_JS_VERSION = PACKAGE.version;
// Backpressure bounds for the sync event queue (passed as highWaterMark /
// lowWaterMark to the EventIterator buffering received sync lines).
const SYNC_QUEUE_REQUEST_HIGH_WATER = 10;
const SYNC_QUEUE_REQUEST_LOW_WATER = 5;
// Keep alive message is sent every period
const KEEP_ALIVE_MS = 20_000;
|
|
@@ -10969,13 +11297,14 @@ class AbstractRemote {
|
|
|
10969
11297
|
return new WebSocket(url);
|
|
10970
11298
|
}
|
|
10971
11299
|
/**
|
|
10972
|
-
* Returns a data stream of sync line data.
|
|
11300
|
+
* Returns a data stream of sync line data, fetched via RSocket-over-WebSocket.
|
|
11301
|
+
*
|
|
11302
|
+
* The only mechanism to abort the returned stream is to use the abort signal in {@link SocketSyncStreamOptions}.
|
|
10973
11303
|
*
|
|
10974
|
-
* @param map Maps received payload frames to the typed event value.
|
|
10975
11304
|
* @param bson A BSON encoder and decoder. When set, the data stream will be requested with a BSON payload
|
|
10976
11305
|
* (required for compatibility with older sync services).
|
|
10977
11306
|
*/
|
|
10978
|
-
async socketStreamRaw(options,
|
|
11307
|
+
async socketStreamRaw(options, bson) {
|
|
10979
11308
|
const { path, fetchStrategy = FetchStrategy.Buffered } = options;
|
|
10980
11309
|
const mimeType = bson == null ? 'application/json' : 'application/bson';
|
|
10981
11310
|
function toBuffer(js) {
|
|
@@ -10990,52 +11319,55 @@ class AbstractRemote {
|
|
|
10990
11319
|
}
|
|
10991
11320
|
const syncQueueRequestSize = fetchStrategy == FetchStrategy.Buffered ? 10 : 1;
|
|
10992
11321
|
const request = await this.buildRequest(path);
|
|
11322
|
+
const url = this.options.socketUrlTransformer(request.url);
|
|
10993
11323
|
// Add the user agent in the setup payload - we can't set custom
|
|
10994
11324
|
// headers with websockets on web. The browser userAgent is however added
|
|
10995
11325
|
// automatically as a header.
|
|
10996
11326
|
const userAgent = this.getUserAgent();
|
|
10997
|
-
|
|
10998
|
-
|
|
10999
|
-
|
|
11000
|
-
|
|
11001
|
-
|
|
11002
|
-
|
|
11003
|
-
|
|
11327
|
+
// While we're connecting (a process that can't be aborted in RSocket), the WebSocket instance to close if we wanted
|
|
11328
|
+
// to abort the connection.
|
|
11329
|
+
let pendingSocket = null;
|
|
11330
|
+
let keepAliveTimeout;
|
|
11331
|
+
let rsocket = null;
|
|
11332
|
+
let queue = null;
|
|
11333
|
+
let didClose = false;
|
|
11334
|
+
const abortRequest = () => {
|
|
11335
|
+
if (didClose) {
|
|
11336
|
+
return;
|
|
11337
|
+
}
|
|
11338
|
+
didClose = true;
|
|
11339
|
+
clearTimeout(keepAliveTimeout);
|
|
11340
|
+
if (pendingSocket) {
|
|
11341
|
+
pendingSocket.close();
|
|
11342
|
+
}
|
|
11343
|
+
if (rsocket) {
|
|
11344
|
+
rsocket.close();
|
|
11345
|
+
}
|
|
11346
|
+
if (queue) {
|
|
11347
|
+
queue.stop();
|
|
11348
|
+
}
|
|
11349
|
+
};
|
|
11004
11350
|
// Handle upstream abort
|
|
11005
|
-
if (options.abortSignal
|
|
11351
|
+
if (options.abortSignal.aborted) {
|
|
11006
11352
|
throw new AbortOperation('Connection request aborted');
|
|
11007
11353
|
}
|
|
11008
11354
|
else {
|
|
11009
|
-
options.abortSignal
|
|
11010
|
-
stream.close();
|
|
11011
|
-
}, { once: true });
|
|
11355
|
+
options.abortSignal.addEventListener('abort', abortRequest);
|
|
11012
11356
|
}
|
|
11013
|
-
let keepAliveTimeout;
|
|
11014
11357
|
const resetTimeout = () => {
|
|
11015
11358
|
clearTimeout(keepAliveTimeout);
|
|
11016
11359
|
keepAliveTimeout = setTimeout(() => {
|
|
11017
11360
|
this.logger.error(`No data received on WebSocket in ${SOCKET_TIMEOUT_MS}ms, closing connection.`);
|
|
11018
|
-
|
|
11361
|
+
abortRequest();
|
|
11019
11362
|
}, SOCKET_TIMEOUT_MS);
|
|
11020
11363
|
};
|
|
11021
11364
|
resetTimeout();
|
|
11022
|
-
// Typescript complains about this being `never` if it's not assigned here.
|
|
11023
|
-
// This is assigned in `wsCreator`.
|
|
11024
|
-
let disposeSocketConnectionTimeout = () => { };
|
|
11025
|
-
const url = this.options.socketUrlTransformer(request.url);
|
|
11026
11365
|
const connector = new distExports.RSocketConnector({
|
|
11027
11366
|
transport: new WebsocketClientTransport({
|
|
11028
11367
|
url,
|
|
11029
11368
|
wsCreator: (url) => {
|
|
11030
|
-
const socket = this.createSocket(url);
|
|
11031
|
-
|
|
11032
|
-
closed: () => {
|
|
11033
|
-
// Allow closing the underlying WebSocket if the stream was closed before the
|
|
11034
|
-
// RSocket connect completed. This should effectively abort the request.
|
|
11035
|
-
socket.close();
|
|
11036
|
-
}
|
|
11037
|
-
});
|
|
11038
|
-
socket.addEventListener('message', (event) => {
|
|
11369
|
+
const socket = (pendingSocket = this.createSocket(url));
|
|
11370
|
+
socket.addEventListener('message', () => {
|
|
11039
11371
|
resetTimeout();
|
|
11040
11372
|
});
|
|
11041
11373
|
return socket;
|
|
@@ -11055,43 +11387,40 @@ class AbstractRemote {
|
|
|
11055
11387
|
}
|
|
11056
11388
|
}
|
|
11057
11389
|
});
|
|
11058
|
-
let rsocket;
|
|
11059
11390
|
try {
|
|
11060
11391
|
rsocket = await connector.connect();
|
|
11061
11392
|
// The connection is established, we no longer need to monitor the initial timeout
|
|
11062
|
-
|
|
11393
|
+
pendingSocket = null;
|
|
11063
11394
|
}
|
|
11064
11395
|
catch (ex) {
|
|
11065
11396
|
this.logger.error(`Failed to connect WebSocket`, ex);
|
|
11066
|
-
|
|
11067
|
-
if (!stream.closed) {
|
|
11068
|
-
await stream.close();
|
|
11069
|
-
}
|
|
11397
|
+
abortRequest();
|
|
11070
11398
|
throw ex;
|
|
11071
11399
|
}
|
|
11072
11400
|
resetTimeout();
|
|
11073
|
-
let socketIsClosed = false;
|
|
11074
|
-
const closeSocket = () => {
|
|
11075
|
-
clearTimeout(keepAliveTimeout);
|
|
11076
|
-
if (socketIsClosed) {
|
|
11077
|
-
return;
|
|
11078
|
-
}
|
|
11079
|
-
socketIsClosed = true;
|
|
11080
|
-
rsocket.close();
|
|
11081
|
-
};
|
|
11082
11401
|
// Helps to prevent double close scenarios
|
|
11083
|
-
rsocket.onClose(() => (
|
|
11084
|
-
|
|
11085
|
-
let pendingEventsCount = syncQueueRequestSize;
|
|
11086
|
-
const disposeClosedListener = stream.registerListener({
|
|
11087
|
-
closed: () => {
|
|
11088
|
-
closeSocket();
|
|
11089
|
-
disposeClosedListener();
|
|
11090
|
-
}
|
|
11091
|
-
});
|
|
11092
|
-
const socket = await new Promise((resolve, reject) => {
|
|
11402
|
+
rsocket.onClose(() => (rsocket = null));
|
|
11403
|
+
return await new Promise((resolve, reject) => {
|
|
11093
11404
|
let connectionEstablished = false;
|
|
11094
|
-
|
|
11405
|
+
let pendingEventsCount = syncQueueRequestSize;
|
|
11406
|
+
let paused = false;
|
|
11407
|
+
let res = null;
|
|
11408
|
+
function requestMore() {
|
|
11409
|
+
const delta = syncQueueRequestSize - pendingEventsCount;
|
|
11410
|
+
if (!paused && delta > 0) {
|
|
11411
|
+
res?.request(delta);
|
|
11412
|
+
pendingEventsCount = syncQueueRequestSize;
|
|
11413
|
+
}
|
|
11414
|
+
}
|
|
11415
|
+
const events = new domExports.EventIterator((q) => {
|
|
11416
|
+
queue = q;
|
|
11417
|
+
q.on('highWater', () => (paused = true));
|
|
11418
|
+
q.on('lowWater', () => {
|
|
11419
|
+
paused = false;
|
|
11420
|
+
requestMore();
|
|
11421
|
+
});
|
|
11422
|
+
}, { highWaterMark: SYNC_QUEUE_REQUEST_HIGH_WATER, lowWaterMark: SYNC_QUEUE_REQUEST_LOW_WATER })[symbolAsyncIterator]();
|
|
11423
|
+
res = rsocket.requestStream({
|
|
11095
11424
|
data: toBuffer(options.data),
|
|
11096
11425
|
metadata: toBuffer({
|
|
11097
11426
|
path
|
|
@@ -11116,7 +11445,7 @@ class AbstractRemote {
|
|
|
11116
11445
|
}
|
|
11117
11446
|
// RSocket will close the RSocket stream automatically
|
|
11118
11447
|
// Close the downstream stream as well - this will close the RSocket connection and WebSocket
|
|
11119
|
-
|
|
11448
|
+
abortRequest();
|
|
11120
11449
|
// Handles cases where the connection failed e.g. auth error or connection error
|
|
11121
11450
|
if (!connectionEstablished) {
|
|
11122
11451
|
reject(e);
|
|
@@ -11126,41 +11455,40 @@ class AbstractRemote {
|
|
|
11126
11455
|
// The connection is active
|
|
11127
11456
|
if (!connectionEstablished) {
|
|
11128
11457
|
connectionEstablished = true;
|
|
11129
|
-
resolve(
|
|
11458
|
+
resolve(events);
|
|
11130
11459
|
}
|
|
11131
11460
|
const { data } = payload;
|
|
11461
|
+
if (data) {
|
|
11462
|
+
queue.push(data);
|
|
11463
|
+
}
|
|
11132
11464
|
// Less events are now pending
|
|
11133
11465
|
pendingEventsCount--;
|
|
11134
|
-
|
|
11135
|
-
|
|
11136
|
-
}
|
|
11137
|
-
stream.enqueueData(data);
|
|
11466
|
+
// Request another event (unless the downstream consumer is paused).
|
|
11467
|
+
requestMore();
|
|
11138
11468
|
},
|
|
11139
11469
|
onComplete: () => {
|
|
11140
|
-
|
|
11470
|
+
abortRequest(); // this will also emit a done event
|
|
11141
11471
|
},
|
|
11142
11472
|
onExtension: () => { }
|
|
11143
11473
|
});
|
|
11144
11474
|
});
|
|
11145
|
-
const l = stream.registerListener({
|
|
11146
|
-
lowWater: async () => {
|
|
11147
|
-
// Request to fill up the queue
|
|
11148
|
-
const required = syncQueueRequestSize - pendingEventsCount;
|
|
11149
|
-
if (required > 0) {
|
|
11150
|
-
socket.request(syncQueueRequestSize - pendingEventsCount);
|
|
11151
|
-
pendingEventsCount = syncQueueRequestSize;
|
|
11152
|
-
}
|
|
11153
|
-
},
|
|
11154
|
-
closed: () => {
|
|
11155
|
-
l();
|
|
11156
|
-
}
|
|
11157
|
-
});
|
|
11158
|
-
return stream;
|
|
11159
11475
|
}
|
|
11160
11476
|
/**
|
|
11161
|
-
*
|
|
11477
|
+
* @returns Whether the HTTP implementation on this platform can receive streamed binary responses. This is true on
|
|
11478
|
+
* all platforms except React Native (who would have guessed...), where we must not request BSON responses.
|
|
11479
|
+
*
|
|
11480
|
+
* @see https://github.com/react-native-community/fetch?tab=readme-ov-file#motivation
|
|
11481
|
+
*/
|
|
11482
|
+
get supportsStreamingBinaryResponses() {
|
|
11483
|
+
return true;
|
|
11484
|
+
}
|
|
11485
|
+
/**
|
|
11486
|
+
* Posts a `/sync/stream` request, asserts that it completes successfully and returns the streaming response as an
|
|
11487
|
+
* async iterator of byte blobs.
|
|
11488
|
+
*
|
|
11489
|
+
* To cancel the async iterator, use the abort signal from {@link SyncStreamOptions} passed to this method.
|
|
11162
11490
|
*/
|
|
11163
|
-
async
|
|
11491
|
+
async fetchStreamRaw(options) {
|
|
11164
11492
|
const { data, path, headers, abortSignal } = options;
|
|
11165
11493
|
const request = await this.buildRequest(path);
|
|
11166
11494
|
/**
|
|
@@ -11172,119 +11500,94 @@ class AbstractRemote {
|
|
|
11172
11500
|
* Aborting the active fetch request while it is being consumed seems to throw
|
|
11173
11501
|
* an unhandled exception on the window level.
|
|
11174
11502
|
*/
|
|
11175
|
-
if (abortSignal
|
|
11176
|
-
throw new AbortOperation('Abort request received before making
|
|
11503
|
+
if (abortSignal.aborted) {
|
|
11504
|
+
throw new AbortOperation('Abort request received before making fetchStreamRaw request');
|
|
11177
11505
|
}
|
|
11178
11506
|
const controller = new AbortController();
|
|
11179
|
-
let
|
|
11180
|
-
abortSignal
|
|
11181
|
-
|
|
11507
|
+
let reader = null;
|
|
11508
|
+
abortSignal.addEventListener('abort', () => {
|
|
11509
|
+
const reason = abortSignal.reason ??
|
|
11510
|
+
new AbortOperation('Cancelling network request before it resolves. Abort signal has been received.');
|
|
11511
|
+
if (reader == null) {
|
|
11182
11512
|
// Only abort via the abort controller if the request has not resolved yet
|
|
11183
|
-
controller.abort(
|
|
11184
|
-
|
|
11513
|
+
controller.abort(reason);
|
|
11514
|
+
}
|
|
11515
|
+
else {
|
|
11516
|
+
reader.cancel(reason).catch(() => {
|
|
11517
|
+
// Cancelling the reader might rethrow an exception we would have handled by throwing in next(). So we can
|
|
11518
|
+
// ignore it here.
|
|
11519
|
+
});
|
|
11185
11520
|
}
|
|
11186
11521
|
});
|
|
11187
|
-
|
|
11188
|
-
|
|
11189
|
-
|
|
11190
|
-
|
|
11191
|
-
|
|
11192
|
-
|
|
11193
|
-
|
|
11194
|
-
|
|
11195
|
-
|
|
11522
|
+
let res;
|
|
11523
|
+
let responseIsBson = false;
|
|
11524
|
+
try {
|
|
11525
|
+
const ndJson = 'application/x-ndjson';
|
|
11526
|
+
const bson = 'application/vnd.powersync.bson-stream';
|
|
11527
|
+
res = await this.fetch(request.url, {
|
|
11528
|
+
method: 'POST',
|
|
11529
|
+
headers: {
|
|
11530
|
+
...headers,
|
|
11531
|
+
...request.headers,
|
|
11532
|
+
accept: this.supportsStreamingBinaryResponses ? `${bson};q=0.9,${ndJson};q=0.8` : ndJson
|
|
11533
|
+
},
|
|
11534
|
+
body: JSON.stringify(data),
|
|
11535
|
+
signal: controller.signal,
|
|
11536
|
+
cache: 'no-store',
|
|
11537
|
+
...(this.options.fetchOptions ?? {}),
|
|
11538
|
+
...options.fetchOptions
|
|
11539
|
+
});
|
|
11540
|
+
if (!res.ok || !res.body) {
|
|
11541
|
+
const text = await res.text();
|
|
11542
|
+
this.logger.error(`Could not POST streaming to ${path} - ${res.status} - ${res.statusText}: ${text}`);
|
|
11543
|
+
const error = new Error(`HTTP ${res.statusText}: ${text}`);
|
|
11544
|
+
error.status = res.status;
|
|
11545
|
+
throw error;
|
|
11546
|
+
}
|
|
11547
|
+
const contentType = res.headers.get('content-type');
|
|
11548
|
+
responseIsBson = contentType == bson;
|
|
11549
|
+
}
|
|
11550
|
+
catch (ex) {
|
|
11196
11551
|
if (ex.name == 'AbortError') {
|
|
11197
11552
|
throw new AbortOperation(`Pending fetch request to ${request.url} has been aborted.`);
|
|
11198
11553
|
}
|
|
11199
11554
|
throw ex;
|
|
11200
|
-
});
|
|
11201
|
-
if (!res) {
|
|
11202
|
-
throw new Error('Fetch request was aborted');
|
|
11203
|
-
}
|
|
11204
|
-
requestResolved = true;
|
|
11205
|
-
if (!res.ok || !res.body) {
|
|
11206
|
-
const text = await res.text();
|
|
11207
|
-
this.logger.error(`Could not POST streaming to ${path} - ${res.status} - ${res.statusText}: ${text}`);
|
|
11208
|
-
const error = new Error(`HTTP ${res.statusText}: ${text}`);
|
|
11209
|
-
error.status = res.status;
|
|
11210
|
-
throw error;
|
|
11211
11555
|
}
|
|
11212
|
-
|
|
11213
|
-
|
|
11214
|
-
|
|
11215
|
-
|
|
11216
|
-
|
|
11217
|
-
const closeReader = async () => {
|
|
11218
|
-
try {
|
|
11219
|
-
readerReleased = true;
|
|
11220
|
-
await reader.cancel();
|
|
11221
|
-
}
|
|
11222
|
-
catch (ex) {
|
|
11223
|
-
// an error will throw if the reader hasn't been used yet
|
|
11224
|
-
}
|
|
11225
|
-
reader.releaseLock();
|
|
11226
|
-
};
|
|
11227
|
-
const stream = new DataStream({
|
|
11228
|
-
logger: this.logger,
|
|
11229
|
-
mapLine: mapLine,
|
|
11230
|
-
pressure: {
|
|
11231
|
-
highWaterMark: 20,
|
|
11232
|
-
lowWaterMark: 10
|
|
11233
|
-
}
|
|
11234
|
-
});
|
|
11235
|
-
abortSignal?.addEventListener('abort', () => {
|
|
11236
|
-
closeReader();
|
|
11237
|
-
stream.close();
|
|
11238
|
-
});
|
|
11239
|
-
const decoder = this.createTextDecoder();
|
|
11240
|
-
let buffer = '';
|
|
11241
|
-
const consumeStream = async () => {
|
|
11242
|
-
while (!stream.closed && !abortSignal?.aborted && !readerReleased) {
|
|
11243
|
-
const { done, value } = await reader.read();
|
|
11244
|
-
if (done) {
|
|
11245
|
-
const remaining = buffer.trim();
|
|
11246
|
-
if (remaining.length != 0) {
|
|
11247
|
-
stream.enqueueData(remaining);
|
|
11248
|
-
}
|
|
11249
|
-
stream.close();
|
|
11250
|
-
await closeReader();
|
|
11251
|
-
return;
|
|
11556
|
+
reader = res.body.getReader();
|
|
11557
|
+
const stream = {
|
|
11558
|
+
next: async () => {
|
|
11559
|
+
if (controller.signal.aborted) {
|
|
11560
|
+
return doneResult;
|
|
11252
11561
|
}
|
|
11253
|
-
|
|
11254
|
-
|
|
11255
|
-
const lines = buffer.split('\n');
|
|
11256
|
-
for (var i = 0; i < lines.length - 1; i++) {
|
|
11257
|
-
var l = lines[i].trim();
|
|
11258
|
-
if (l.length > 0) {
|
|
11259
|
-
stream.enqueueData(l);
|
|
11260
|
-
}
|
|
11562
|
+
try {
|
|
11563
|
+
return await reader.read();
|
|
11261
11564
|
}
|
|
11262
|
-
|
|
11263
|
-
|
|
11264
|
-
|
|
11265
|
-
|
|
11266
|
-
|
|
11267
|
-
|
|
11268
|
-
|
|
11269
|
-
dispose();
|
|
11270
|
-
},
|
|
11271
|
-
closed: () => {
|
|
11272
|
-
resolve();
|
|
11273
|
-
dispose();
|
|
11274
|
-
}
|
|
11275
|
-
});
|
|
11276
|
-
});
|
|
11565
|
+
catch (ex) {
|
|
11566
|
+
if (controller.signal.aborted) {
|
|
11567
|
+
// .read() completes with an error if we cancel the reader, which we do to disconnect. So this is just
|
|
11568
|
+
// things working as intended, we can return a done event and consider the exception handled.
|
|
11569
|
+
return doneResult;
|
|
11570
|
+
}
|
|
11571
|
+
throw ex;
|
|
11277
11572
|
}
|
|
11278
11573
|
}
|
|
11279
11574
|
};
|
|
11280
|
-
|
|
11281
|
-
|
|
11282
|
-
|
|
11283
|
-
|
|
11284
|
-
|
|
11285
|
-
|
|
11286
|
-
|
|
11287
|
-
|
|
11575
|
+
return { isBson: responseIsBson, stream };
|
|
11576
|
+
}
|
|
11577
|
+
/**
|
|
11578
|
+
* Posts a `/sync/stream` request.
|
|
11579
|
+
*
|
|
11580
|
+
* Depending on the `Content-Type` of the response, this returns strings for sync lines or encoded BSON documents as
|
|
11581
|
+
* {@link Uint8Array}s.
|
|
11582
|
+
*/
|
|
11583
|
+
async fetchStream(options) {
|
|
11584
|
+
const { isBson, stream } = await this.fetchStreamRaw(options);
|
|
11585
|
+
if (isBson) {
|
|
11586
|
+
return extractBsonObjects(stream);
|
|
11587
|
+
}
|
|
11588
|
+
else {
|
|
11589
|
+
return extractJsonLines(stream, this.createTextDecoder());
|
|
11590
|
+
}
|
|
11288
11591
|
}
|
|
11289
11592
|
}
|
|
11290
11593
|
|
|
@@ -11792,6 +12095,19 @@ The next upload iteration will be delayed.`);
|
|
|
11792
12095
|
}
|
|
11793
12096
|
});
|
|
11794
12097
|
}
|
|
12098
|
+
async receiveSyncLines(data) {
|
|
12099
|
+
const { options, connection, bson } = data;
|
|
12100
|
+
const remote = this.options.remote;
|
|
12101
|
+
if (connection.connectionMethod == SyncStreamConnectionMethod.HTTP) {
|
|
12102
|
+
return await remote.fetchStream(options);
|
|
12103
|
+
}
|
|
12104
|
+
else {
|
|
12105
|
+
return await this.options.remote.socketStreamRaw({
|
|
12106
|
+
...options,
|
|
12107
|
+
...{ fetchStrategy: connection.fetchStrategy }
|
|
12108
|
+
}, bson);
|
|
12109
|
+
}
|
|
12110
|
+
}
|
|
11795
12111
|
async legacyStreamingSyncIteration(signal, resolvedOptions) {
|
|
11796
12112
|
const rawTables = resolvedOptions.serializedSchema?.raw_tables;
|
|
11797
12113
|
if (rawTables != null && rawTables.length) {
|
|
@@ -11821,42 +12137,27 @@ The next upload iteration will be delayed.`);
|
|
|
11821
12137
|
client_id: clientId
|
|
11822
12138
|
}
|
|
11823
12139
|
};
|
|
11824
|
-
|
|
11825
|
-
|
|
11826
|
-
|
|
11827
|
-
|
|
11828
|
-
|
|
11829
|
-
|
|
11830
|
-
|
|
11831
|
-
|
|
11832
|
-
|
|
11833
|
-
|
|
11834
|
-
|
|
11835
|
-
|
|
11836
|
-
|
|
11837
|
-
|
|
11838
|
-
stream = await this.options.remote.socketStreamRaw({
|
|
11839
|
-
...syncOptions,
|
|
11840
|
-
...{ fetchStrategy: resolvedOptions.fetchStrategy }
|
|
11841
|
-
}, (payload) => {
|
|
11842
|
-
if (payload instanceof Uint8Array) {
|
|
11843
|
-
return bson.deserialize(payload);
|
|
11844
|
-
}
|
|
11845
|
-
else {
|
|
11846
|
-
// Directly enqueued by us
|
|
11847
|
-
return payload;
|
|
11848
|
-
}
|
|
11849
|
-
}, bson);
|
|
11850
|
-
}
|
|
12140
|
+
const bson = await this.options.remote.getBSON();
|
|
12141
|
+
const source = await this.receiveSyncLines({
|
|
12142
|
+
options: syncOptions,
|
|
12143
|
+
connection: resolvedOptions,
|
|
12144
|
+
bson
|
|
12145
|
+
});
|
|
12146
|
+
const stream = injectable(map(source, (line) => {
|
|
12147
|
+
if (typeof line == 'string') {
|
|
12148
|
+
return JSON.parse(line);
|
|
12149
|
+
}
|
|
12150
|
+
else {
|
|
12151
|
+
return bson.deserialize(line);
|
|
12152
|
+
}
|
|
12153
|
+
}));
|
|
11851
12154
|
this.logger.debug('Stream established. Processing events');
|
|
11852
12155
|
this.notifyCompletedUploads = () => {
|
|
11853
|
-
|
|
11854
|
-
stream.enqueueData({ crud_upload_completed: null });
|
|
11855
|
-
}
|
|
12156
|
+
stream.inject({ crud_upload_completed: null });
|
|
11856
12157
|
};
|
|
11857
|
-
while (
|
|
11858
|
-
const line = await stream.
|
|
11859
|
-
if (
|
|
12158
|
+
while (true) {
|
|
12159
|
+
const { value: line, done } = await stream.next();
|
|
12160
|
+
if (done) {
|
|
11860
12161
|
// The stream has closed while waiting
|
|
11861
12162
|
return;
|
|
11862
12163
|
}
|
|
@@ -12035,14 +12336,17 @@ The next upload iteration will be delayed.`);
|
|
|
12035
12336
|
const syncImplementation = this;
|
|
12036
12337
|
const adapter = this.options.adapter;
|
|
12037
12338
|
const remote = this.options.remote;
|
|
12339
|
+
const controller = new AbortController();
|
|
12340
|
+
const abort = () => {
|
|
12341
|
+
return controller.abort(signal.reason);
|
|
12342
|
+
};
|
|
12343
|
+
signal.addEventListener('abort', abort);
|
|
12038
12344
|
let receivingLines = null;
|
|
12039
12345
|
let hadSyncLine = false;
|
|
12040
12346
|
let hideDisconnectOnRestart = false;
|
|
12041
12347
|
if (signal.aborted) {
|
|
12042
12348
|
throw new AbortOperation('Connection request has been aborted');
|
|
12043
12349
|
}
|
|
12044
|
-
const abortController = new AbortController();
|
|
12045
|
-
signal.addEventListener('abort', () => abortController.abort());
|
|
12046
12350
|
// Pending sync lines received from the service, as well as local events that trigger a powersync_control
|
|
12047
12351
|
// invocation (local events include refreshed tokens and completed uploads).
|
|
12048
12352
|
// This is a single data stream so that we can handle all control calls from a single place.
|
|
@@ -12050,49 +12354,36 @@ The next upload iteration will be delayed.`);
|
|
|
12050
12354
|
async function connect(instr) {
|
|
12051
12355
|
const syncOptions = {
|
|
12052
12356
|
path: '/sync/stream',
|
|
12053
|
-
abortSignal:
|
|
12357
|
+
abortSignal: controller.signal,
|
|
12054
12358
|
data: instr.request
|
|
12055
12359
|
};
|
|
12056
|
-
|
|
12057
|
-
|
|
12058
|
-
|
|
12059
|
-
|
|
12060
|
-
|
|
12061
|
-
|
|
12062
|
-
|
|
12063
|
-
|
|
12064
|
-
|
|
12065
|
-
|
|
12066
|
-
|
|
12067
|
-
|
|
12068
|
-
|
|
12069
|
-
|
|
12070
|
-
|
|
12071
|
-
|
|
12072
|
-
|
|
12073
|
-
fetchStrategy: resolvedOptions.fetchStrategy
|
|
12074
|
-
}, (payload) => {
|
|
12075
|
-
if (payload instanceof Uint8Array) {
|
|
12076
|
-
return {
|
|
12077
|
-
command: PowerSyncControlCommand.PROCESS_BSON_LINE,
|
|
12078
|
-
payload: payload
|
|
12079
|
-
};
|
|
12080
|
-
}
|
|
12081
|
-
else {
|
|
12082
|
-
// Directly enqueued by us
|
|
12083
|
-
return payload;
|
|
12084
|
-
}
|
|
12085
|
-
});
|
|
12086
|
-
}
|
|
12360
|
+
controlInvocations = injectable(map(await syncImplementation.receiveSyncLines({
|
|
12361
|
+
options: syncOptions,
|
|
12362
|
+
connection: resolvedOptions
|
|
12363
|
+
}), (line) => {
|
|
12364
|
+
if (typeof line == 'string') {
|
|
12365
|
+
return {
|
|
12366
|
+
command: PowerSyncControlCommand.PROCESS_TEXT_LINE,
|
|
12367
|
+
payload: line
|
|
12368
|
+
};
|
|
12369
|
+
}
|
|
12370
|
+
else {
|
|
12371
|
+
return {
|
|
12372
|
+
command: PowerSyncControlCommand.PROCESS_BSON_LINE,
|
|
12373
|
+
payload: line
|
|
12374
|
+
};
|
|
12375
|
+
}
|
|
12376
|
+
}));
|
|
12087
12377
|
// The rust client will set connected: true after the first sync line because that's when it gets invoked, but
|
|
12088
12378
|
// we're already connected here and can report that.
|
|
12089
12379
|
syncImplementation.updateSyncStatus({ connected: true });
|
|
12090
12380
|
try {
|
|
12091
|
-
while (
|
|
12092
|
-
|
|
12093
|
-
if (
|
|
12094
|
-
|
|
12381
|
+
while (true) {
|
|
12382
|
+
let event = await controlInvocations.next();
|
|
12383
|
+
if (event.done) {
|
|
12384
|
+
break;
|
|
12095
12385
|
}
|
|
12386
|
+
const line = event.value;
|
|
12096
12387
|
await control(line.command, line.payload);
|
|
12097
12388
|
if (!hadSyncLine) {
|
|
12098
12389
|
syncImplementation.triggerCrudUpload();
|
|
@@ -12101,12 +12392,8 @@ The next upload iteration will be delayed.`);
|
|
|
12101
12392
|
}
|
|
12102
12393
|
}
|
|
12103
12394
|
finally {
|
|
12104
|
-
|
|
12105
|
-
|
|
12106
|
-
// refreshed. That would throw after closing (and we can't handle those events either way), so set this back
|
|
12107
|
-
// to null.
|
|
12108
|
-
controlInvocations = null;
|
|
12109
|
-
await activeInstructions.close();
|
|
12395
|
+
abort();
|
|
12396
|
+
signal.removeEventListener('abort', abort);
|
|
12110
12397
|
}
|
|
12111
12398
|
}
|
|
12112
12399
|
async function stop() {
|
|
@@ -12150,14 +12437,14 @@ The next upload iteration will be delayed.`);
|
|
|
12150
12437
|
remote.invalidateCredentials();
|
|
12151
12438
|
// Restart iteration after the credentials have been refreshed.
|
|
12152
12439
|
remote.fetchCredentials().then((_) => {
|
|
12153
|
-
controlInvocations?.
|
|
12440
|
+
controlInvocations?.inject({ command: PowerSyncControlCommand.NOTIFY_TOKEN_REFRESHED });
|
|
12154
12441
|
}, (err) => {
|
|
12155
12442
|
syncImplementation.logger.warn('Could not prefetch credentials', err);
|
|
12156
12443
|
});
|
|
12157
12444
|
}
|
|
12158
12445
|
}
|
|
12159
12446
|
else if ('CloseSyncStream' in instruction) {
|
|
12160
|
-
|
|
12447
|
+
controller.abort();
|
|
12161
12448
|
hideDisconnectOnRestart = instruction.CloseSyncStream.hide_disconnect;
|
|
12162
12449
|
}
|
|
12163
12450
|
else if ('FlushFileSystem' in instruction) ;
|
|
@@ -12186,17 +12473,13 @@ The next upload iteration will be delayed.`);
|
|
|
12186
12473
|
}
|
|
12187
12474
|
await control(PowerSyncControlCommand.START, JSON.stringify(options));
|
|
12188
12475
|
this.notifyCompletedUploads = () => {
|
|
12189
|
-
|
|
12190
|
-
controlInvocations.enqueueData({ command: PowerSyncControlCommand.NOTIFY_CRUD_UPLOAD_COMPLETED });
|
|
12191
|
-
}
|
|
12476
|
+
controlInvocations?.inject({ command: PowerSyncControlCommand.NOTIFY_CRUD_UPLOAD_COMPLETED });
|
|
12192
12477
|
};
|
|
12193
12478
|
this.handleActiveStreamsChange = () => {
|
|
12194
|
-
|
|
12195
|
-
|
|
12196
|
-
|
|
12197
|
-
|
|
12198
|
-
});
|
|
12199
|
-
}
|
|
12479
|
+
controlInvocations?.inject({
|
|
12480
|
+
command: PowerSyncControlCommand.UPDATE_SUBSCRIPTIONS,
|
|
12481
|
+
payload: JSON.stringify(this.activeStreams)
|
|
12482
|
+
});
|
|
12200
12483
|
};
|
|
12201
12484
|
await receivingLines;
|
|
12202
12485
|
}
|
|
@@ -12543,7 +12826,8 @@ class TriggerManagerImpl {
|
|
|
12543
12826
|
* we need to ensure we can cleanup the created resources.
|
|
12544
12827
|
* We unfortunately cannot rely on transaction rollback.
|
|
12545
12828
|
*/
|
|
12546
|
-
const cleanup = async (
|
|
12829
|
+
const cleanup = async (options) => {
|
|
12830
|
+
const { context } = options ?? {};
|
|
12547
12831
|
disposeWarningListener();
|
|
12548
12832
|
const doCleanup = async (tx) => {
|
|
12549
12833
|
await this.removeTriggers(tx, triggerIds);
|
|
@@ -12639,7 +12923,7 @@ class TriggerManagerImpl {
|
|
|
12639
12923
|
}
|
|
12640
12924
|
catch (error) {
|
|
12641
12925
|
try {
|
|
12642
|
-
await cleanup();
|
|
12926
|
+
await cleanup(setupContext ? { context: setupContext } : undefined);
|
|
12643
12927
|
}
|
|
12644
12928
|
catch (cleanupError) {
|
|
12645
12929
|
throw new AggregateError([error, cleanupError], 'Error during operation and cleanup');
|
|
@@ -13311,6 +13595,10 @@ SELECT * FROM crud_entries;
|
|
|
13311
13595
|
* Execute a SQL write (INSERT/UPDATE/DELETE) query
|
|
13312
13596
|
* and optionally return results.
|
|
13313
13597
|
*
|
|
13598
|
+
* When using the default client-side [JSON-based view system](https://docs.powersync.com/architecture/client-architecture#client-side-schema-and-sqlite-database-structure),
|
|
13599
|
+
* the returned result's `rowsAffected` may be `0` for successful `UPDATE` and `DELETE` statements.
|
|
13600
|
+
* Use a `RETURNING` clause and inspect `result.rows` when you need to confirm which rows changed.
|
|
13601
|
+
*
|
|
13314
13602
|
* @param sql The SQL query to execute
|
|
13315
13603
|
* @param parameters Optional array of parameters to bind to the query
|
|
13316
13604
|
* @returns The query result as an object with structured key-value pairs
|
|
@@ -13407,7 +13695,7 @@ SELECT * FROM crud_entries;
|
|
|
13407
13695
|
async readTransaction(callback, lockTimeout = DEFAULT_LOCK_TIMEOUT_MS) {
|
|
13408
13696
|
await this.waitForReady();
|
|
13409
13697
|
return this.database.readTransaction(async (tx) => {
|
|
13410
|
-
const res = await callback(
|
|
13698
|
+
const res = await callback(tx);
|
|
13411
13699
|
await tx.rollback();
|
|
13412
13700
|
return res;
|
|
13413
13701
|
}, { timeoutMs: lockTimeout });
|
|
@@ -14384,5 +14672,5 @@ const parseQuery = (query, parameters) => {
|
|
|
14384
14672
|
return { sqlStatement, parameters: parameters };
|
|
14385
14673
|
};
|
|
14386
14674
|
|
|
14387
|
-
export { ATTACHMENT_TABLE, AbortOperation, AbstractPowerSyncDatabase, AbstractPowerSyncDatabaseOpenFactory, AbstractQueryProcessor, AbstractRemote, AbstractStreamingSyncImplementation, ArrayComparator, AttachmentContext, AttachmentQueue, AttachmentService, AttachmentState, AttachmentTable, BaseObserver, Column, ColumnType, ConnectionClosedError, ConnectionManager, ControlledExecutor, CrudBatch, CrudEntry, CrudTransaction, DEFAULT_CRUD_BATCH_LIMIT, DEFAULT_CRUD_UPLOAD_THROTTLE_MS, DEFAULT_INDEX_COLUMN_OPTIONS, DEFAULT_INDEX_OPTIONS, DEFAULT_LOCK_TIMEOUT_MS, DEFAULT_POWERSYNC_CLOSE_OPTIONS, DEFAULT_POWERSYNC_DB_OPTIONS,
|
|
14675
|
+
export { ATTACHMENT_TABLE, AbortOperation, AbstractPowerSyncDatabase, AbstractPowerSyncDatabaseOpenFactory, AbstractQueryProcessor, AbstractRemote, AbstractStreamingSyncImplementation, ArrayComparator, AttachmentContext, AttachmentQueue, AttachmentService, AttachmentState, AttachmentTable, BaseObserver, Column, ColumnType, ConnectionClosedError, ConnectionManager, ControlledExecutor, CrudBatch, CrudEntry, CrudTransaction, DBAdapterDefaultMixin, DBGetUtilsDefaultMixin, DEFAULT_CRUD_BATCH_LIMIT, DEFAULT_CRUD_UPLOAD_THROTTLE_MS, DEFAULT_INDEX_COLUMN_OPTIONS, DEFAULT_INDEX_OPTIONS, DEFAULT_LOCK_TIMEOUT_MS, DEFAULT_POWERSYNC_CLOSE_OPTIONS, DEFAULT_POWERSYNC_DB_OPTIONS, DEFAULT_REMOTE_LOGGER, DEFAULT_REMOTE_OPTIONS, DEFAULT_RETRY_DELAY_MS, DEFAULT_ROW_COMPARATOR, DEFAULT_STREAMING_SYNC_OPTIONS, DEFAULT_STREAM_CONNECTION_OPTIONS, DEFAULT_SYNC_CLIENT_IMPLEMENTATION, DEFAULT_TABLE_OPTIONS, DEFAULT_WATCH_QUERY_OPTIONS, DEFAULT_WATCH_THROTTLE_MS, DiffTriggerOperation, DifferentialQueryProcessor, EMPTY_DIFFERENTIAL, EncodingType, FalsyComparator, FetchImplementationProvider, FetchStrategy, GetAllQuery, Index, IndexedColumn, InvalidSQLCharacters, LockType, LogLevel, MAX_AMOUNT_OF_COLUMNS, MAX_OP_ID, MEMORY_TRIGGER_CLAIM_MANAGER, Mutex, OnChangeQueryProcessor, OpType, OpTypeEnum, OplogEntry, PSInternalTable, PowerSyncControlCommand, RowUpdateType, Schema, Semaphore, SqliteBucketStorage, SyncClientImplementation, SyncDataBatch, SyncDataBucket, SyncProgress, SyncStatus, SyncStreamConnectionMethod, SyncingService, Table, TableV2, TriggerManagerImpl, UpdateType, UploadQueueStats, WatchedQueryListenerEvent, attachmentFromSql, column, compilableQueryWatch, createBaseLogger, createLogger, extractTableUpdates, isBatchedUpdateNotification, isContinueCheckpointRequest, isDBAdapter, isPowerSyncDatabaseOptionsWithSettings, isSQLOpenFactory, isSQLOpenOptions, isStreamingKeepalive, isStreamingSyncCheckpoint, isStreamingSyncCheckpointComplete, isStreamingSyncCheckpointDiff, 
isStreamingSyncCheckpointPartiallyComplete, isStreamingSyncData, isSyncNewCheckpointRequest, parseQuery, runOnSchemaChange, sanitizeSQL, sanitizeUUID, timeoutSignal };
|
|
14388
14676
|
//# sourceMappingURL=bundle.mjs.map
|