@powersync/common 0.0.0-dev-20260311081226 → 0.0.0-dev-20260414110516
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/bundle.cjs +775 -485
- package/dist/bundle.cjs.map +1 -1
- package/dist/bundle.mjs +769 -481
- package/dist/bundle.mjs.map +1 -1
- package/dist/bundle.node.cjs +773 -484
- package/dist/bundle.node.cjs.map +1 -1
- package/dist/bundle.node.mjs +767 -480
- package/dist/bundle.node.mjs.map +1 -1
- package/dist/index.d.cts +175 -94
- package/lib/attachments/AttachmentQueue.d.ts +10 -4
- package/lib/attachments/AttachmentQueue.js +10 -4
- package/lib/attachments/AttachmentQueue.js.map +1 -1
- package/lib/attachments/AttachmentService.js +2 -3
- package/lib/attachments/AttachmentService.js.map +1 -1
- package/lib/attachments/SyncingService.d.ts +2 -1
- package/lib/attachments/SyncingService.js +4 -5
- package/lib/attachments/SyncingService.js.map +1 -1
- package/lib/client/AbstractPowerSyncDatabase.d.ts +5 -1
- package/lib/client/AbstractPowerSyncDatabase.js +9 -5
- package/lib/client/AbstractPowerSyncDatabase.js.map +1 -1
- package/lib/client/sync/stream/AbstractRemote.d.ts +29 -8
- package/lib/client/sync/stream/AbstractRemote.js +154 -177
- package/lib/client/sync/stream/AbstractRemote.js.map +1 -1
- package/lib/client/sync/stream/AbstractStreamingSyncImplementation.d.ts +1 -0
- package/lib/client/sync/stream/AbstractStreamingSyncImplementation.js +69 -88
- package/lib/client/sync/stream/AbstractStreamingSyncImplementation.js.map +1 -1
- package/lib/client/triggers/TriggerManager.d.ts +12 -1
- package/lib/client/triggers/TriggerManagerImpl.d.ts +2 -2
- package/lib/client/triggers/TriggerManagerImpl.js +3 -2
- package/lib/client/triggers/TriggerManagerImpl.js.map +1 -1
- package/lib/db/DBAdapter.d.ts +55 -9
- package/lib/db/DBAdapter.js +126 -0
- package/lib/db/DBAdapter.js.map +1 -1
- package/lib/index.d.ts +1 -1
- package/lib/index.js +0 -1
- package/lib/index.js.map +1 -1
- package/lib/utils/async.d.ts +0 -9
- package/lib/utils/async.js +0 -9
- package/lib/utils/async.js.map +1 -1
- package/lib/utils/mutex.d.ts +47 -5
- package/lib/utils/mutex.js +146 -21
- package/lib/utils/mutex.js.map +1 -1
- package/lib/utils/queue.d.ts +16 -0
- package/lib/utils/queue.js +42 -0
- package/lib/utils/queue.js.map +1 -0
- package/lib/utils/stream_transform.d.ts +39 -0
- package/lib/utils/stream_transform.js +206 -0
- package/lib/utils/stream_transform.js.map +1 -0
- package/package.json +9 -8
- package/src/attachments/AttachmentQueue.ts +10 -4
- package/src/attachments/AttachmentService.ts +2 -3
- package/src/attachments/README.md +6 -4
- package/src/attachments/SyncingService.ts +4 -5
- package/src/client/AbstractPowerSyncDatabase.ts +9 -5
- package/src/client/sync/stream/AbstractRemote.ts +182 -206
- package/src/client/sync/stream/AbstractStreamingSyncImplementation.ts +82 -83
- package/src/client/triggers/TriggerManager.ts +13 -1
- package/src/client/triggers/TriggerManagerImpl.ts +4 -2
- package/src/db/DBAdapter.ts +167 -9
- package/src/index.ts +1 -1
- package/src/utils/async.ts +0 -11
- package/src/utils/mutex.ts +184 -26
- package/src/utils/queue.ts +48 -0
- package/src/utils/stream_transform.ts +252 -0
- package/lib/utils/DataStream.d.ts +0 -62
- package/lib/utils/DataStream.js +0 -169
- package/lib/utils/DataStream.js.map +0 -1
- package/src/utils/DataStream.ts +0 -222
package/dist/bundle.node.mjs
CHANGED
|
@@ -1,4 +1,3 @@
|
|
|
1
|
-
import { Mutex } from 'async-mutex';
|
|
2
1
|
import { EventIterator } from 'event-iterator';
|
|
3
2
|
import { Buffer } from 'node:buffer';
|
|
4
3
|
|
|
@@ -659,7 +658,7 @@ class SyncingService {
|
|
|
659
658
|
updatedAttachments.push(downloaded);
|
|
660
659
|
break;
|
|
661
660
|
case AttachmentState.QUEUED_DELETE:
|
|
662
|
-
const deleted = await this.deleteAttachment(attachment);
|
|
661
|
+
const deleted = await this.deleteAttachment(attachment, context);
|
|
663
662
|
updatedAttachments.push(deleted);
|
|
664
663
|
break;
|
|
665
664
|
}
|
|
@@ -737,17 +736,16 @@ class SyncingService {
|
|
|
737
736
|
* On failure, defers to error handler or archives.
|
|
738
737
|
*
|
|
739
738
|
* @param attachment - The attachment record to delete
|
|
739
|
+
* @param context - Attachment context for database operations
|
|
740
740
|
* @returns Updated attachment record
|
|
741
741
|
*/
|
|
742
|
-
async deleteAttachment(attachment) {
|
|
742
|
+
async deleteAttachment(attachment, context) {
|
|
743
743
|
try {
|
|
744
744
|
await this.remoteStorage.deleteFile(attachment);
|
|
745
745
|
if (attachment.localUri) {
|
|
746
746
|
await this.localStorage.deleteFile(attachment.localUri);
|
|
747
747
|
}
|
|
748
|
-
await
|
|
749
|
-
await ctx.deleteAttachment(attachment.id);
|
|
750
|
-
});
|
|
748
|
+
await context.deleteAttachment(attachment.id);
|
|
751
749
|
return {
|
|
752
750
|
...attachment,
|
|
753
751
|
state: AttachmentState.ARCHIVED
|
|
@@ -785,32 +783,198 @@ class SyncingService {
|
|
|
785
783
|
}
|
|
786
784
|
|
|
787
785
|
/**
|
|
788
|
-
*
|
|
786
|
+
* A simple fixed-capacity queue implementation.
|
|
787
|
+
*
|
|
788
|
+
* Unlike a naive queue implemented by `array.push()` and `array.shift()`, this avoids moving array elements around
|
|
789
|
+
* and is `O(1)` for {@link addLast} and {@link removeFirst}.
|
|
789
790
|
*/
|
|
790
|
-
|
|
791
|
-
|
|
792
|
-
|
|
793
|
-
|
|
794
|
-
|
|
795
|
-
|
|
796
|
-
|
|
797
|
-
|
|
798
|
-
|
|
799
|
-
|
|
800
|
-
|
|
801
|
-
|
|
802
|
-
|
|
803
|
-
|
|
804
|
-
|
|
805
|
-
|
|
806
|
-
|
|
807
|
-
|
|
791
|
+
class Queue {
|
|
792
|
+
table;
|
|
793
|
+
// Index of the first element in the table.
|
|
794
|
+
head;
|
|
795
|
+
// Amount of items currently in the queue.
|
|
796
|
+
_length;
|
|
797
|
+
constructor(initialItems) {
|
|
798
|
+
this.table = [...initialItems];
|
|
799
|
+
this.head = 0;
|
|
800
|
+
this._length = this.table.length;
|
|
801
|
+
}
|
|
802
|
+
get isEmpty() {
|
|
803
|
+
return this.length == 0;
|
|
804
|
+
}
|
|
805
|
+
get length() {
|
|
806
|
+
return this._length;
|
|
807
|
+
}
|
|
808
|
+
removeFirst() {
|
|
809
|
+
if (this.isEmpty) {
|
|
810
|
+
throw new Error('Queue is empty');
|
|
811
|
+
}
|
|
812
|
+
const result = this.table[this.head];
|
|
813
|
+
this._length--;
|
|
814
|
+
this.table[this.head] = undefined;
|
|
815
|
+
this.head = (this.head + 1) % this.table.length;
|
|
816
|
+
return result;
|
|
817
|
+
}
|
|
818
|
+
addLast(element) {
|
|
819
|
+
if (this.length == this.table.length) {
|
|
820
|
+
throw new Error('Queue is full');
|
|
821
|
+
}
|
|
822
|
+
this.table[(this.head + this._length) % this.table.length] = element;
|
|
823
|
+
this._length++;
|
|
824
|
+
}
|
|
825
|
+
}
|
|
826
|
+
|
|
827
|
+
/**
|
|
828
|
+
* An asynchronous semaphore implementation with associated items per lease.
|
|
829
|
+
*
|
|
830
|
+
* @internal This class is meant to be used in PowerSync SDKs only, and is not part of the public API.
|
|
831
|
+
*/
|
|
832
|
+
class Semaphore {
|
|
833
|
+
// Available items that are not currently assigned to a waiter.
|
|
834
|
+
available;
|
|
835
|
+
size;
|
|
836
|
+
// Linked list of waiters. We don't expect the wait list to become particularly large, and this allows removing
|
|
837
|
+
// aborted waiters from the middle of the list efficiently.
|
|
838
|
+
firstWaiter;
|
|
839
|
+
lastWaiter;
|
|
840
|
+
constructor(elements) {
|
|
841
|
+
this.available = new Queue(elements);
|
|
842
|
+
this.size = this.available.length;
|
|
843
|
+
}
|
|
844
|
+
addWaiter(requestedItems, onAcquire) {
|
|
845
|
+
const node = {
|
|
846
|
+
isActive: true,
|
|
847
|
+
acquiredItems: [],
|
|
848
|
+
remainingItems: requestedItems,
|
|
849
|
+
onAcquire,
|
|
850
|
+
prev: this.lastWaiter
|
|
851
|
+
};
|
|
852
|
+
if (this.lastWaiter) {
|
|
853
|
+
this.lastWaiter.next = node;
|
|
854
|
+
this.lastWaiter = node;
|
|
855
|
+
}
|
|
856
|
+
else {
|
|
857
|
+
// First waiter
|
|
858
|
+
this.lastWaiter = this.firstWaiter = node;
|
|
859
|
+
}
|
|
860
|
+
return node;
|
|
861
|
+
}
|
|
862
|
+
deactivateWaiter(waiter) {
|
|
863
|
+
const { prev, next } = waiter;
|
|
864
|
+
waiter.isActive = false;
|
|
865
|
+
if (prev)
|
|
866
|
+
prev.next = next;
|
|
867
|
+
if (next)
|
|
868
|
+
next.prev = prev;
|
|
869
|
+
if (waiter == this.firstWaiter)
|
|
870
|
+
this.firstWaiter = next;
|
|
871
|
+
if (waiter == this.lastWaiter)
|
|
872
|
+
this.lastWaiter = prev;
|
|
873
|
+
}
|
|
874
|
+
requestPermits(amount, abort) {
|
|
875
|
+
if (amount <= 0 || amount > this.size) {
|
|
876
|
+
throw new Error(`Invalid amount of items requested (${amount}), must be between 1 and ${this.size}`);
|
|
877
|
+
}
|
|
878
|
+
return new Promise((resolve, reject) => {
|
|
879
|
+
function rejectAborted() {
|
|
880
|
+
reject(abort?.reason ?? new Error('Semaphore acquire aborted'));
|
|
881
|
+
}
|
|
882
|
+
if (abort?.aborted) {
|
|
883
|
+
return rejectAborted();
|
|
884
|
+
}
|
|
885
|
+
let waiter;
|
|
886
|
+
const markCompleted = () => {
|
|
887
|
+
const items = waiter.acquiredItems;
|
|
888
|
+
waiter.acquiredItems = []; // Avoid releasing items twice.
|
|
889
|
+
for (const element of items) {
|
|
890
|
+
// Give to next waiter, if possible.
|
|
891
|
+
const nextWaiter = this.firstWaiter;
|
|
892
|
+
if (nextWaiter) {
|
|
893
|
+
nextWaiter.acquiredItems.push(element);
|
|
894
|
+
nextWaiter.remainingItems--;
|
|
895
|
+
if (nextWaiter.remainingItems == 0) {
|
|
896
|
+
nextWaiter.onAcquire();
|
|
897
|
+
}
|
|
898
|
+
}
|
|
899
|
+
else {
|
|
900
|
+
// No pending waiter, return lease into pool.
|
|
901
|
+
this.available.addLast(element);
|
|
902
|
+
}
|
|
903
|
+
}
|
|
904
|
+
};
|
|
905
|
+
const onAbort = () => {
|
|
906
|
+
abort?.removeEventListener('abort', onAbort);
|
|
907
|
+
if (waiter.isActive) {
|
|
908
|
+
this.deactivateWaiter(waiter);
|
|
909
|
+
rejectAborted();
|
|
910
|
+
}
|
|
911
|
+
};
|
|
912
|
+
const resolvePromise = () => {
|
|
913
|
+
this.deactivateWaiter(waiter);
|
|
914
|
+
abort?.removeEventListener('abort', onAbort);
|
|
915
|
+
const items = waiter.acquiredItems;
|
|
916
|
+
resolve({ items, release: markCompleted });
|
|
917
|
+
};
|
|
918
|
+
waiter = this.addWaiter(amount, resolvePromise);
|
|
919
|
+
// If there are items in the pool that haven't been assigned, we can pull them into this waiter. Note that this is
|
|
920
|
+
// only the case if we're the first waiter (otherwise, items would have been assigned to an earlier waiter).
|
|
921
|
+
while (!this.available.isEmpty && waiter.remainingItems > 0) {
|
|
922
|
+
waiter.acquiredItems.push(this.available.removeFirst());
|
|
923
|
+
waiter.remainingItems--;
|
|
808
924
|
}
|
|
809
|
-
|
|
810
|
-
|
|
925
|
+
if (waiter.remainingItems == 0) {
|
|
926
|
+
return resolvePromise();
|
|
811
927
|
}
|
|
928
|
+
abort?.addEventListener('abort', onAbort);
|
|
812
929
|
});
|
|
813
|
-
}
|
|
930
|
+
}
|
|
931
|
+
/**
|
|
932
|
+
* Requests a single item from the pool.
|
|
933
|
+
*
|
|
934
|
+
* The returned `release` callback must be invoked to return the item into the pool.
|
|
935
|
+
*/
|
|
936
|
+
async requestOne(abort) {
|
|
937
|
+
const { items, release } = await this.requestPermits(1, abort);
|
|
938
|
+
return { release, item: items[0] };
|
|
939
|
+
}
|
|
940
|
+
/**
|
|
941
|
+
* Requests access to all items from the pool.
|
|
942
|
+
*
|
|
943
|
+
* The returned `release` callback must be invoked to return items into the pool.
|
|
944
|
+
*/
|
|
945
|
+
requestAll(abort) {
|
|
946
|
+
return this.requestPermits(this.size, abort);
|
|
947
|
+
}
|
|
948
|
+
}
|
|
949
|
+
/**
|
|
950
|
+
* An asynchronous mutex implementation.
|
|
951
|
+
*
|
|
952
|
+
* @internal This class is meant to be used in PowerSync SDKs only, and is not part of the public API.
|
|
953
|
+
*/
|
|
954
|
+
class Mutex {
|
|
955
|
+
inner = new Semaphore([null]);
|
|
956
|
+
async acquire(abort) {
|
|
957
|
+
const { release } = await this.inner.requestOne(abort);
|
|
958
|
+
return release;
|
|
959
|
+
}
|
|
960
|
+
async runExclusive(fn, abort) {
|
|
961
|
+
const returnMutex = await this.acquire(abort);
|
|
962
|
+
try {
|
|
963
|
+
return await fn();
|
|
964
|
+
}
|
|
965
|
+
finally {
|
|
966
|
+
returnMutex();
|
|
967
|
+
}
|
|
968
|
+
}
|
|
969
|
+
}
|
|
970
|
+
function timeoutSignal(timeout) {
|
|
971
|
+
if (timeout == null)
|
|
972
|
+
return;
|
|
973
|
+
if ('timeout' in AbortSignal)
|
|
974
|
+
return AbortSignal.timeout(timeout);
|
|
975
|
+
const controller = new AbortController();
|
|
976
|
+
setTimeout(() => controller.abort(new Error('Timeout waiting for lock')), timeout);
|
|
977
|
+
return controller.signal;
|
|
814
978
|
}
|
|
815
979
|
|
|
816
980
|
/**
|
|
@@ -859,7 +1023,7 @@ class AttachmentService {
|
|
|
859
1023
|
* Executes a callback with exclusive access to the attachment context.
|
|
860
1024
|
*/
|
|
861
1025
|
async withContext(callback) {
|
|
862
|
-
return
|
|
1026
|
+
return this.mutex.runExclusive(async () => {
|
|
863
1027
|
return callback(this.context);
|
|
864
1028
|
});
|
|
865
1029
|
}
|
|
@@ -895,9 +1059,15 @@ class AttachmentQueue {
|
|
|
895
1059
|
tableName;
|
|
896
1060
|
/** Logger instance for diagnostic information */
|
|
897
1061
|
logger;
|
|
898
|
-
/** Interval in milliseconds between periodic sync operations.
|
|
1062
|
+
/** Interval in milliseconds between periodic sync operations. Acts as a polling timer to retry
|
|
1063
|
+
* failed uploads/downloads, especially after the app goes offline. Default: 30000 (30 seconds) */
|
|
899
1064
|
syncIntervalMs = 30 * 1000;
|
|
900
|
-
/**
|
|
1065
|
+
/** Throttle duration in milliseconds for the reactive watch query on the attachments table.
|
|
1066
|
+
* When attachment records change, a watch query detects the change and triggers a sync.
|
|
1067
|
+
* This throttle prevents the sync from firing too rapidly when many changes happen in
|
|
1068
|
+
* quick succession (e.g., bulk inserts). This is distinct from syncIntervalMs — it controls
|
|
1069
|
+
* how quickly the queue reacts to changes, while syncIntervalMs controls how often it polls
|
|
1070
|
+
* for retries. Default: 30 (from DEFAULT_WATCH_THROTTLE_MS) */
|
|
901
1071
|
syncThrottleDuration;
|
|
902
1072
|
/** Whether to automatically download remote attachments. Default: true */
|
|
903
1073
|
downloadAttachments = true;
|
|
@@ -921,8 +1091,8 @@ class AttachmentQueue {
|
|
|
921
1091
|
* @param options.watchAttachments - Callback for monitoring attachment changes in your data model
|
|
922
1092
|
* @param options.tableName - Name of the table to store attachment records. Default: 'ps_attachment_queue'
|
|
923
1093
|
* @param options.logger - Logger instance. Defaults to db.logger
|
|
924
|
-
* @param options.syncIntervalMs -
|
|
925
|
-
* @param options.syncThrottleDuration - Throttle duration for
|
|
1094
|
+
* @param options.syncIntervalMs - Periodic polling interval in milliseconds for retrying failed uploads/downloads. Default: 30000
|
|
1095
|
+
* @param options.syncThrottleDuration - Throttle duration in milliseconds for the reactive watch query that detects attachment changes. Prevents rapid-fire syncs during bulk changes. Default: 30
|
|
926
1096
|
* @param options.downloadAttachments - Whether to automatically download remote attachments. Default: true
|
|
927
1097
|
* @param options.archivedCacheLimit - Maximum archived attachments before cleanup. Default: 100
|
|
928
1098
|
*/
|
|
@@ -1530,6 +1700,49 @@ var Logger = /*@__PURE__*/getDefaultExportFromCjs(loggerExports);
|
|
|
1530
1700
|
* Set of generic interfaces to allow PowerSync compatibility with
|
|
1531
1701
|
* different SQLite DB implementations.
|
|
1532
1702
|
*/
|
|
1703
|
+
/**
|
|
1704
|
+
* Implements {@link DBGetUtils} on a {@link SqlRunner}.
|
|
1705
|
+
*/
|
|
1706
|
+
function DBGetUtilsDefaultMixin(Base) {
|
|
1707
|
+
return class extends Base {
|
|
1708
|
+
async getAll(sql, parameters) {
|
|
1709
|
+
const res = await this.execute(sql, parameters);
|
|
1710
|
+
return res.rows?._array ?? [];
|
|
1711
|
+
}
|
|
1712
|
+
async getOptional(sql, parameters) {
|
|
1713
|
+
const res = await this.execute(sql, parameters);
|
|
1714
|
+
return res.rows?.item(0) ?? null;
|
|
1715
|
+
}
|
|
1716
|
+
async get(sql, parameters) {
|
|
1717
|
+
const res = await this.execute(sql, parameters);
|
|
1718
|
+
const first = res.rows?.item(0);
|
|
1719
|
+
if (!first) {
|
|
1720
|
+
throw new Error('Result set is empty');
|
|
1721
|
+
}
|
|
1722
|
+
return first;
|
|
1723
|
+
}
|
|
1724
|
+
async executeBatch(query, params = []) {
|
|
1725
|
+
// If this context can run batch statements natively, use that.
|
|
1726
|
+
// @ts-ignore
|
|
1727
|
+
if (super.executeBatch) {
|
|
1728
|
+
// @ts-ignore
|
|
1729
|
+
return super.executeBatch(query, params);
|
|
1730
|
+
}
|
|
1731
|
+
// Emulate executeBatch by running statements individually.
|
|
1732
|
+
let lastInsertId;
|
|
1733
|
+
let rowsAffected = 0;
|
|
1734
|
+
for (const set of params) {
|
|
1735
|
+
const result = await this.execute(query, set);
|
|
1736
|
+
lastInsertId = result.insertId;
|
|
1737
|
+
rowsAffected += result.rowsAffected;
|
|
1738
|
+
}
|
|
1739
|
+
return {
|
|
1740
|
+
rowsAffected,
|
|
1741
|
+
insertId: lastInsertId
|
|
1742
|
+
};
|
|
1743
|
+
}
|
|
1744
|
+
};
|
|
1745
|
+
}
|
|
1533
1746
|
/**
|
|
1534
1747
|
* Update table operation numbers from SQLite
|
|
1535
1748
|
*/
|
|
@@ -1539,6 +1752,89 @@ var RowUpdateType;
|
|
|
1539
1752
|
RowUpdateType[RowUpdateType["SQLITE_DELETE"] = 9] = "SQLITE_DELETE";
|
|
1540
1753
|
RowUpdateType[RowUpdateType["SQLITE_UPDATE"] = 23] = "SQLITE_UPDATE";
|
|
1541
1754
|
})(RowUpdateType || (RowUpdateType = {}));
|
|
1755
|
+
/**
|
|
1756
|
+
* A mixin to implement {@link DBAdapter} by delegating to {@link ConnectionPool.readLock} and
|
|
1757
|
+
* {@link ConnectionPool.writeLock}.
|
|
1758
|
+
*/
|
|
1759
|
+
function DBAdapterDefaultMixin(Base) {
|
|
1760
|
+
return class extends Base {
|
|
1761
|
+
readTransaction(fn, options) {
|
|
1762
|
+
return this.readLock((ctx) => TransactionImplementation.runWith(ctx, fn), options);
|
|
1763
|
+
}
|
|
1764
|
+
writeTransaction(fn, options) {
|
|
1765
|
+
return this.writeLock((ctx) => TransactionImplementation.runWith(ctx, fn), options);
|
|
1766
|
+
}
|
|
1767
|
+
getAll(sql, parameters) {
|
|
1768
|
+
return this.readLock((ctx) => ctx.getAll(sql, parameters));
|
|
1769
|
+
}
|
|
1770
|
+
getOptional(sql, parameters) {
|
|
1771
|
+
return this.readLock((ctx) => ctx.getOptional(sql, parameters));
|
|
1772
|
+
}
|
|
1773
|
+
get(sql, parameters) {
|
|
1774
|
+
return this.readLock((ctx) => ctx.get(sql, parameters));
|
|
1775
|
+
}
|
|
1776
|
+
execute(query, params) {
|
|
1777
|
+
return this.writeLock((ctx) => ctx.execute(query, params));
|
|
1778
|
+
}
|
|
1779
|
+
executeRaw(query, params) {
|
|
1780
|
+
return this.writeLock((ctx) => ctx.executeRaw(query, params));
|
|
1781
|
+
}
|
|
1782
|
+
executeBatch(query, params) {
|
|
1783
|
+
return this.writeTransaction((tx) => tx.executeBatch(query, params));
|
|
1784
|
+
}
|
|
1785
|
+
};
|
|
1786
|
+
}
|
|
1787
|
+
class BaseTransaction {
|
|
1788
|
+
inner;
|
|
1789
|
+
finalized = false;
|
|
1790
|
+
constructor(inner) {
|
|
1791
|
+
this.inner = inner;
|
|
1792
|
+
}
|
|
1793
|
+
async commit() {
|
|
1794
|
+
if (this.finalized) {
|
|
1795
|
+
return { rowsAffected: 0 };
|
|
1796
|
+
}
|
|
1797
|
+
this.finalized = true;
|
|
1798
|
+
return this.inner.execute('COMMIT');
|
|
1799
|
+
}
|
|
1800
|
+
async rollback() {
|
|
1801
|
+
if (this.finalized) {
|
|
1802
|
+
return { rowsAffected: 0 };
|
|
1803
|
+
}
|
|
1804
|
+
this.finalized = true;
|
|
1805
|
+
return this.inner.execute('ROLLBACK');
|
|
1806
|
+
}
|
|
1807
|
+
execute(query, params) {
|
|
1808
|
+
return this.inner.execute(query, params);
|
|
1809
|
+
}
|
|
1810
|
+
executeRaw(query, params) {
|
|
1811
|
+
return this.inner.executeRaw(query, params);
|
|
1812
|
+
}
|
|
1813
|
+
executeBatch(query, params) {
|
|
1814
|
+
return this.inner.executeBatch(query, params);
|
|
1815
|
+
}
|
|
1816
|
+
}
|
|
1817
|
+
class TransactionImplementation extends DBGetUtilsDefaultMixin(BaseTransaction) {
|
|
1818
|
+
static async runWith(ctx, fn) {
|
|
1819
|
+
let tx = new TransactionImplementation(ctx);
|
|
1820
|
+
try {
|
|
1821
|
+
await ctx.execute('BEGIN IMMEDIATE');
|
|
1822
|
+
const result = await fn(tx);
|
|
1823
|
+
await tx.commit();
|
|
1824
|
+
return result;
|
|
1825
|
+
}
|
|
1826
|
+
catch (ex) {
|
|
1827
|
+
try {
|
|
1828
|
+
await tx.rollback();
|
|
1829
|
+
}
|
|
1830
|
+
catch (ex2) {
|
|
1831
|
+
// In rare cases, a rollback may fail.
|
|
1832
|
+
// Safe to ignore.
|
|
1833
|
+
}
|
|
1834
|
+
throw ex;
|
|
1835
|
+
}
|
|
1836
|
+
}
|
|
1837
|
+
}
|
|
1542
1838
|
function isBatchedUpdateNotification(update) {
|
|
1543
1839
|
return 'tables' in update;
|
|
1544
1840
|
}
|
|
@@ -1959,15 +2255,6 @@ class ControlledExecutor {
|
|
|
1959
2255
|
}
|
|
1960
2256
|
}
|
|
1961
2257
|
|
|
1962
|
-
/**
|
|
1963
|
-
* A ponyfill for `Symbol.asyncIterator` that is compatible with the
|
|
1964
|
-
* [recommended polyfill](https://github.com/Azure/azure-sdk-for-js/blob/%40azure/core-asynciterator-polyfill_1.0.2/sdk/core/core-asynciterator-polyfill/src/index.ts#L4-L6)
|
|
1965
|
-
* we recommend for React Native.
|
|
1966
|
-
*
|
|
1967
|
-
* As long as we use this symbol (instead of `for await` and `async *`) in this package, we can be compatible with async
|
|
1968
|
-
* iterators without requiring them.
|
|
1969
|
-
*/
|
|
1970
|
-
const symbolAsyncIterator = Symbol.asyncIterator ?? Symbol.for('Symbol.asyncIterator');
|
|
1971
2258
|
/**
|
|
1972
2259
|
* Throttle a function to be called at most once every "wait" milliseconds,
|
|
1973
2260
|
* on the trailing edge.
|
|
@@ -7933,177 +8220,10 @@ function requireDist () {
|
|
|
7933
8220
|
|
|
7934
8221
|
var distExports = requireDist();
|
|
7935
8222
|
|
|
7936
|
-
var version = "1.
|
|
8223
|
+
var version = "1.51.0";
|
|
7937
8224
|
var PACKAGE = {
|
|
7938
8225
|
version: version};
|
|
7939
8226
|
|
|
7940
|
-
const DEFAULT_PRESSURE_LIMITS = {
|
|
7941
|
-
highWater: 10,
|
|
7942
|
-
lowWater: 0
|
|
7943
|
-
};
|
|
7944
|
-
/**
|
|
7945
|
-
* A very basic implementation of a data stream with backpressure support which does not use
|
|
7946
|
-
* native JS streams or async iterators.
|
|
7947
|
-
* This is handy for environments such as React Native which need polyfills for the above.
|
|
7948
|
-
*/
|
|
7949
|
-
class DataStream extends BaseObserver {
|
|
7950
|
-
options;
|
|
7951
|
-
dataQueue;
|
|
7952
|
-
isClosed;
|
|
7953
|
-
processingPromise;
|
|
7954
|
-
notifyDataAdded;
|
|
7955
|
-
logger;
|
|
7956
|
-
mapLine;
|
|
7957
|
-
constructor(options) {
|
|
7958
|
-
super();
|
|
7959
|
-
this.options = options;
|
|
7960
|
-
this.processingPromise = null;
|
|
7961
|
-
this.isClosed = false;
|
|
7962
|
-
this.dataQueue = [];
|
|
7963
|
-
this.mapLine = options?.mapLine ?? ((line) => line);
|
|
7964
|
-
this.logger = options?.logger ?? Logger.get('DataStream');
|
|
7965
|
-
if (options?.closeOnError) {
|
|
7966
|
-
const l = this.registerListener({
|
|
7967
|
-
error: (ex) => {
|
|
7968
|
-
l?.();
|
|
7969
|
-
this.close();
|
|
7970
|
-
}
|
|
7971
|
-
});
|
|
7972
|
-
}
|
|
7973
|
-
}
|
|
7974
|
-
get highWatermark() {
|
|
7975
|
-
return this.options?.pressure?.highWaterMark ?? DEFAULT_PRESSURE_LIMITS.highWater;
|
|
7976
|
-
}
|
|
7977
|
-
get lowWatermark() {
|
|
7978
|
-
return this.options?.pressure?.lowWaterMark ?? DEFAULT_PRESSURE_LIMITS.lowWater;
|
|
7979
|
-
}
|
|
7980
|
-
get closed() {
|
|
7981
|
-
return this.isClosed;
|
|
7982
|
-
}
|
|
7983
|
-
async close() {
|
|
7984
|
-
this.isClosed = true;
|
|
7985
|
-
await this.processingPromise;
|
|
7986
|
-
this.iterateListeners((l) => l.closed?.());
|
|
7987
|
-
// Discard any data in the queue
|
|
7988
|
-
this.dataQueue = [];
|
|
7989
|
-
this.listeners.clear();
|
|
7990
|
-
}
|
|
7991
|
-
/**
|
|
7992
|
-
* Enqueues data for the consumers to read
|
|
7993
|
-
*/
|
|
7994
|
-
enqueueData(data) {
|
|
7995
|
-
if (this.isClosed) {
|
|
7996
|
-
throw new Error('Cannot enqueue data into closed stream.');
|
|
7997
|
-
}
|
|
7998
|
-
this.dataQueue.push(data);
|
|
7999
|
-
this.notifyDataAdded?.();
|
|
8000
|
-
this.processQueue();
|
|
8001
|
-
}
|
|
8002
|
-
/**
|
|
8003
|
-
* Reads data once from the data stream
|
|
8004
|
-
* @returns a Data payload or Null if the stream closed.
|
|
8005
|
-
*/
|
|
8006
|
-
async read() {
|
|
8007
|
-
if (this.closed) {
|
|
8008
|
-
return null;
|
|
8009
|
-
}
|
|
8010
|
-
// Wait for any pending processing to complete first.
|
|
8011
|
-
// This ensures we register our listener before calling processQueue(),
|
|
8012
|
-
// avoiding a race where processQueue() sees no reader and returns early.
|
|
8013
|
-
if (this.processingPromise) {
|
|
8014
|
-
await this.processingPromise;
|
|
8015
|
-
}
|
|
8016
|
-
// Re-check after await - stream may have closed while we were waiting
|
|
8017
|
-
if (this.closed) {
|
|
8018
|
-
return null;
|
|
8019
|
-
}
|
|
8020
|
-
return new Promise((resolve, reject) => {
|
|
8021
|
-
const l = this.registerListener({
|
|
8022
|
-
data: async (data) => {
|
|
8023
|
-
resolve(data);
|
|
8024
|
-
// Remove the listener
|
|
8025
|
-
l?.();
|
|
8026
|
-
},
|
|
8027
|
-
closed: () => {
|
|
8028
|
-
resolve(null);
|
|
8029
|
-
l?.();
|
|
8030
|
-
},
|
|
8031
|
-
error: (ex) => {
|
|
8032
|
-
reject(ex);
|
|
8033
|
-
l?.();
|
|
8034
|
-
}
|
|
8035
|
-
});
|
|
8036
|
-
this.processQueue();
|
|
8037
|
-
});
|
|
8038
|
-
}
|
|
8039
|
-
/**
|
|
8040
|
-
* Executes a callback for each data item in the stream
|
|
8041
|
-
*/
|
|
8042
|
-
forEach(callback) {
|
|
8043
|
-
if (this.dataQueue.length <= this.lowWatermark) {
|
|
8044
|
-
this.iterateAsyncErrored(async (l) => l.lowWater?.());
|
|
8045
|
-
}
|
|
8046
|
-
return this.registerListener({
|
|
8047
|
-
data: callback
|
|
8048
|
-
});
|
|
8049
|
-
}
|
|
8050
|
-
processQueue() {
|
|
8051
|
-
if (this.processingPromise) {
|
|
8052
|
-
return;
|
|
8053
|
-
}
|
|
8054
|
-
const promise = (this.processingPromise = this._processQueue());
|
|
8055
|
-
promise.finally(() => {
|
|
8056
|
-
this.processingPromise = null;
|
|
8057
|
-
});
|
|
8058
|
-
return promise;
|
|
8059
|
-
}
|
|
8060
|
-
hasDataReader() {
|
|
8061
|
-
return Array.from(this.listeners.values()).some((l) => !!l.data);
|
|
8062
|
-
}
|
|
8063
|
-
async _processQueue() {
|
|
8064
|
-
/**
|
|
8065
|
-
* Allow listeners to mutate the queue before processing.
|
|
8066
|
-
* This allows for operations such as dropping or compressing data
|
|
8067
|
-
* on high water or requesting more data on low water.
|
|
8068
|
-
*/
|
|
8069
|
-
if (this.dataQueue.length >= this.highWatermark) {
|
|
8070
|
-
await this.iterateAsyncErrored(async (l) => l.highWater?.());
|
|
8071
|
-
}
|
|
8072
|
-
if (this.isClosed || !this.hasDataReader()) {
|
|
8073
|
-
return;
|
|
8074
|
-
}
|
|
8075
|
-
if (this.dataQueue.length) {
|
|
8076
|
-
const data = this.dataQueue.shift();
|
|
8077
|
-
const mapped = this.mapLine(data);
|
|
8078
|
-
await this.iterateAsyncErrored(async (l) => l.data?.(mapped));
|
|
8079
|
-
}
|
|
8080
|
-
if (this.dataQueue.length <= this.lowWatermark) {
|
|
8081
|
-
const dataAdded = new Promise((resolve) => {
|
|
8082
|
-
this.notifyDataAdded = resolve;
|
|
8083
|
-
});
|
|
8084
|
-
await Promise.race([this.iterateAsyncErrored(async (l) => l.lowWater?.()), dataAdded]);
|
|
8085
|
-
this.notifyDataAdded = null;
|
|
8086
|
-
}
|
|
8087
|
-
if (this.dataQueue.length > 0) {
|
|
8088
|
-
setTimeout(() => this.processQueue());
|
|
8089
|
-
}
|
|
8090
|
-
}
|
|
8091
|
-
async iterateAsyncErrored(cb) {
|
|
8092
|
-
// Important: We need to copy the listeners, as calling a listener could result in adding another
|
|
8093
|
-
// listener, resulting in infinite loops.
|
|
8094
|
-
const listeners = Array.from(this.listeners.values());
|
|
8095
|
-
for (let i of listeners) {
|
|
8096
|
-
try {
|
|
8097
|
-
await cb(i);
|
|
8098
|
-
}
|
|
8099
|
-
catch (ex) {
|
|
8100
|
-
this.logger.error(ex);
|
|
8101
|
-
this.iterateListeners((l) => l.error?.(ex));
|
|
8102
|
-
}
|
|
8103
|
-
}
|
|
8104
|
-
}
|
|
8105
|
-
}
|
|
8106
|
-
|
|
8107
8227
|
var WebsocketDuplexConnection = {};
|
|
8108
8228
|
|
|
8109
8229
|
var hasRequiredWebsocketDuplexConnection;
|
|
@@ -8266,8 +8386,215 @@ class WebsocketClientTransport {
|
|
|
8266
8386
|
}
|
|
8267
8387
|
}
|
|
8268
8388
|
|
|
8389
|
+
const doneResult = { done: true, value: undefined };
|
|
8390
|
+
function valueResult(value) {
|
|
8391
|
+
return { done: false, value };
|
|
8392
|
+
}
|
|
8393
|
+
/**
|
|
8394
|
+
* A variant of {@link Array.map} for async iterators.
|
|
8395
|
+
*/
|
|
8396
|
+
function map(source, map) {
|
|
8397
|
+
return {
|
|
8398
|
+
next: async () => {
|
|
8399
|
+
const value = await source.next();
|
|
8400
|
+
if (value.done) {
|
|
8401
|
+
return value;
|
|
8402
|
+
}
|
|
8403
|
+
else {
|
|
8404
|
+
return { value: map(value.value) };
|
|
8405
|
+
}
|
|
8406
|
+
}
|
|
8407
|
+
};
|
|
8408
|
+
}
|
|
8409
|
+
/**
|
|
8410
|
+
* Expands a source async iterator by allowing to inject events asynchronously.
|
|
8411
|
+
*
|
|
8412
|
+
* The resulting iterator will emit all events from its source. Additionally though, events can be injected. These
|
|
8413
|
+
* events are dropped once the main iterator completes, but are otherwise forwarded.
|
|
8414
|
+
*
|
|
8415
|
+
* The iterator completes when its source completes, and it supports backpressure by only calling `next()` on the source
|
|
8416
|
+
* in response to a `next()` call from downstream if no pending injected events can be dispatched.
|
|
8417
|
+
*/
|
|
8418
|
+
/**
 * Wraps an async-iterator-like `source` (an object with a Promise-returning
 * `next()`) so that callers can also push their own events into the resulting
 * stream via `inject(event)`.
 *
 * Dispatch order in `next()`:
 *   1. Once the source has completed, `next()` keeps resolving with `doneResult`.
 *   2. A source event that arrived while no `next()` call was waiting is delivered first.
 *   3. Otherwise, queued injected events are delivered (FIFO).
 *   4. Only then is a new value requested from the source.
 *
 * NOTE(review): assumes a single sequential consumer — a second concurrent
 * `next()` call would overwrite `waiter` and leave the first promise pending
 * forever; confirm callers never overlap next() calls.
 * NOTE(review): injected events still queued when the source completes are
 * never delivered (the `sourceIsDone` check runs before the injected-event
 * check) — presumably acceptable for this use case; confirm with callers.
 */
function injectable(source) {
    // True once source.next() has resolved with { done: true }.
    let sourceIsDone = false;
    let waiter = undefined; // An active, waiting next() call.
    // A pending upstream event that couldn't be dispatched because inject() has been called before it was resolved.
    // Stored as a callback that is later handed a waiting { resolve, reject } pair.
    let pendingSourceEvent = null;
    // Events passed to inject() while no next() call was waiting, in FIFO order.
    let pendingInjectedEvents = [];
    // Detach and return the currently-waiting next() call, if any.
    const consumeWaiter = () => {
        const pending = waiter;
        waiter = undefined;
        return pending;
    };
    // Request one value from the source and route its settlement either to the
    // waiting next() call, or into pendingSourceEvent if inject() consumed the
    // waiter before the source resolved.
    const fetchFromSource = () => {
        const resolveWaiter = (propagate) => {
            const active = consumeWaiter();
            if (active) {
                propagate(active);
            }
            else {
                // The waiter was already satisfied by inject(); park the source
                // event until the next next() call picks it up.
                pendingSourceEvent = propagate;
            }
        };
        const nextFromSource = source.next();
        nextFromSource.then((value) => {
            sourceIsDone = value.done == true;
            resolveWaiter((w) => w.resolve(value));
        }, (error) => {
            resolveWaiter((w) => w.reject(error));
        });
    };
    return {
        next: () => {
            return new Promise((resolve, reject) => {
                // First priority: Dispatch ready upstream events.
                if (sourceIsDone) {
                    return resolve(doneResult);
                }
                if (pendingSourceEvent) {
                    pendingSourceEvent({ resolve, reject });
                    pendingSourceEvent = null;
                    return;
                }
                // Second priority: Dispatch injected events
                if (pendingInjectedEvents.length) {
                    return resolve(valueResult(pendingInjectedEvents.shift()));
                }
                // Nothing pending? Fetch from source
                waiter = { resolve, reject };
                return fetchFromSource();
            });
        },
        inject: (event) => {
            // If a next() call is currently waiting, satisfy it right away;
            // otherwise queue the event for a later next() call.
            const pending = consumeWaiter();
            if (pending != null) {
                pending.resolve(valueResult(event));
            }
            else {
                pendingInjectedEvents.push(event);
            }
        }
    };
}
|
|
8479
|
+
/**
 * Splits a byte stream at line endings, emitting each trimmed, non-empty line
 * as a string.
 *
 * `source` is an async-iterator-like object yielding byte chunks; `decoder` is
 * a TextDecoder used in streaming mode so multi-byte characters split across
 * chunk boundaries are decoded correctly. A trailing line without a final
 * newline is emitted once the source completes.
 */
function extractJsonLines(source, decoder) {
    // Decoded text not yet terminated by a newline.
    let carry = '';
    // Complete lines decoded but not yet handed out by next().
    const queuedLines = [];
    // Set once the trailing (newline-less) remainder has been emitted; the
    // following next() call then reports completion.
    let emittedTrailingLine = false;
    const next = async () => {
        for (;;) {
            if (emittedTrailingLine) {
                return doneResult;
            }
            const queued = queuedLines.shift();
            if (queued) {
                return { done: false, value: queued };
            }
            const { done, value } = await source.next();
            if (done) {
                const tail = carry.trim();
                if (tail.length == 0) {
                    return doneResult;
                }
                emittedTrailingLine = true;
                return { done: false, value: tail };
            }
            carry += decoder.decode(value, { stream: true });
            const pieces = carry.split('\n');
            // The final piece has no terminating newline yet; keep it buffered.
            carry = pieces.pop();
            for (const piece of pieces) {
                const trimmed = piece.trim();
                if (trimmed.length > 0) {
                    queuedLines.push(trimmed);
                }
            }
        }
    };
    return { next };
}
|
|
8521
|
+
/**
 * Splits a concatenated stream of BSON objects by emitting individual objects.
 *
 * `source` is an async-iterator-like object yielding byte chunks (the code
 * reads `chunk.buffer`, `chunk.byteOffset`, `chunk.length` and indexes bytes,
 * so chunks are expected to be Uint8Array-compatible). Each next() call on the
 * returned object yields one complete BSON document as a Uint8Array, including
 * its 4-byte little-endian length header.
 *
 * @throws Error if the source ends in the middle of a document or a length
 *   header, or if a document declares a length smaller than 5 bytes.
 */
function extractBsonObjects(source) {
    // Fully read but not emitted yet.
    const completedObjects = [];
    // Whether source has returned { done: true }. We do the same once completed objects have been emitted.
    let isDone = false;
    // Scratch space for the 4-byte document-length header, which may itself be
    // split across chunks.
    const lengthBuffer = new DataView(new ArrayBuffer(4));
    // The document currently being assembled, or null while reading the header.
    let objectBody = null;
    // If we're parsing the length field, a number between 1 and 4 (inclusive) describing remaining bytes in the header.
    // If we're consuming a document, the bytes remaining.
    let remainingLength = 4;
    return {
        async next() {
            while (true) {
                // Before fetching new data from upstream, return completed objects.
                if (completedObjects.length) {
                    return valueResult(completedObjects.shift());
                }
                if (isDone) {
                    return doneResult;
                }
                const upstreamEvent = await source.next();
                if (upstreamEvent.done) {
                    isDone = true;
                    // Ending mid-document (objectBody set) or mid-header
                    // (remainingLength not back at its initial 4) is malformed.
                    if (objectBody || remainingLength != 4) {
                        throw new Error('illegal end of stream in BSON object');
                    }
                    return doneResult;
                }
                const chunk = upstreamEvent.value;
                // A single chunk may contain the tail of one document, any number
                // of complete documents, and the head of the next — consume it all.
                for (let i = 0; i < chunk.length;) {
                    const availableInData = chunk.length - i;
                    if (objectBody) {
                        // We're in the middle of reading a BSON document.
                        const bytesToRead = Math.min(availableInData, remainingLength);
                        // View (not copy) of the relevant slice of the chunk.
                        const copySource = new Uint8Array(chunk.buffer, chunk.byteOffset + i, bytesToRead);
                        // Write position: total length minus what's still missing.
                        objectBody.set(copySource, objectBody.length - remainingLength);
                        i += bytesToRead;
                        remainingLength -= bytesToRead;
                        if (remainingLength == 0) {
                            completedObjects.push(objectBody);
                            // Prepare to read another document, starting with its length
                            objectBody = null;
                            remainingLength = 4;
                        }
                    }
                    else {
                        // Copy up to 4 bytes into lengthBuffer, depending on how many we still need.
                        const bytesToRead = Math.min(availableInData, remainingLength);
                        for (let j = 0; j < bytesToRead; j++) {
                            lengthBuffer.setUint8(4 - remainingLength + j, chunk[i + j]);
                        }
                        i += bytesToRead;
                        remainingLength -= bytesToRead;
                        if (remainingLength == 0) {
                            // Transition from reading length header to reading document. Subtracting 4 because the length of the
                            // header is included in length.
                            const length = lengthBuffer.getInt32(0, true /* little endian */);
                            remainingLength = length - 4;
                            if (remainingLength < 1) {
                                // A valid document is at least 5 bytes (header + terminator).
                                throw new Error(`invalid length for bson: ${length}`);
                            }
                            objectBody = new Uint8Array(length);
                            // Re-emit the header into the output so the emitted
                            // Uint8Array is a complete, standalone BSON document.
                            new DataView(objectBody.buffer).setInt32(0, length, true);
                        }
                    }
                }
            }
        }
    };
}
|
|
8594
|
+
|
|
8269
8595
|
const POWERSYNC_TRAILING_SLASH_MATCH = /\/+$/;
|
|
8270
8596
|
const POWERSYNC_JS_VERSION = PACKAGE.version;
|
|
8597
|
+
const SYNC_QUEUE_REQUEST_HIGH_WATER = 10;
|
|
8271
8598
|
const SYNC_QUEUE_REQUEST_LOW_WATER = 5;
|
|
8272
8599
|
// Keep alive message is sent every period
|
|
8273
8600
|
const KEEP_ALIVE_MS = 20_000;
|
|
@@ -8447,13 +8774,14 @@ class AbstractRemote {
|
|
|
8447
8774
|
return new WebSocket(url);
|
|
8448
8775
|
}
|
|
8449
8776
|
/**
|
|
8450
|
-
* Returns a data stream of sync line data.
|
|
8777
|
+
* Returns a data stream of sync line data, fetched via RSocket-over-WebSocket.
|
|
8778
|
+
*
|
|
8779
|
+
* The only mechanism to abort the returned stream is to use the abort signal in {@link SocketSyncStreamOptions}.
|
|
8451
8780
|
*
|
|
8452
|
-
* @param map Maps received payload frames to the typed event value.
|
|
8453
8781
|
* @param bson A BSON encoder and decoder. When set, the data stream will be requested with a BSON payload
|
|
8454
8782
|
* (required for compatibility with older sync services).
|
|
8455
8783
|
*/
|
|
8456
|
-
async socketStreamRaw(options,
|
|
8784
|
+
async socketStreamRaw(options, bson) {
|
|
8457
8785
|
const { path, fetchStrategy = FetchStrategy.Buffered } = options;
|
|
8458
8786
|
const mimeType = bson == null ? 'application/json' : 'application/bson';
|
|
8459
8787
|
function toBuffer(js) {
|
|
@@ -8468,52 +8796,55 @@ class AbstractRemote {
|
|
|
8468
8796
|
}
|
|
8469
8797
|
const syncQueueRequestSize = fetchStrategy == FetchStrategy.Buffered ? 10 : 1;
|
|
8470
8798
|
const request = await this.buildRequest(path);
|
|
8799
|
+
const url = this.options.socketUrlTransformer(request.url);
|
|
8471
8800
|
// Add the user agent in the setup payload - we can't set custom
|
|
8472
8801
|
// headers with websockets on web. The browser userAgent is however added
|
|
8473
8802
|
// automatically as a header.
|
|
8474
8803
|
const userAgent = this.getUserAgent();
|
|
8475
|
-
|
|
8476
|
-
|
|
8477
|
-
|
|
8478
|
-
|
|
8479
|
-
|
|
8480
|
-
|
|
8481
|
-
|
|
8804
|
+
// While we're connecting (a process that can't be aborted in RSocket), the WebSocket instance to close if we wanted
|
|
8805
|
+
// to abort the connection.
|
|
8806
|
+
let pendingSocket = null;
|
|
8807
|
+
let keepAliveTimeout;
|
|
8808
|
+
let rsocket = null;
|
|
8809
|
+
let queue = null;
|
|
8810
|
+
let didClose = false;
|
|
8811
|
+
const abortRequest = () => {
|
|
8812
|
+
if (didClose) {
|
|
8813
|
+
return;
|
|
8814
|
+
}
|
|
8815
|
+
didClose = true;
|
|
8816
|
+
clearTimeout(keepAliveTimeout);
|
|
8817
|
+
if (pendingSocket) {
|
|
8818
|
+
pendingSocket.close();
|
|
8819
|
+
}
|
|
8820
|
+
if (rsocket) {
|
|
8821
|
+
rsocket.close();
|
|
8822
|
+
}
|
|
8823
|
+
if (queue) {
|
|
8824
|
+
queue.stop();
|
|
8825
|
+
}
|
|
8826
|
+
};
|
|
8482
8827
|
// Handle upstream abort
|
|
8483
|
-
if (options.abortSignal
|
|
8828
|
+
if (options.abortSignal.aborted) {
|
|
8484
8829
|
throw new AbortOperation('Connection request aborted');
|
|
8485
8830
|
}
|
|
8486
8831
|
else {
|
|
8487
|
-
options.abortSignal
|
|
8488
|
-
stream.close();
|
|
8489
|
-
}, { once: true });
|
|
8832
|
+
options.abortSignal.addEventListener('abort', abortRequest);
|
|
8490
8833
|
}
|
|
8491
|
-
let keepAliveTimeout;
|
|
8492
8834
|
const resetTimeout = () => {
|
|
8493
8835
|
clearTimeout(keepAliveTimeout);
|
|
8494
8836
|
keepAliveTimeout = setTimeout(() => {
|
|
8495
8837
|
this.logger.error(`No data received on WebSocket in ${SOCKET_TIMEOUT_MS}ms, closing connection.`);
|
|
8496
|
-
|
|
8838
|
+
abortRequest();
|
|
8497
8839
|
}, SOCKET_TIMEOUT_MS);
|
|
8498
8840
|
};
|
|
8499
8841
|
resetTimeout();
|
|
8500
|
-
// Typescript complains about this being `never` if it's not assigned here.
|
|
8501
|
-
// This is assigned in `wsCreator`.
|
|
8502
|
-
let disposeSocketConnectionTimeout = () => { };
|
|
8503
|
-
const url = this.options.socketUrlTransformer(request.url);
|
|
8504
8842
|
const connector = new distExports.RSocketConnector({
|
|
8505
8843
|
transport: new WebsocketClientTransport({
|
|
8506
8844
|
url,
|
|
8507
8845
|
wsCreator: (url) => {
|
|
8508
|
-
const socket = this.createSocket(url);
|
|
8509
|
-
|
|
8510
|
-
closed: () => {
|
|
8511
|
-
// Allow closing the underlying WebSocket if the stream was closed before the
|
|
8512
|
-
// RSocket connect completed. This should effectively abort the request.
|
|
8513
|
-
socket.close();
|
|
8514
|
-
}
|
|
8515
|
-
});
|
|
8516
|
-
socket.addEventListener('message', (event) => {
|
|
8846
|
+
const socket = (pendingSocket = this.createSocket(url));
|
|
8847
|
+
socket.addEventListener('message', () => {
|
|
8517
8848
|
resetTimeout();
|
|
8518
8849
|
});
|
|
8519
8850
|
return socket;
|
|
@@ -8533,43 +8864,40 @@ class AbstractRemote {
|
|
|
8533
8864
|
}
|
|
8534
8865
|
}
|
|
8535
8866
|
});
|
|
8536
|
-
let rsocket;
|
|
8537
8867
|
try {
|
|
8538
8868
|
rsocket = await connector.connect();
|
|
8539
8869
|
// The connection is established, we no longer need to monitor the initial timeout
|
|
8540
|
-
|
|
8870
|
+
pendingSocket = null;
|
|
8541
8871
|
}
|
|
8542
8872
|
catch (ex) {
|
|
8543
8873
|
this.logger.error(`Failed to connect WebSocket`, ex);
|
|
8544
|
-
|
|
8545
|
-
if (!stream.closed) {
|
|
8546
|
-
await stream.close();
|
|
8547
|
-
}
|
|
8874
|
+
abortRequest();
|
|
8548
8875
|
throw ex;
|
|
8549
8876
|
}
|
|
8550
8877
|
resetTimeout();
|
|
8551
|
-
let socketIsClosed = false;
|
|
8552
|
-
const closeSocket = () => {
|
|
8553
|
-
clearTimeout(keepAliveTimeout);
|
|
8554
|
-
if (socketIsClosed) {
|
|
8555
|
-
return;
|
|
8556
|
-
}
|
|
8557
|
-
socketIsClosed = true;
|
|
8558
|
-
rsocket.close();
|
|
8559
|
-
};
|
|
8560
8878
|
// Helps to prevent double close scenarios
|
|
8561
|
-
rsocket.onClose(() => (
|
|
8562
|
-
|
|
8563
|
-
let pendingEventsCount = syncQueueRequestSize;
|
|
8564
|
-
const disposeClosedListener = stream.registerListener({
|
|
8565
|
-
closed: () => {
|
|
8566
|
-
closeSocket();
|
|
8567
|
-
disposeClosedListener();
|
|
8568
|
-
}
|
|
8569
|
-
});
|
|
8570
|
-
const socket = await new Promise((resolve, reject) => {
|
|
8879
|
+
rsocket.onClose(() => (rsocket = null));
|
|
8880
|
+
return await new Promise((resolve, reject) => {
|
|
8571
8881
|
let connectionEstablished = false;
|
|
8572
|
-
|
|
8882
|
+
let pendingEventsCount = syncQueueRequestSize;
|
|
8883
|
+
let paused = false;
|
|
8884
|
+
let res = null;
|
|
8885
|
+
function requestMore() {
|
|
8886
|
+
const delta = syncQueueRequestSize - pendingEventsCount;
|
|
8887
|
+
if (!paused && delta > 0) {
|
|
8888
|
+
res?.request(delta);
|
|
8889
|
+
pendingEventsCount = syncQueueRequestSize;
|
|
8890
|
+
}
|
|
8891
|
+
}
|
|
8892
|
+
const events = new EventIterator((q) => {
|
|
8893
|
+
queue = q;
|
|
8894
|
+
q.on('highWater', () => (paused = true));
|
|
8895
|
+
q.on('lowWater', () => {
|
|
8896
|
+
paused = false;
|
|
8897
|
+
requestMore();
|
|
8898
|
+
});
|
|
8899
|
+
}, { highWaterMark: SYNC_QUEUE_REQUEST_HIGH_WATER, lowWaterMark: SYNC_QUEUE_REQUEST_LOW_WATER })[Symbol.asyncIterator]();
|
|
8900
|
+
res = rsocket.requestStream({
|
|
8573
8901
|
data: toBuffer(options.data),
|
|
8574
8902
|
metadata: toBuffer({
|
|
8575
8903
|
path
|
|
@@ -8594,7 +8922,7 @@ class AbstractRemote {
|
|
|
8594
8922
|
}
|
|
8595
8923
|
// RSocket will close the RSocket stream automatically
|
|
8596
8924
|
// Close the downstream stream as well - this will close the RSocket connection and WebSocket
|
|
8597
|
-
|
|
8925
|
+
abortRequest();
|
|
8598
8926
|
// Handles cases where the connection failed e.g. auth error or connection error
|
|
8599
8927
|
if (!connectionEstablished) {
|
|
8600
8928
|
reject(e);
|
|
@@ -8604,41 +8932,40 @@ class AbstractRemote {
|
|
|
8604
8932
|
// The connection is active
|
|
8605
8933
|
if (!connectionEstablished) {
|
|
8606
8934
|
connectionEstablished = true;
|
|
8607
|
-
resolve(
|
|
8935
|
+
resolve(events);
|
|
8608
8936
|
}
|
|
8609
8937
|
const { data } = payload;
|
|
8938
|
+
if (data) {
|
|
8939
|
+
queue.push(data);
|
|
8940
|
+
}
|
|
8610
8941
|
// Less events are now pending
|
|
8611
8942
|
pendingEventsCount--;
|
|
8612
|
-
|
|
8613
|
-
|
|
8614
|
-
}
|
|
8615
|
-
stream.enqueueData(data);
|
|
8943
|
+
// Request another event (unless the downstream consumer is paused).
|
|
8944
|
+
requestMore();
|
|
8616
8945
|
},
|
|
8617
8946
|
onComplete: () => {
|
|
8618
|
-
|
|
8947
|
+
abortRequest(); // this will also emit a done event
|
|
8619
8948
|
},
|
|
8620
8949
|
onExtension: () => { }
|
|
8621
8950
|
});
|
|
8622
8951
|
});
|
|
8623
|
-
const l = stream.registerListener({
|
|
8624
|
-
lowWater: async () => {
|
|
8625
|
-
// Request to fill up the queue
|
|
8626
|
-
const required = syncQueueRequestSize - pendingEventsCount;
|
|
8627
|
-
if (required > 0) {
|
|
8628
|
-
socket.request(syncQueueRequestSize - pendingEventsCount);
|
|
8629
|
-
pendingEventsCount = syncQueueRequestSize;
|
|
8630
|
-
}
|
|
8631
|
-
},
|
|
8632
|
-
closed: () => {
|
|
8633
|
-
l();
|
|
8634
|
-
}
|
|
8635
|
-
});
|
|
8636
|
-
return stream;
|
|
8637
8952
|
}
|
|
8638
8953
|
/**
|
|
8639
|
-
*
|
|
8954
|
+
* @returns Whether the HTTP implementation on this platform can receive streamed binary responses. This is true on
|
|
8955
|
+
* all platforms except React Native (who would have guessed...), where we must not request BSON responses.
|
|
8956
|
+
*
|
|
8957
|
+
* @see https://github.com/react-native-community/fetch?tab=readme-ov-file#motivation
|
|
8958
|
+
*/
|
|
8959
|
+
get supportsStreamingBinaryResponses() {
|
|
8960
|
+
return true;
|
|
8961
|
+
}
|
|
8962
|
+
/**
|
|
8963
|
+
* Posts a `/sync/stream` request, asserts that it completes successfully and returns the streaming response as an
|
|
8964
|
+
* async iterator of byte blobs.
|
|
8965
|
+
*
|
|
8966
|
+
* To cancel the async iterator, use the abort signal from {@link SyncStreamOptions} passed to this method.
|
|
8640
8967
|
*/
|
|
8641
|
-
async
|
|
8968
|
+
async fetchStreamRaw(options) {
|
|
8642
8969
|
const { data, path, headers, abortSignal } = options;
|
|
8643
8970
|
const request = await this.buildRequest(path);
|
|
8644
8971
|
/**
|
|
@@ -8650,119 +8977,94 @@ class AbstractRemote {
|
|
|
8650
8977
|
* Aborting the active fetch request while it is being consumed seems to throw
|
|
8651
8978
|
* an unhandled exception on the window level.
|
|
8652
8979
|
*/
|
|
8653
|
-
if (abortSignal
|
|
8654
|
-
throw new AbortOperation('Abort request received before making
|
|
8980
|
+
if (abortSignal.aborted) {
|
|
8981
|
+
throw new AbortOperation('Abort request received before making fetchStreamRaw request');
|
|
8655
8982
|
}
|
|
8656
8983
|
const controller = new AbortController();
|
|
8657
|
-
let
|
|
8658
|
-
abortSignal
|
|
8659
|
-
|
|
8984
|
+
let reader = null;
|
|
8985
|
+
abortSignal.addEventListener('abort', () => {
|
|
8986
|
+
const reason = abortSignal.reason ??
|
|
8987
|
+
new AbortOperation('Cancelling network request before it resolves. Abort signal has been received.');
|
|
8988
|
+
if (reader == null) {
|
|
8660
8989
|
// Only abort via the abort controller if the request has not resolved yet
|
|
8661
|
-
controller.abort(
|
|
8662
|
-
|
|
8990
|
+
controller.abort(reason);
|
|
8991
|
+
}
|
|
8992
|
+
else {
|
|
8993
|
+
reader.cancel(reason).catch(() => {
|
|
8994
|
+
// Cancelling the reader might rethrow an exception we would have handled by throwing in next(). So we can
|
|
8995
|
+
// ignore it here.
|
|
8996
|
+
});
|
|
8663
8997
|
}
|
|
8664
8998
|
});
|
|
8665
|
-
|
|
8666
|
-
|
|
8667
|
-
|
|
8668
|
-
|
|
8669
|
-
|
|
8670
|
-
|
|
8671
|
-
|
|
8672
|
-
|
|
8673
|
-
|
|
8999
|
+
let res;
|
|
9000
|
+
let responseIsBson = false;
|
|
9001
|
+
try {
|
|
9002
|
+
const ndJson = 'application/x-ndjson';
|
|
9003
|
+
const bson = 'application/vnd.powersync.bson-stream';
|
|
9004
|
+
res = await this.fetch(request.url, {
|
|
9005
|
+
method: 'POST',
|
|
9006
|
+
headers: {
|
|
9007
|
+
...headers,
|
|
9008
|
+
...request.headers,
|
|
9009
|
+
accept: this.supportsStreamingBinaryResponses ? `${bson};q=0.9,${ndJson};q=0.8` : ndJson
|
|
9010
|
+
},
|
|
9011
|
+
body: JSON.stringify(data),
|
|
9012
|
+
signal: controller.signal,
|
|
9013
|
+
cache: 'no-store',
|
|
9014
|
+
...(this.options.fetchOptions ?? {}),
|
|
9015
|
+
...options.fetchOptions
|
|
9016
|
+
});
|
|
9017
|
+
if (!res.ok || !res.body) {
|
|
9018
|
+
const text = await res.text();
|
|
9019
|
+
this.logger.error(`Could not POST streaming to ${path} - ${res.status} - ${res.statusText}: ${text}`);
|
|
9020
|
+
const error = new Error(`HTTP ${res.statusText}: ${text}`);
|
|
9021
|
+
error.status = res.status;
|
|
9022
|
+
throw error;
|
|
9023
|
+
}
|
|
9024
|
+
const contentType = res.headers.get('content-type');
|
|
9025
|
+
responseIsBson = contentType == bson;
|
|
9026
|
+
}
|
|
9027
|
+
catch (ex) {
|
|
8674
9028
|
if (ex.name == 'AbortError') {
|
|
8675
9029
|
throw new AbortOperation(`Pending fetch request to ${request.url} has been aborted.`);
|
|
8676
9030
|
}
|
|
8677
9031
|
throw ex;
|
|
8678
|
-
});
|
|
8679
|
-
if (!res) {
|
|
8680
|
-
throw new Error('Fetch request was aborted');
|
|
8681
|
-
}
|
|
8682
|
-
requestResolved = true;
|
|
8683
|
-
if (!res.ok || !res.body) {
|
|
8684
|
-
const text = await res.text();
|
|
8685
|
-
this.logger.error(`Could not POST streaming to ${path} - ${res.status} - ${res.statusText}: ${text}`);
|
|
8686
|
-
const error = new Error(`HTTP ${res.statusText}: ${text}`);
|
|
8687
|
-
error.status = res.status;
|
|
8688
|
-
throw error;
|
|
8689
9032
|
}
|
|
8690
|
-
|
|
8691
|
-
|
|
8692
|
-
|
|
8693
|
-
|
|
8694
|
-
|
|
8695
|
-
const closeReader = async () => {
|
|
8696
|
-
try {
|
|
8697
|
-
readerReleased = true;
|
|
8698
|
-
await reader.cancel();
|
|
8699
|
-
}
|
|
8700
|
-
catch (ex) {
|
|
8701
|
-
// an error will throw if the reader hasn't been used yet
|
|
8702
|
-
}
|
|
8703
|
-
reader.releaseLock();
|
|
8704
|
-
};
|
|
8705
|
-
const stream = new DataStream({
|
|
8706
|
-
logger: this.logger,
|
|
8707
|
-
mapLine: mapLine,
|
|
8708
|
-
pressure: {
|
|
8709
|
-
highWaterMark: 20,
|
|
8710
|
-
lowWaterMark: 10
|
|
8711
|
-
}
|
|
8712
|
-
});
|
|
8713
|
-
abortSignal?.addEventListener('abort', () => {
|
|
8714
|
-
closeReader();
|
|
8715
|
-
stream.close();
|
|
8716
|
-
});
|
|
8717
|
-
const decoder = this.createTextDecoder();
|
|
8718
|
-
let buffer = '';
|
|
8719
|
-
const consumeStream = async () => {
|
|
8720
|
-
while (!stream.closed && !abortSignal?.aborted && !readerReleased) {
|
|
8721
|
-
const { done, value } = await reader.read();
|
|
8722
|
-
if (done) {
|
|
8723
|
-
const remaining = buffer.trim();
|
|
8724
|
-
if (remaining.length != 0) {
|
|
8725
|
-
stream.enqueueData(remaining);
|
|
8726
|
-
}
|
|
8727
|
-
stream.close();
|
|
8728
|
-
await closeReader();
|
|
8729
|
-
return;
|
|
9033
|
+
reader = res.body.getReader();
|
|
9034
|
+
const stream = {
|
|
9035
|
+
next: async () => {
|
|
9036
|
+
if (controller.signal.aborted) {
|
|
9037
|
+
return doneResult;
|
|
8730
9038
|
}
|
|
8731
|
-
|
|
8732
|
-
|
|
8733
|
-
const lines = buffer.split('\n');
|
|
8734
|
-
for (var i = 0; i < lines.length - 1; i++) {
|
|
8735
|
-
var l = lines[i].trim();
|
|
8736
|
-
if (l.length > 0) {
|
|
8737
|
-
stream.enqueueData(l);
|
|
8738
|
-
}
|
|
9039
|
+
try {
|
|
9040
|
+
return await reader.read();
|
|
8739
9041
|
}
|
|
8740
|
-
|
|
8741
|
-
|
|
8742
|
-
|
|
8743
|
-
|
|
8744
|
-
|
|
8745
|
-
|
|
8746
|
-
|
|
8747
|
-
dispose();
|
|
8748
|
-
},
|
|
8749
|
-
closed: () => {
|
|
8750
|
-
resolve();
|
|
8751
|
-
dispose();
|
|
8752
|
-
}
|
|
8753
|
-
});
|
|
8754
|
-
});
|
|
9042
|
+
catch (ex) {
|
|
9043
|
+
if (controller.signal.aborted) {
|
|
9044
|
+
// .read() completes with an error if we cancel the reader, which we do to disconnect. So this is just
|
|
9045
|
+
// things working as intended, we can return a done event and consider the exception handled.
|
|
9046
|
+
return doneResult;
|
|
9047
|
+
}
|
|
9048
|
+
throw ex;
|
|
8755
9049
|
}
|
|
8756
9050
|
}
|
|
8757
9051
|
};
|
|
8758
|
-
|
|
8759
|
-
|
|
8760
|
-
|
|
8761
|
-
|
|
8762
|
-
|
|
8763
|
-
|
|
8764
|
-
|
|
8765
|
-
|
|
9052
|
+
return { isBson: responseIsBson, stream };
|
|
9053
|
+
}
|
|
9054
|
+
/**
|
|
9055
|
+
* Posts a `/sync/stream` request.
|
|
9056
|
+
*
|
|
9057
|
+
* Depending on the `Content-Type` of the response, this returns strings for sync lines or encoded BSON documents as
|
|
9058
|
+
* {@link Uint8Array}s.
|
|
9059
|
+
*/
|
|
9060
|
+
async fetchStream(options) {
|
|
9061
|
+
const { isBson, stream } = await this.fetchStreamRaw(options);
|
|
9062
|
+
if (isBson) {
|
|
9063
|
+
return extractBsonObjects(stream);
|
|
9064
|
+
}
|
|
9065
|
+
else {
|
|
9066
|
+
return extractJsonLines(stream, this.createTextDecoder());
|
|
9067
|
+
}
|
|
8766
9068
|
}
|
|
8767
9069
|
}
|
|
8768
9070
|
|
|
@@ -9270,6 +9572,19 @@ The next upload iteration will be delayed.`);
|
|
|
9270
9572
|
}
|
|
9271
9573
|
});
|
|
9272
9574
|
}
|
|
9575
|
+
async receiveSyncLines(data) {
|
|
9576
|
+
const { options, connection, bson } = data;
|
|
9577
|
+
const remote = this.options.remote;
|
|
9578
|
+
if (connection.connectionMethod == SyncStreamConnectionMethod.HTTP) {
|
|
9579
|
+
return await remote.fetchStream(options);
|
|
9580
|
+
}
|
|
9581
|
+
else {
|
|
9582
|
+
return await this.options.remote.socketStreamRaw({
|
|
9583
|
+
...options,
|
|
9584
|
+
...{ fetchStrategy: connection.fetchStrategy }
|
|
9585
|
+
}, bson);
|
|
9586
|
+
}
|
|
9587
|
+
}
|
|
9273
9588
|
async legacyStreamingSyncIteration(signal, resolvedOptions) {
|
|
9274
9589
|
const rawTables = resolvedOptions.serializedSchema?.raw_tables;
|
|
9275
9590
|
if (rawTables != null && rawTables.length) {
|
|
@@ -9299,42 +9614,27 @@ The next upload iteration will be delayed.`);
|
|
|
9299
9614
|
client_id: clientId
|
|
9300
9615
|
}
|
|
9301
9616
|
};
|
|
9302
|
-
|
|
9303
|
-
|
|
9304
|
-
|
|
9305
|
-
|
|
9306
|
-
|
|
9307
|
-
|
|
9308
|
-
|
|
9309
|
-
|
|
9310
|
-
|
|
9311
|
-
|
|
9312
|
-
|
|
9313
|
-
|
|
9314
|
-
|
|
9315
|
-
|
|
9316
|
-
stream = await this.options.remote.socketStreamRaw({
|
|
9317
|
-
...syncOptions,
|
|
9318
|
-
...{ fetchStrategy: resolvedOptions.fetchStrategy }
|
|
9319
|
-
}, (payload) => {
|
|
9320
|
-
if (payload instanceof Uint8Array) {
|
|
9321
|
-
return bson.deserialize(payload);
|
|
9322
|
-
}
|
|
9323
|
-
else {
|
|
9324
|
-
// Directly enqueued by us
|
|
9325
|
-
return payload;
|
|
9326
|
-
}
|
|
9327
|
-
}, bson);
|
|
9328
|
-
}
|
|
9617
|
+
const bson = await this.options.remote.getBSON();
|
|
9618
|
+
const source = await this.receiveSyncLines({
|
|
9619
|
+
options: syncOptions,
|
|
9620
|
+
connection: resolvedOptions,
|
|
9621
|
+
bson
|
|
9622
|
+
});
|
|
9623
|
+
const stream = injectable(map(source, (line) => {
|
|
9624
|
+
if (typeof line == 'string') {
|
|
9625
|
+
return JSON.parse(line);
|
|
9626
|
+
}
|
|
9627
|
+
else {
|
|
9628
|
+
return bson.deserialize(line);
|
|
9629
|
+
}
|
|
9630
|
+
}));
|
|
9329
9631
|
this.logger.debug('Stream established. Processing events');
|
|
9330
9632
|
this.notifyCompletedUploads = () => {
|
|
9331
|
-
|
|
9332
|
-
stream.enqueueData({ crud_upload_completed: null });
|
|
9333
|
-
}
|
|
9633
|
+
stream.inject({ crud_upload_completed: null });
|
|
9334
9634
|
};
|
|
9335
|
-
while (
|
|
9336
|
-
const line = await stream.
|
|
9337
|
-
if (
|
|
9635
|
+
while (true) {
|
|
9636
|
+
const { value: line, done } = await stream.next();
|
|
9637
|
+
if (done) {
|
|
9338
9638
|
// The stream has closed while waiting
|
|
9339
9639
|
return;
|
|
9340
9640
|
}
|
|
@@ -9513,14 +9813,17 @@ The next upload iteration will be delayed.`);
|
|
|
9513
9813
|
const syncImplementation = this;
|
|
9514
9814
|
const adapter = this.options.adapter;
|
|
9515
9815
|
const remote = this.options.remote;
|
|
9816
|
+
const controller = new AbortController();
|
|
9817
|
+
const abort = () => {
|
|
9818
|
+
return controller.abort(signal.reason);
|
|
9819
|
+
};
|
|
9820
|
+
signal.addEventListener('abort', abort);
|
|
9516
9821
|
let receivingLines = null;
|
|
9517
9822
|
let hadSyncLine = false;
|
|
9518
9823
|
let hideDisconnectOnRestart = false;
|
|
9519
9824
|
if (signal.aborted) {
|
|
9520
9825
|
throw new AbortOperation('Connection request has been aborted');
|
|
9521
9826
|
}
|
|
9522
|
-
const abortController = new AbortController();
|
|
9523
|
-
signal.addEventListener('abort', () => abortController.abort());
|
|
9524
9827
|
// Pending sync lines received from the service, as well as local events that trigger a powersync_control
|
|
9525
9828
|
// invocation (local events include refreshed tokens and completed uploads).
|
|
9526
9829
|
// This is a single data stream so that we can handle all control calls from a single place.
|
|
@@ -9528,49 +9831,36 @@ The next upload iteration will be delayed.`);
|
|
|
9528
9831
|
async function connect(instr) {
|
|
9529
9832
|
const syncOptions = {
|
|
9530
9833
|
path: '/sync/stream',
|
|
9531
|
-
abortSignal:
|
|
9834
|
+
abortSignal: controller.signal,
|
|
9532
9835
|
data: instr.request
|
|
9533
9836
|
};
|
|
9534
|
-
|
|
9535
|
-
|
|
9536
|
-
|
|
9537
|
-
|
|
9538
|
-
|
|
9539
|
-
|
|
9540
|
-
|
|
9541
|
-
|
|
9542
|
-
|
|
9543
|
-
|
|
9544
|
-
|
|
9545
|
-
|
|
9546
|
-
|
|
9547
|
-
|
|
9548
|
-
|
|
9549
|
-
|
|
9550
|
-
|
|
9551
|
-
fetchStrategy: resolvedOptions.fetchStrategy
|
|
9552
|
-
}, (payload) => {
|
|
9553
|
-
if (payload instanceof Uint8Array) {
|
|
9554
|
-
return {
|
|
9555
|
-
command: PowerSyncControlCommand.PROCESS_BSON_LINE,
|
|
9556
|
-
payload: payload
|
|
9557
|
-
};
|
|
9558
|
-
}
|
|
9559
|
-
else {
|
|
9560
|
-
// Directly enqueued by us
|
|
9561
|
-
return payload;
|
|
9562
|
-
}
|
|
9563
|
-
});
|
|
9564
|
-
}
|
|
9837
|
+
controlInvocations = injectable(map(await syncImplementation.receiveSyncLines({
|
|
9838
|
+
options: syncOptions,
|
|
9839
|
+
connection: resolvedOptions
|
|
9840
|
+
}), (line) => {
|
|
9841
|
+
if (typeof line == 'string') {
|
|
9842
|
+
return {
|
|
9843
|
+
command: PowerSyncControlCommand.PROCESS_TEXT_LINE,
|
|
9844
|
+
payload: line
|
|
9845
|
+
};
|
|
9846
|
+
}
|
|
9847
|
+
else {
|
|
9848
|
+
return {
|
|
9849
|
+
command: PowerSyncControlCommand.PROCESS_BSON_LINE,
|
|
9850
|
+
payload: line
|
|
9851
|
+
};
|
|
9852
|
+
}
|
|
9853
|
+
}));
|
|
9565
9854
|
// The rust client will set connected: true after the first sync line because that's when it gets invoked, but
|
|
9566
9855
|
// we're already connected here and can report that.
|
|
9567
9856
|
syncImplementation.updateSyncStatus({ connected: true });
|
|
9568
9857
|
try {
|
|
9569
|
-
while (
|
|
9570
|
-
|
|
9571
|
-
if (
|
|
9572
|
-
|
|
9858
|
+
while (true) {
|
|
9859
|
+
let event = await controlInvocations.next();
|
|
9860
|
+
if (event.done) {
|
|
9861
|
+
break;
|
|
9573
9862
|
}
|
|
9863
|
+
const line = event.value;
|
|
9574
9864
|
await control(line.command, line.payload);
|
|
9575
9865
|
if (!hadSyncLine) {
|
|
9576
9866
|
syncImplementation.triggerCrudUpload();
|
|
@@ -9579,12 +9869,8 @@ The next upload iteration will be delayed.`);
|
|
|
9579
9869
|
}
|
|
9580
9870
|
}
|
|
9581
9871
|
finally {
|
|
9582
|
-
|
|
9583
|
-
|
|
9584
|
-
// refreshed. That would throw after closing (and we can't handle those events either way), so set this back
|
|
9585
|
-
// to null.
|
|
9586
|
-
controlInvocations = null;
|
|
9587
|
-
await activeInstructions.close();
|
|
9872
|
+
abort();
|
|
9873
|
+
signal.removeEventListener('abort', abort);
|
|
9588
9874
|
}
|
|
9589
9875
|
}
|
|
9590
9876
|
async function stop() {
|
|
@@ -9628,14 +9914,14 @@ The next upload iteration will be delayed.`);
|
|
|
9628
9914
|
remote.invalidateCredentials();
|
|
9629
9915
|
// Restart iteration after the credentials have been refreshed.
|
|
9630
9916
|
remote.fetchCredentials().then((_) => {
|
|
9631
|
-
controlInvocations?.
|
|
9917
|
+
controlInvocations?.inject({ command: PowerSyncControlCommand.NOTIFY_TOKEN_REFRESHED });
|
|
9632
9918
|
}, (err) => {
|
|
9633
9919
|
syncImplementation.logger.warn('Could not prefetch credentials', err);
|
|
9634
9920
|
});
|
|
9635
9921
|
}
|
|
9636
9922
|
}
|
|
9637
9923
|
else if ('CloseSyncStream' in instruction) {
|
|
9638
|
-
|
|
9924
|
+
controller.abort();
|
|
9639
9925
|
hideDisconnectOnRestart = instruction.CloseSyncStream.hide_disconnect;
|
|
9640
9926
|
}
|
|
9641
9927
|
else if ('FlushFileSystem' in instruction) ;
|
|
@@ -9664,17 +9950,13 @@ The next upload iteration will be delayed.`);
|
|
|
9664
9950
|
}
|
|
9665
9951
|
await control(PowerSyncControlCommand.START, JSON.stringify(options));
|
|
9666
9952
|
this.notifyCompletedUploads = () => {
|
|
9667
|
-
|
|
9668
|
-
controlInvocations.enqueueData({ command: PowerSyncControlCommand.NOTIFY_CRUD_UPLOAD_COMPLETED });
|
|
9669
|
-
}
|
|
9953
|
+
controlInvocations?.inject({ command: PowerSyncControlCommand.NOTIFY_CRUD_UPLOAD_COMPLETED });
|
|
9670
9954
|
};
|
|
9671
9955
|
this.handleActiveStreamsChange = () => {
|
|
9672
|
-
|
|
9673
|
-
|
|
9674
|
-
|
|
9675
|
-
|
|
9676
|
-
});
|
|
9677
|
-
}
|
|
9956
|
+
controlInvocations?.inject({
|
|
9957
|
+
command: PowerSyncControlCommand.UPDATE_SUBSCRIPTIONS,
|
|
9958
|
+
payload: JSON.stringify(this.activeStreams)
|
|
9959
|
+
});
|
|
9678
9960
|
};
|
|
9679
9961
|
await receivingLines;
|
|
9680
9962
|
}
|
|
@@ -10021,7 +10303,8 @@ class TriggerManagerImpl {
|
|
|
10021
10303
|
* we need to ensure we can cleanup the created resources.
|
|
10022
10304
|
* We unfortunately cannot rely on transaction rollback.
|
|
10023
10305
|
*/
|
|
10024
|
-
const cleanup = async (
|
|
10306
|
+
const cleanup = async (options) => {
|
|
10307
|
+
const { context } = options ?? {};
|
|
10025
10308
|
disposeWarningListener();
|
|
10026
10309
|
const doCleanup = async (tx) => {
|
|
10027
10310
|
await this.removeTriggers(tx, triggerIds);
|
|
@@ -10117,7 +10400,7 @@ class TriggerManagerImpl {
|
|
|
10117
10400
|
}
|
|
10118
10401
|
catch (error) {
|
|
10119
10402
|
try {
|
|
10120
|
-
await cleanup();
|
|
10403
|
+
await cleanup(setupContext ? { context: setupContext } : undefined);
|
|
10121
10404
|
}
|
|
10122
10405
|
catch (cleanupError) {
|
|
10123
10406
|
throw new AggregateError([error, cleanupError], 'Error during operation and cleanup');
|
|
@@ -10690,7 +10973,7 @@ class AbstractPowerSyncDatabase extends BaseObserver {
|
|
|
10690
10973
|
* @returns A transaction of CRUD operations to upload, or null if there are none
|
|
10691
10974
|
*/
|
|
10692
10975
|
async getNextCrudTransaction() {
|
|
10693
|
-
const iterator = this.getCrudTransactions()[
|
|
10976
|
+
const iterator = this.getCrudTransactions()[Symbol.asyncIterator]();
|
|
10694
10977
|
return (await iterator.next()).value;
|
|
10695
10978
|
}
|
|
10696
10979
|
/**
|
|
@@ -10726,7 +11009,7 @@ class AbstractPowerSyncDatabase extends BaseObserver {
|
|
|
10726
11009
|
*/
|
|
10727
11010
|
getCrudTransactions() {
|
|
10728
11011
|
return {
|
|
10729
|
-
[
|
|
11012
|
+
[Symbol.asyncIterator]: () => {
|
|
10730
11013
|
let lastCrudItemId = -1;
|
|
10731
11014
|
const sql = `
|
|
10732
11015
|
WITH RECURSIVE crud_entries AS (
|
|
@@ -10789,6 +11072,10 @@ SELECT * FROM crud_entries;
|
|
|
10789
11072
|
* Execute a SQL write (INSERT/UPDATE/DELETE) query
|
|
10790
11073
|
* and optionally return results.
|
|
10791
11074
|
*
|
|
11075
|
+
* When using the default client-side [JSON-based view system](https://docs.powersync.com/architecture/client-architecture#client-side-schema-and-sqlite-database-structure),
|
|
11076
|
+
* the returned result's `rowsAffected` may be `0` for successful `UPDATE` and `DELETE` statements.
|
|
11077
|
+
* Use a `RETURNING` clause and inspect `result.rows` when you need to confirm which rows changed.
|
|
11078
|
+
*
|
|
10792
11079
|
* @param sql The SQL query to execute
|
|
10793
11080
|
* @param parameters Optional array of parameters to bind to the query
|
|
10794
11081
|
* @returns The query result as an object with structured key-value pairs
|
|
@@ -10885,7 +11172,7 @@ SELECT * FROM crud_entries;
|
|
|
10885
11172
|
async readTransaction(callback, lockTimeout = DEFAULT_LOCK_TIMEOUT_MS) {
|
|
10886
11173
|
await this.waitForReady();
|
|
10887
11174
|
return this.database.readTransaction(async (tx) => {
|
|
10888
|
-
const res = await callback(
|
|
11175
|
+
const res = await callback(tx);
|
|
10889
11176
|
await tx.rollback();
|
|
10890
11177
|
return res;
|
|
10891
11178
|
}, { timeoutMs: lockTimeout });
|
|
@@ -11862,5 +12149,5 @@ const parseQuery = (query, parameters) => {
|
|
|
11862
12149
|
return { sqlStatement, parameters: parameters };
|
|
11863
12150
|
};
|
|
11864
12151
|
|
|
11865
|
-
export { ATTACHMENT_TABLE, AbortOperation, AbstractPowerSyncDatabase, AbstractPowerSyncDatabaseOpenFactory, AbstractQueryProcessor, AbstractRemote, AbstractStreamingSyncImplementation, ArrayComparator, AttachmentContext, AttachmentQueue, AttachmentService, AttachmentState, AttachmentTable, BaseObserver, Column, ColumnType, ConnectionClosedError, ConnectionManager, ControlledExecutor, CrudBatch, CrudEntry, CrudTransaction, DEFAULT_CRUD_BATCH_LIMIT, DEFAULT_CRUD_UPLOAD_THROTTLE_MS, DEFAULT_INDEX_COLUMN_OPTIONS, DEFAULT_INDEX_OPTIONS, DEFAULT_LOCK_TIMEOUT_MS, DEFAULT_POWERSYNC_CLOSE_OPTIONS, DEFAULT_POWERSYNC_DB_OPTIONS,
|
|
12152
|
+
export { ATTACHMENT_TABLE, AbortOperation, AbstractPowerSyncDatabase, AbstractPowerSyncDatabaseOpenFactory, AbstractQueryProcessor, AbstractRemote, AbstractStreamingSyncImplementation, ArrayComparator, AttachmentContext, AttachmentQueue, AttachmentService, AttachmentState, AttachmentTable, BaseObserver, Column, ColumnType, ConnectionClosedError, ConnectionManager, ControlledExecutor, CrudBatch, CrudEntry, CrudTransaction, DBAdapterDefaultMixin, DBGetUtilsDefaultMixin, DEFAULT_CRUD_BATCH_LIMIT, DEFAULT_CRUD_UPLOAD_THROTTLE_MS, DEFAULT_INDEX_COLUMN_OPTIONS, DEFAULT_INDEX_OPTIONS, DEFAULT_LOCK_TIMEOUT_MS, DEFAULT_POWERSYNC_CLOSE_OPTIONS, DEFAULT_POWERSYNC_DB_OPTIONS, DEFAULT_REMOTE_LOGGER, DEFAULT_REMOTE_OPTIONS, DEFAULT_RETRY_DELAY_MS, DEFAULT_ROW_COMPARATOR, DEFAULT_STREAMING_SYNC_OPTIONS, DEFAULT_STREAM_CONNECTION_OPTIONS, DEFAULT_SYNC_CLIENT_IMPLEMENTATION, DEFAULT_TABLE_OPTIONS, DEFAULT_WATCH_QUERY_OPTIONS, DEFAULT_WATCH_THROTTLE_MS, DiffTriggerOperation, DifferentialQueryProcessor, EMPTY_DIFFERENTIAL, EncodingType, FalsyComparator, FetchImplementationProvider, FetchStrategy, GetAllQuery, Index, IndexedColumn, InvalidSQLCharacters, LockType, LogLevel, MAX_AMOUNT_OF_COLUMNS, MAX_OP_ID, MEMORY_TRIGGER_CLAIM_MANAGER, Mutex, OnChangeQueryProcessor, OpType, OpTypeEnum, OplogEntry, PSInternalTable, PowerSyncControlCommand, RowUpdateType, Schema, Semaphore, SqliteBucketStorage, SyncClientImplementation, SyncDataBatch, SyncDataBucket, SyncProgress, SyncStatus, SyncStreamConnectionMethod, SyncingService, Table, TableV2, TriggerManagerImpl, UpdateType, UploadQueueStats, WatchedQueryListenerEvent, attachmentFromSql, column, compilableQueryWatch, createBaseLogger, createLogger, extractTableUpdates, isBatchedUpdateNotification, isContinueCheckpointRequest, isDBAdapter, isPowerSyncDatabaseOptionsWithSettings, isSQLOpenFactory, isSQLOpenOptions, isStreamingKeepalive, isStreamingSyncCheckpoint, isStreamingSyncCheckpointComplete, isStreamingSyncCheckpointDiff, 
isStreamingSyncCheckpointPartiallyComplete, isStreamingSyncData, isSyncNewCheckpointRequest, parseQuery, runOnSchemaChange, sanitizeSQL, sanitizeUUID, timeoutSignal };
|
|
11866
12153
|
//# sourceMappingURL=bundle.node.mjs.map
|