@powersync/common 0.0.0-dev-20260311103504 → 0.0.0-dev-20260503073249
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/bundle.cjs +791 -489
- package/dist/bundle.cjs.map +1 -1
- package/dist/bundle.mjs +785 -485
- package/dist/bundle.mjs.map +1 -1
- package/dist/bundle.node.cjs +789 -488
- package/dist/bundle.node.cjs.map +1 -1
- package/dist/bundle.node.mjs +783 -484
- package/dist/bundle.node.mjs.map +1 -1
- package/dist/index.d.cts +165 -103
- package/lib/attachments/AttachmentQueue.d.ts +10 -4
- package/lib/attachments/AttachmentQueue.js +10 -4
- package/lib/attachments/AttachmentQueue.js.map +1 -1
- package/lib/attachments/AttachmentService.js +2 -3
- package/lib/attachments/AttachmentService.js.map +1 -1
- package/lib/attachments/SyncingService.d.ts +2 -1
- package/lib/attachments/SyncingService.js +4 -5
- package/lib/attachments/SyncingService.js.map +1 -1
- package/lib/client/AbstractPowerSyncDatabase.d.ts +5 -1
- package/lib/client/AbstractPowerSyncDatabase.js +9 -5
- package/lib/client/AbstractPowerSyncDatabase.js.map +1 -1
- package/lib/client/sync/stream/AbstractRemote.d.ts +29 -8
- package/lib/client/sync/stream/AbstractRemote.js +154 -177
- package/lib/client/sync/stream/AbstractRemote.js.map +1 -1
- package/lib/client/sync/stream/AbstractStreamingSyncImplementation.d.ts +4 -0
- package/lib/client/sync/stream/AbstractStreamingSyncImplementation.js +88 -88
- package/lib/client/sync/stream/AbstractStreamingSyncImplementation.js.map +1 -1
- package/lib/db/DBAdapter.d.ts +55 -9
- package/lib/db/DBAdapter.js +126 -0
- package/lib/db/DBAdapter.js.map +1 -1
- package/lib/db/crud/SyncStatus.d.ts +0 -4
- package/lib/db/crud/SyncStatus.js +0 -4
- package/lib/db/crud/SyncStatus.js.map +1 -1
- package/lib/db/schema/RawTable.d.ts +0 -5
- package/lib/db/schema/Schema.d.ts +0 -2
- package/lib/db/schema/Schema.js +0 -2
- package/lib/db/schema/Schema.js.map +1 -1
- package/lib/index.d.ts +1 -1
- package/lib/index.js +0 -1
- package/lib/index.js.map +1 -1
- package/lib/utils/async.d.ts +0 -9
- package/lib/utils/async.js +0 -9
- package/lib/utils/async.js.map +1 -1
- package/lib/utils/mutex.d.ts +47 -5
- package/lib/utils/mutex.js +146 -21
- package/lib/utils/mutex.js.map +1 -1
- package/lib/utils/queue.d.ts +16 -0
- package/lib/utils/queue.js +42 -0
- package/lib/utils/queue.js.map +1 -0
- package/lib/utils/stream_transform.d.ts +39 -0
- package/lib/utils/stream_transform.js +206 -0
- package/lib/utils/stream_transform.js.map +1 -0
- package/package.json +9 -8
- package/src/attachments/AttachmentQueue.ts +10 -4
- package/src/attachments/AttachmentService.ts +2 -3
- package/src/attachments/README.md +6 -4
- package/src/attachments/SyncingService.ts +4 -5
- package/src/client/AbstractPowerSyncDatabase.ts +9 -5
- package/src/client/sync/stream/AbstractRemote.ts +182 -206
- package/src/client/sync/stream/AbstractStreamingSyncImplementation.ts +96 -83
- package/src/db/DBAdapter.ts +167 -9
- package/src/db/crud/SyncStatus.ts +0 -4
- package/src/db/schema/RawTable.ts +0 -5
- package/src/db/schema/Schema.ts +0 -2
- package/src/index.ts +1 -1
- package/src/utils/async.ts +0 -11
- package/src/utils/mutex.ts +184 -26
- package/src/utils/queue.ts +48 -0
- package/src/utils/stream_transform.ts +252 -0
- package/lib/utils/DataStream.d.ts +0 -62
- package/lib/utils/DataStream.js +0 -169
- package/lib/utils/DataStream.js.map +0 -1
- package/src/utils/DataStream.ts +0 -222
package/dist/bundle.node.cjs
CHANGED
|
@@ -1,6 +1,5 @@
|
|
|
1
1
|
'use strict';
|
|
2
2
|
|
|
3
|
-
var asyncMutex = require('async-mutex');
|
|
4
3
|
var eventIterator = require('event-iterator');
|
|
5
4
|
var node_buffer = require('node:buffer');
|
|
6
5
|
|
|
@@ -661,7 +660,7 @@ class SyncingService {
|
|
|
661
660
|
updatedAttachments.push(downloaded);
|
|
662
661
|
break;
|
|
663
662
|
case exports.AttachmentState.QUEUED_DELETE:
|
|
664
|
-
const deleted = await this.deleteAttachment(attachment);
|
|
663
|
+
const deleted = await this.deleteAttachment(attachment, context);
|
|
665
664
|
updatedAttachments.push(deleted);
|
|
666
665
|
break;
|
|
667
666
|
}
|
|
@@ -739,17 +738,16 @@ class SyncingService {
|
|
|
739
738
|
* On failure, defers to error handler or archives.
|
|
740
739
|
*
|
|
741
740
|
* @param attachment - The attachment record to delete
|
|
741
|
+
* @param context - Attachment context for database operations
|
|
742
742
|
* @returns Updated attachment record
|
|
743
743
|
*/
|
|
744
|
-
async deleteAttachment(attachment) {
|
|
744
|
+
async deleteAttachment(attachment, context) {
|
|
745
745
|
try {
|
|
746
746
|
await this.remoteStorage.deleteFile(attachment);
|
|
747
747
|
if (attachment.localUri) {
|
|
748
748
|
await this.localStorage.deleteFile(attachment.localUri);
|
|
749
749
|
}
|
|
750
|
-
await
|
|
751
|
-
await ctx.deleteAttachment(attachment.id);
|
|
752
|
-
});
|
|
750
|
+
await context.deleteAttachment(attachment.id);
|
|
753
751
|
return {
|
|
754
752
|
...attachment,
|
|
755
753
|
state: exports.AttachmentState.ARCHIVED
|
|
@@ -787,32 +785,198 @@ class SyncingService {
|
|
|
787
785
|
}
|
|
788
786
|
|
|
789
787
|
/**
|
|
790
|
-
*
|
|
788
|
+
* A simple fixed-capacity queue implementation.
|
|
789
|
+
*
|
|
790
|
+
* Unlike a naive queue implemented by `array.push()` and `array.shift()`, this avoids moving array elements around
|
|
791
|
+
* and is `O(1)` for {@link addLast} and {@link removeFirst}.
|
|
791
792
|
*/
|
|
792
|
-
|
|
793
|
-
|
|
794
|
-
|
|
795
|
-
|
|
796
|
-
|
|
797
|
-
|
|
798
|
-
|
|
799
|
-
|
|
800
|
-
|
|
801
|
-
|
|
802
|
-
|
|
803
|
-
|
|
804
|
-
|
|
805
|
-
|
|
806
|
-
|
|
807
|
-
|
|
808
|
-
|
|
809
|
-
|
|
793
|
+
class Queue {
|
|
794
|
+
table;
|
|
795
|
+
// Index of the first element in the table.
|
|
796
|
+
head;
|
|
797
|
+
// Amount of items currently in the queue.
|
|
798
|
+
_length;
|
|
799
|
+
constructor(initialItems) {
|
|
800
|
+
this.table = [...initialItems];
|
|
801
|
+
this.head = 0;
|
|
802
|
+
this._length = this.table.length;
|
|
803
|
+
}
|
|
804
|
+
get isEmpty() {
|
|
805
|
+
return this.length == 0;
|
|
806
|
+
}
|
|
807
|
+
get length() {
|
|
808
|
+
return this._length;
|
|
809
|
+
}
|
|
810
|
+
removeFirst() {
|
|
811
|
+
if (this.isEmpty) {
|
|
812
|
+
throw new Error('Queue is empty');
|
|
813
|
+
}
|
|
814
|
+
const result = this.table[this.head];
|
|
815
|
+
this._length--;
|
|
816
|
+
this.table[this.head] = undefined;
|
|
817
|
+
this.head = (this.head + 1) % this.table.length;
|
|
818
|
+
return result;
|
|
819
|
+
}
|
|
820
|
+
addLast(element) {
|
|
821
|
+
if (this.length == this.table.length) {
|
|
822
|
+
throw new Error('Queue is full');
|
|
823
|
+
}
|
|
824
|
+
this.table[(this.head + this._length) % this.table.length] = element;
|
|
825
|
+
this._length++;
|
|
826
|
+
}
|
|
827
|
+
}
|
|
828
|
+
|
|
829
|
+
/**
|
|
830
|
+
* An asynchronous semaphore implementation with associated items per lease.
|
|
831
|
+
*
|
|
832
|
+
* @internal This class is meant to be used in PowerSync SDKs only, and is not part of the public API.
|
|
833
|
+
*/
|
|
834
|
+
class Semaphore {
|
|
835
|
+
// Available items that are not currently assigned to a waiter.
|
|
836
|
+
available;
|
|
837
|
+
size;
|
|
838
|
+
// Linked list of waiters. We don't expect the wait list to become particularly large, and this allows removing
|
|
839
|
+
// aborted waiters from the middle of the list efficiently.
|
|
840
|
+
firstWaiter;
|
|
841
|
+
lastWaiter;
|
|
842
|
+
constructor(elements) {
|
|
843
|
+
this.available = new Queue(elements);
|
|
844
|
+
this.size = this.available.length;
|
|
845
|
+
}
|
|
846
|
+
addWaiter(requestedItems, onAcquire) {
|
|
847
|
+
const node = {
|
|
848
|
+
isActive: true,
|
|
849
|
+
acquiredItems: [],
|
|
850
|
+
remainingItems: requestedItems,
|
|
851
|
+
onAcquire,
|
|
852
|
+
prev: this.lastWaiter
|
|
853
|
+
};
|
|
854
|
+
if (this.lastWaiter) {
|
|
855
|
+
this.lastWaiter.next = node;
|
|
856
|
+
this.lastWaiter = node;
|
|
857
|
+
}
|
|
858
|
+
else {
|
|
859
|
+
// First waiter
|
|
860
|
+
this.lastWaiter = this.firstWaiter = node;
|
|
861
|
+
}
|
|
862
|
+
return node;
|
|
863
|
+
}
|
|
864
|
+
deactivateWaiter(waiter) {
|
|
865
|
+
const { prev, next } = waiter;
|
|
866
|
+
waiter.isActive = false;
|
|
867
|
+
if (prev)
|
|
868
|
+
prev.next = next;
|
|
869
|
+
if (next)
|
|
870
|
+
next.prev = prev;
|
|
871
|
+
if (waiter == this.firstWaiter)
|
|
872
|
+
this.firstWaiter = next;
|
|
873
|
+
if (waiter == this.lastWaiter)
|
|
874
|
+
this.lastWaiter = prev;
|
|
875
|
+
}
|
|
876
|
+
requestPermits(amount, abort) {
|
|
877
|
+
if (amount <= 0 || amount > this.size) {
|
|
878
|
+
throw new Error(`Invalid amount of items requested (${amount}), must be between 1 and ${this.size}`);
|
|
879
|
+
}
|
|
880
|
+
return new Promise((resolve, reject) => {
|
|
881
|
+
function rejectAborted() {
|
|
882
|
+
reject(abort?.reason ?? new Error('Semaphore acquire aborted'));
|
|
883
|
+
}
|
|
884
|
+
if (abort?.aborted) {
|
|
885
|
+
return rejectAborted();
|
|
886
|
+
}
|
|
887
|
+
let waiter;
|
|
888
|
+
const markCompleted = () => {
|
|
889
|
+
const items = waiter.acquiredItems;
|
|
890
|
+
waiter.acquiredItems = []; // Avoid releasing items twice.
|
|
891
|
+
for (const element of items) {
|
|
892
|
+
// Give to next waiter, if possible.
|
|
893
|
+
const nextWaiter = this.firstWaiter;
|
|
894
|
+
if (nextWaiter) {
|
|
895
|
+
nextWaiter.acquiredItems.push(element);
|
|
896
|
+
nextWaiter.remainingItems--;
|
|
897
|
+
if (nextWaiter.remainingItems == 0) {
|
|
898
|
+
nextWaiter.onAcquire();
|
|
899
|
+
}
|
|
900
|
+
}
|
|
901
|
+
else {
|
|
902
|
+
// No pending waiter, return lease into pool.
|
|
903
|
+
this.available.addLast(element);
|
|
904
|
+
}
|
|
905
|
+
}
|
|
906
|
+
};
|
|
907
|
+
const onAbort = () => {
|
|
908
|
+
abort?.removeEventListener('abort', onAbort);
|
|
909
|
+
if (waiter.isActive) {
|
|
910
|
+
this.deactivateWaiter(waiter);
|
|
911
|
+
rejectAborted();
|
|
912
|
+
}
|
|
913
|
+
};
|
|
914
|
+
const resolvePromise = () => {
|
|
915
|
+
this.deactivateWaiter(waiter);
|
|
916
|
+
abort?.removeEventListener('abort', onAbort);
|
|
917
|
+
const items = waiter.acquiredItems;
|
|
918
|
+
resolve({ items, release: markCompleted });
|
|
919
|
+
};
|
|
920
|
+
waiter = this.addWaiter(amount, resolvePromise);
|
|
921
|
+
// If there are items in the pool that haven't been assigned, we can pull them into this waiter. Note that this is
|
|
922
|
+
// only the case if we're the first waiter (otherwise, items would have been assigned to an earlier waiter).
|
|
923
|
+
while (!this.available.isEmpty && waiter.remainingItems > 0) {
|
|
924
|
+
waiter.acquiredItems.push(this.available.removeFirst());
|
|
925
|
+
waiter.remainingItems--;
|
|
810
926
|
}
|
|
811
|
-
|
|
812
|
-
|
|
927
|
+
if (waiter.remainingItems == 0) {
|
|
928
|
+
return resolvePromise();
|
|
813
929
|
}
|
|
930
|
+
abort?.addEventListener('abort', onAbort);
|
|
814
931
|
});
|
|
815
|
-
}
|
|
932
|
+
}
|
|
933
|
+
/**
|
|
934
|
+
* Requests a single item from the pool.
|
|
935
|
+
*
|
|
936
|
+
* The returned `release` callback must be invoked to return the item into the pool.
|
|
937
|
+
*/
|
|
938
|
+
async requestOne(abort) {
|
|
939
|
+
const { items, release } = await this.requestPermits(1, abort);
|
|
940
|
+
return { release, item: items[0] };
|
|
941
|
+
}
|
|
942
|
+
/**
|
|
943
|
+
* Requests access to all items from the pool.
|
|
944
|
+
*
|
|
945
|
+
* The returned `release` callback must be invoked to return items into the pool.
|
|
946
|
+
*/
|
|
947
|
+
requestAll(abort) {
|
|
948
|
+
return this.requestPermits(this.size, abort);
|
|
949
|
+
}
|
|
950
|
+
}
|
|
951
|
+
/**
|
|
952
|
+
* An asynchronous mutex implementation.
|
|
953
|
+
*
|
|
954
|
+
* @internal This class is meant to be used in PowerSync SDKs only, and is not part of the public API.
|
|
955
|
+
*/
|
|
956
|
+
class Mutex {
|
|
957
|
+
inner = new Semaphore([null]);
|
|
958
|
+
async acquire(abort) {
|
|
959
|
+
const { release } = await this.inner.requestOne(abort);
|
|
960
|
+
return release;
|
|
961
|
+
}
|
|
962
|
+
async runExclusive(fn, abort) {
|
|
963
|
+
const returnMutex = await this.acquire(abort);
|
|
964
|
+
try {
|
|
965
|
+
return await fn();
|
|
966
|
+
}
|
|
967
|
+
finally {
|
|
968
|
+
returnMutex();
|
|
969
|
+
}
|
|
970
|
+
}
|
|
971
|
+
}
|
|
972
|
+
function timeoutSignal(timeout) {
|
|
973
|
+
if (timeout == null)
|
|
974
|
+
return;
|
|
975
|
+
if ('timeout' in AbortSignal)
|
|
976
|
+
return AbortSignal.timeout(timeout);
|
|
977
|
+
const controller = new AbortController();
|
|
978
|
+
setTimeout(() => controller.abort(new Error('Timeout waiting for lock')), timeout);
|
|
979
|
+
return controller.signal;
|
|
816
980
|
}
|
|
817
981
|
|
|
818
982
|
/**
|
|
@@ -824,7 +988,7 @@ class AttachmentService {
|
|
|
824
988
|
db;
|
|
825
989
|
logger;
|
|
826
990
|
tableName;
|
|
827
|
-
mutex = new
|
|
991
|
+
mutex = new Mutex();
|
|
828
992
|
context;
|
|
829
993
|
constructor(db, logger, tableName = 'attachments', archivedCacheLimit = 100) {
|
|
830
994
|
this.db = db;
|
|
@@ -861,7 +1025,7 @@ class AttachmentService {
|
|
|
861
1025
|
* Executes a callback with exclusive access to the attachment context.
|
|
862
1026
|
*/
|
|
863
1027
|
async withContext(callback) {
|
|
864
|
-
return
|
|
1028
|
+
return this.mutex.runExclusive(async () => {
|
|
865
1029
|
return callback(this.context);
|
|
866
1030
|
});
|
|
867
1031
|
}
|
|
@@ -897,9 +1061,15 @@ class AttachmentQueue {
|
|
|
897
1061
|
tableName;
|
|
898
1062
|
/** Logger instance for diagnostic information */
|
|
899
1063
|
logger;
|
|
900
|
-
/** Interval in milliseconds between periodic sync operations.
|
|
1064
|
+
/** Interval in milliseconds between periodic sync operations. Acts as a polling timer to retry
|
|
1065
|
+
* failed uploads/downloads, especially after the app goes offline. Default: 30000 (30 seconds) */
|
|
901
1066
|
syncIntervalMs = 30 * 1000;
|
|
902
|
-
/**
|
|
1067
|
+
/** Throttle duration in milliseconds for the reactive watch query on the attachments table.
|
|
1068
|
+
* When attachment records change, a watch query detects the change and triggers a sync.
|
|
1069
|
+
* This throttle prevents the sync from firing too rapidly when many changes happen in
|
|
1070
|
+
* quick succession (e.g., bulk inserts). This is distinct from syncIntervalMs — it controls
|
|
1071
|
+
* how quickly the queue reacts to changes, while syncIntervalMs controls how often it polls
|
|
1072
|
+
* for retries. Default: 30 (from DEFAULT_WATCH_THROTTLE_MS) */
|
|
903
1073
|
syncThrottleDuration;
|
|
904
1074
|
/** Whether to automatically download remote attachments. Default: true */
|
|
905
1075
|
downloadAttachments = true;
|
|
@@ -923,8 +1093,8 @@ class AttachmentQueue {
|
|
|
923
1093
|
* @param options.watchAttachments - Callback for monitoring attachment changes in your data model
|
|
924
1094
|
* @param options.tableName - Name of the table to store attachment records. Default: 'ps_attachment_queue'
|
|
925
1095
|
* @param options.logger - Logger instance. Defaults to db.logger
|
|
926
|
-
* @param options.syncIntervalMs -
|
|
927
|
-
* @param options.syncThrottleDuration - Throttle duration for
|
|
1096
|
+
* @param options.syncIntervalMs - Periodic polling interval in milliseconds for retrying failed uploads/downloads. Default: 30000
|
|
1097
|
+
* @param options.syncThrottleDuration - Throttle duration in milliseconds for the reactive watch query that detects attachment changes. Prevents rapid-fire syncs during bulk changes. Default: 30
|
|
928
1098
|
* @param options.downloadAttachments - Whether to automatically download remote attachments. Default: true
|
|
929
1099
|
* @param options.archivedCacheLimit - Maximum archived attachments before cleanup. Default: 100
|
|
930
1100
|
*/
|
|
@@ -1532,6 +1702,49 @@ var Logger = /*@__PURE__*/getDefaultExportFromCjs(loggerExports);
|
|
|
1532
1702
|
* Set of generic interfaces to allow PowerSync compatibility with
|
|
1533
1703
|
* different SQLite DB implementations.
|
|
1534
1704
|
*/
|
|
1705
|
+
/**
|
|
1706
|
+
* Implements {@link DBGetUtils} on a {@link SqlRunner}.
|
|
1707
|
+
*/
|
|
1708
|
+
function DBGetUtilsDefaultMixin(Base) {
|
|
1709
|
+
return class extends Base {
|
|
1710
|
+
async getAll(sql, parameters) {
|
|
1711
|
+
const res = await this.execute(sql, parameters);
|
|
1712
|
+
return res.rows?._array ?? [];
|
|
1713
|
+
}
|
|
1714
|
+
async getOptional(sql, parameters) {
|
|
1715
|
+
const res = await this.execute(sql, parameters);
|
|
1716
|
+
return res.rows?.item(0) ?? null;
|
|
1717
|
+
}
|
|
1718
|
+
async get(sql, parameters) {
|
|
1719
|
+
const res = await this.execute(sql, parameters);
|
|
1720
|
+
const first = res.rows?.item(0);
|
|
1721
|
+
if (!first) {
|
|
1722
|
+
throw new Error('Result set is empty');
|
|
1723
|
+
}
|
|
1724
|
+
return first;
|
|
1725
|
+
}
|
|
1726
|
+
async executeBatch(query, params = []) {
|
|
1727
|
+
// If this context can run batch statements natively, use that.
|
|
1728
|
+
// @ts-ignore
|
|
1729
|
+
if (super.executeBatch) {
|
|
1730
|
+
// @ts-ignore
|
|
1731
|
+
return super.executeBatch(query, params);
|
|
1732
|
+
}
|
|
1733
|
+
// Emulate executeBatch by running statements individually.
|
|
1734
|
+
let lastInsertId;
|
|
1735
|
+
let rowsAffected = 0;
|
|
1736
|
+
for (const set of params) {
|
|
1737
|
+
const result = await this.execute(query, set);
|
|
1738
|
+
lastInsertId = result.insertId;
|
|
1739
|
+
rowsAffected += result.rowsAffected;
|
|
1740
|
+
}
|
|
1741
|
+
return {
|
|
1742
|
+
rowsAffected,
|
|
1743
|
+
insertId: lastInsertId
|
|
1744
|
+
};
|
|
1745
|
+
}
|
|
1746
|
+
};
|
|
1747
|
+
}
|
|
1535
1748
|
/**
|
|
1536
1749
|
* Update table operation numbers from SQLite
|
|
1537
1750
|
*/
|
|
@@ -1541,6 +1754,89 @@ exports.RowUpdateType = void 0;
|
|
|
1541
1754
|
RowUpdateType[RowUpdateType["SQLITE_DELETE"] = 9] = "SQLITE_DELETE";
|
|
1542
1755
|
RowUpdateType[RowUpdateType["SQLITE_UPDATE"] = 23] = "SQLITE_UPDATE";
|
|
1543
1756
|
})(exports.RowUpdateType || (exports.RowUpdateType = {}));
|
|
1757
|
+
/**
|
|
1758
|
+
* A mixin to implement {@link DBAdapter} by delegating to {@link ConnectionPool.readLock} and
|
|
1759
|
+
* {@link ConnectionPool.writeLock}.
|
|
1760
|
+
*/
|
|
1761
|
+
function DBAdapterDefaultMixin(Base) {
|
|
1762
|
+
return class extends Base {
|
|
1763
|
+
readTransaction(fn, options) {
|
|
1764
|
+
return this.readLock((ctx) => TransactionImplementation.runWith(ctx, fn), options);
|
|
1765
|
+
}
|
|
1766
|
+
writeTransaction(fn, options) {
|
|
1767
|
+
return this.writeLock((ctx) => TransactionImplementation.runWith(ctx, fn), options);
|
|
1768
|
+
}
|
|
1769
|
+
getAll(sql, parameters) {
|
|
1770
|
+
return this.readLock((ctx) => ctx.getAll(sql, parameters));
|
|
1771
|
+
}
|
|
1772
|
+
getOptional(sql, parameters) {
|
|
1773
|
+
return this.readLock((ctx) => ctx.getOptional(sql, parameters));
|
|
1774
|
+
}
|
|
1775
|
+
get(sql, parameters) {
|
|
1776
|
+
return this.readLock((ctx) => ctx.get(sql, parameters));
|
|
1777
|
+
}
|
|
1778
|
+
execute(query, params) {
|
|
1779
|
+
return this.writeLock((ctx) => ctx.execute(query, params));
|
|
1780
|
+
}
|
|
1781
|
+
executeRaw(query, params) {
|
|
1782
|
+
return this.writeLock((ctx) => ctx.executeRaw(query, params));
|
|
1783
|
+
}
|
|
1784
|
+
executeBatch(query, params) {
|
|
1785
|
+
return this.writeTransaction((tx) => tx.executeBatch(query, params));
|
|
1786
|
+
}
|
|
1787
|
+
};
|
|
1788
|
+
}
|
|
1789
|
+
class BaseTransaction {
|
|
1790
|
+
inner;
|
|
1791
|
+
finalized = false;
|
|
1792
|
+
constructor(inner) {
|
|
1793
|
+
this.inner = inner;
|
|
1794
|
+
}
|
|
1795
|
+
async commit() {
|
|
1796
|
+
if (this.finalized) {
|
|
1797
|
+
return { rowsAffected: 0 };
|
|
1798
|
+
}
|
|
1799
|
+
this.finalized = true;
|
|
1800
|
+
return this.inner.execute('COMMIT');
|
|
1801
|
+
}
|
|
1802
|
+
async rollback() {
|
|
1803
|
+
if (this.finalized) {
|
|
1804
|
+
return { rowsAffected: 0 };
|
|
1805
|
+
}
|
|
1806
|
+
this.finalized = true;
|
|
1807
|
+
return this.inner.execute('ROLLBACK');
|
|
1808
|
+
}
|
|
1809
|
+
execute(query, params) {
|
|
1810
|
+
return this.inner.execute(query, params);
|
|
1811
|
+
}
|
|
1812
|
+
executeRaw(query, params) {
|
|
1813
|
+
return this.inner.executeRaw(query, params);
|
|
1814
|
+
}
|
|
1815
|
+
executeBatch(query, params) {
|
|
1816
|
+
return this.inner.executeBatch(query, params);
|
|
1817
|
+
}
|
|
1818
|
+
}
|
|
1819
|
+
class TransactionImplementation extends DBGetUtilsDefaultMixin(BaseTransaction) {
|
|
1820
|
+
static async runWith(ctx, fn) {
|
|
1821
|
+
let tx = new TransactionImplementation(ctx);
|
|
1822
|
+
try {
|
|
1823
|
+
await ctx.execute('BEGIN IMMEDIATE');
|
|
1824
|
+
const result = await fn(tx);
|
|
1825
|
+
await tx.commit();
|
|
1826
|
+
return result;
|
|
1827
|
+
}
|
|
1828
|
+
catch (ex) {
|
|
1829
|
+
try {
|
|
1830
|
+
await tx.rollback();
|
|
1831
|
+
}
|
|
1832
|
+
catch (ex2) {
|
|
1833
|
+
// In rare cases, a rollback may fail.
|
|
1834
|
+
// Safe to ignore.
|
|
1835
|
+
}
|
|
1836
|
+
throw ex;
|
|
1837
|
+
}
|
|
1838
|
+
}
|
|
1839
|
+
}
|
|
1544
1840
|
function isBatchedUpdateNotification(update) {
|
|
1545
1841
|
return 'tables' in update;
|
|
1546
1842
|
}
|
|
@@ -1683,16 +1979,12 @@ class SyncStatus {
|
|
|
1683
1979
|
*
|
|
1684
1980
|
* This returns null when the database is currently being opened and we don't have reliable information about all
|
|
1685
1981
|
* included streams yet.
|
|
1686
|
-
*
|
|
1687
|
-
* @experimental Sync streams are currently in alpha.
|
|
1688
1982
|
*/
|
|
1689
1983
|
get syncStreams() {
|
|
1690
1984
|
return this.options.dataFlow?.internalStreamSubscriptions?.map((core) => new SyncStreamStatusView(this, core));
|
|
1691
1985
|
}
|
|
1692
1986
|
/**
|
|
1693
1987
|
* If the `stream` appears in {@link syncStreams}, returns the current status for that stream.
|
|
1694
|
-
*
|
|
1695
|
-
* @experimental Sync streams are currently in alpha.
|
|
1696
1988
|
*/
|
|
1697
1989
|
forStream(stream) {
|
|
1698
1990
|
const asJson = JSON.stringify(stream.parameters);
|
|
@@ -1961,15 +2253,6 @@ class ControlledExecutor {
|
|
|
1961
2253
|
}
|
|
1962
2254
|
}
|
|
1963
2255
|
|
|
1964
|
-
/**
|
|
1965
|
-
* A ponyfill for `Symbol.asyncIterator` that is compatible with the
|
|
1966
|
-
* [recommended polyfill](https://github.com/Azure/azure-sdk-for-js/blob/%40azure/core-asynciterator-polyfill_1.0.2/sdk/core/core-asynciterator-polyfill/src/index.ts#L4-L6)
|
|
1967
|
-
* we recommend for React Native.
|
|
1968
|
-
*
|
|
1969
|
-
* As long as we use this symbol (instead of `for await` and `async *`) in this package, we can be compatible with async
|
|
1970
|
-
* iterators without requiring them.
|
|
1971
|
-
*/
|
|
1972
|
-
const symbolAsyncIterator = Symbol.asyncIterator ?? Symbol.for('Symbol.asyncIterator');
|
|
1973
2256
|
/**
|
|
1974
2257
|
* Throttle a function to be called at most once every "wait" milliseconds,
|
|
1975
2258
|
* on the trailing edge.
|
|
@@ -7935,177 +8218,10 @@ function requireDist () {
|
|
|
7935
8218
|
|
|
7936
8219
|
var distExports = requireDist();
|
|
7937
8220
|
|
|
7938
|
-
var version = "1.
|
|
8221
|
+
var version = "1.52.0";
|
|
7939
8222
|
var PACKAGE = {
|
|
7940
8223
|
version: version};
|
|
7941
8224
|
|
|
7942
|
-
const DEFAULT_PRESSURE_LIMITS = {
|
|
7943
|
-
highWater: 10,
|
|
7944
|
-
lowWater: 0
|
|
7945
|
-
};
|
|
7946
|
-
/**
|
|
7947
|
-
* A very basic implementation of a data stream with backpressure support which does not use
|
|
7948
|
-
* native JS streams or async iterators.
|
|
7949
|
-
* This is handy for environments such as React Native which need polyfills for the above.
|
|
7950
|
-
*/
|
|
7951
|
-
class DataStream extends BaseObserver {
|
|
7952
|
-
options;
|
|
7953
|
-
dataQueue;
|
|
7954
|
-
isClosed;
|
|
7955
|
-
processingPromise;
|
|
7956
|
-
notifyDataAdded;
|
|
7957
|
-
logger;
|
|
7958
|
-
mapLine;
|
|
7959
|
-
constructor(options) {
|
|
7960
|
-
super();
|
|
7961
|
-
this.options = options;
|
|
7962
|
-
this.processingPromise = null;
|
|
7963
|
-
this.isClosed = false;
|
|
7964
|
-
this.dataQueue = [];
|
|
7965
|
-
this.mapLine = options?.mapLine ?? ((line) => line);
|
|
7966
|
-
this.logger = options?.logger ?? Logger.get('DataStream');
|
|
7967
|
-
if (options?.closeOnError) {
|
|
7968
|
-
const l = this.registerListener({
|
|
7969
|
-
error: (ex) => {
|
|
7970
|
-
l?.();
|
|
7971
|
-
this.close();
|
|
7972
|
-
}
|
|
7973
|
-
});
|
|
7974
|
-
}
|
|
7975
|
-
}
|
|
7976
|
-
get highWatermark() {
|
|
7977
|
-
return this.options?.pressure?.highWaterMark ?? DEFAULT_PRESSURE_LIMITS.highWater;
|
|
7978
|
-
}
|
|
7979
|
-
get lowWatermark() {
|
|
7980
|
-
return this.options?.pressure?.lowWaterMark ?? DEFAULT_PRESSURE_LIMITS.lowWater;
|
|
7981
|
-
}
|
|
7982
|
-
get closed() {
|
|
7983
|
-
return this.isClosed;
|
|
7984
|
-
}
|
|
7985
|
-
async close() {
|
|
7986
|
-
this.isClosed = true;
|
|
7987
|
-
await this.processingPromise;
|
|
7988
|
-
this.iterateListeners((l) => l.closed?.());
|
|
7989
|
-
// Discard any data in the queue
|
|
7990
|
-
this.dataQueue = [];
|
|
7991
|
-
this.listeners.clear();
|
|
7992
|
-
}
|
|
7993
|
-
/**
|
|
7994
|
-
* Enqueues data for the consumers to read
|
|
7995
|
-
*/
|
|
7996
|
-
enqueueData(data) {
|
|
7997
|
-
if (this.isClosed) {
|
|
7998
|
-
throw new Error('Cannot enqueue data into closed stream.');
|
|
7999
|
-
}
|
|
8000
|
-
this.dataQueue.push(data);
|
|
8001
|
-
this.notifyDataAdded?.();
|
|
8002
|
-
this.processQueue();
|
|
8003
|
-
}
|
|
8004
|
-
/**
|
|
8005
|
-
* Reads data once from the data stream
|
|
8006
|
-
* @returns a Data payload or Null if the stream closed.
|
|
8007
|
-
*/
|
|
8008
|
-
async read() {
|
|
8009
|
-
if (this.closed) {
|
|
8010
|
-
return null;
|
|
8011
|
-
}
|
|
8012
|
-
// Wait for any pending processing to complete first.
|
|
8013
|
-
// This ensures we register our listener before calling processQueue(),
|
|
8014
|
-
// avoiding a race where processQueue() sees no reader and returns early.
|
|
8015
|
-
if (this.processingPromise) {
|
|
8016
|
-
await this.processingPromise;
|
|
8017
|
-
}
|
|
8018
|
-
// Re-check after await - stream may have closed while we were waiting
|
|
8019
|
-
if (this.closed) {
|
|
8020
|
-
return null;
|
|
8021
|
-
}
|
|
8022
|
-
return new Promise((resolve, reject) => {
|
|
8023
|
-
const l = this.registerListener({
|
|
8024
|
-
data: async (data) => {
|
|
8025
|
-
resolve(data);
|
|
8026
|
-
// Remove the listener
|
|
8027
|
-
l?.();
|
|
8028
|
-
},
|
|
8029
|
-
closed: () => {
|
|
8030
|
-
resolve(null);
|
|
8031
|
-
l?.();
|
|
8032
|
-
},
|
|
8033
|
-
error: (ex) => {
|
|
8034
|
-
reject(ex);
|
|
8035
|
-
l?.();
|
|
8036
|
-
}
|
|
8037
|
-
});
|
|
8038
|
-
this.processQueue();
|
|
8039
|
-
});
|
|
8040
|
-
}
|
|
8041
|
-
/**
|
|
8042
|
-
* Executes a callback for each data item in the stream
|
|
8043
|
-
*/
|
|
8044
|
-
forEach(callback) {
|
|
8045
|
-
if (this.dataQueue.length <= this.lowWatermark) {
|
|
8046
|
-
this.iterateAsyncErrored(async (l) => l.lowWater?.());
|
|
8047
|
-
}
|
|
8048
|
-
return this.registerListener({
|
|
8049
|
-
data: callback
|
|
8050
|
-
});
|
|
8051
|
-
}
|
|
8052
|
-
processQueue() {
|
|
8053
|
-
if (this.processingPromise) {
|
|
8054
|
-
return;
|
|
8055
|
-
}
|
|
8056
|
-
const promise = (this.processingPromise = this._processQueue());
|
|
8057
|
-
promise.finally(() => {
|
|
8058
|
-
this.processingPromise = null;
|
|
8059
|
-
});
|
|
8060
|
-
return promise;
|
|
8061
|
-
}
|
|
8062
|
-
hasDataReader() {
|
|
8063
|
-
return Array.from(this.listeners.values()).some((l) => !!l.data);
|
|
8064
|
-
}
|
|
8065
|
-
async _processQueue() {
|
|
8066
|
-
/**
|
|
8067
|
-
* Allow listeners to mutate the queue before processing.
|
|
8068
|
-
* This allows for operations such as dropping or compressing data
|
|
8069
|
-
* on high water or requesting more data on low water.
|
|
8070
|
-
*/
|
|
8071
|
-
if (this.dataQueue.length >= this.highWatermark) {
|
|
8072
|
-
await this.iterateAsyncErrored(async (l) => l.highWater?.());
|
|
8073
|
-
}
|
|
8074
|
-
if (this.isClosed || !this.hasDataReader()) {
|
|
8075
|
-
return;
|
|
8076
|
-
}
|
|
8077
|
-
if (this.dataQueue.length) {
|
|
8078
|
-
const data = this.dataQueue.shift();
|
|
8079
|
-
const mapped = this.mapLine(data);
|
|
8080
|
-
await this.iterateAsyncErrored(async (l) => l.data?.(mapped));
|
|
8081
|
-
}
|
|
8082
|
-
if (this.dataQueue.length <= this.lowWatermark) {
|
|
8083
|
-
const dataAdded = new Promise((resolve) => {
|
|
8084
|
-
this.notifyDataAdded = resolve;
|
|
8085
|
-
});
|
|
8086
|
-
await Promise.race([this.iterateAsyncErrored(async (l) => l.lowWater?.()), dataAdded]);
|
|
8087
|
-
this.notifyDataAdded = null;
|
|
8088
|
-
}
|
|
8089
|
-
if (this.dataQueue.length > 0) {
|
|
8090
|
-
setTimeout(() => this.processQueue());
|
|
8091
|
-
}
|
|
8092
|
-
}
|
|
8093
|
-
async iterateAsyncErrored(cb) {
|
|
8094
|
-
// Important: We need to copy the listeners, as calling a listener could result in adding another
|
|
8095
|
-
// listener, resulting in infinite loops.
|
|
8096
|
-
const listeners = Array.from(this.listeners.values());
|
|
8097
|
-
for (let i of listeners) {
|
|
8098
|
-
try {
|
|
8099
|
-
await cb(i);
|
|
8100
|
-
}
|
|
8101
|
-
catch (ex) {
|
|
8102
|
-
this.logger.error(ex);
|
|
8103
|
-
this.iterateListeners((l) => l.error?.(ex));
|
|
8104
|
-
}
|
|
8105
|
-
}
|
|
8106
|
-
}
|
|
8107
|
-
}
|
|
8108
|
-
|
|
8109
8225
|
var WebsocketDuplexConnection = {};
|
|
8110
8226
|
|
|
8111
8227
|
var hasRequiredWebsocketDuplexConnection;
|
|
@@ -8268,8 +8384,215 @@ class WebsocketClientTransport {
|
|
|
8268
8384
|
}
|
|
8269
8385
|
}
|
|
8270
8386
|
|
|
8387
|
+
const doneResult = { done: true, value: undefined };
|
|
8388
|
+
function valueResult(value) {
|
|
8389
|
+
return { done: false, value };
|
|
8390
|
+
}
|
|
8391
|
+
/**
|
|
8392
|
+
* A variant of {@link Array.map} for async iterators.
|
|
8393
|
+
*/
|
|
8394
|
+
function map(source, map) {
|
|
8395
|
+
return {
|
|
8396
|
+
next: async () => {
|
|
8397
|
+
const value = await source.next();
|
|
8398
|
+
if (value.done) {
|
|
8399
|
+
return value;
|
|
8400
|
+
}
|
|
8401
|
+
else {
|
|
8402
|
+
return { value: map(value.value) };
|
|
8403
|
+
}
|
|
8404
|
+
}
|
|
8405
|
+
};
|
|
8406
|
+
}
|
|
8407
|
+
/**
 * Expands a source async iterator by allowing to inject events asynchronously.
 *
 * The resulting iterator will emit all events from its source. Additionally though, events can be injected. These
 * events are dropped once the main iterator completes, but are otherwise forwarded.
 *
 * The iterator completes when its source completes, and it supports backpressure by only calling `next()` on the source
 * in response to a `next()` call from downstream if no pending injected events can be dispatched.
 *
 * @param source The async iterator to wrap.
 * @returns An object with `next()` (async-iterator protocol) and `inject(event)`.
 */
function injectable(source) {
    // Set once source.next() resolves with done == true; afterwards next() always reports done.
    let sourceIsDone = false;
    let waiter = undefined; // An active, waiting next() call.
    // A pending upstream event that couldn't be dispatched because inject() has been called before it was resolved.
    let pendingSourceEvent = null;
    // Injected events queued while no next() call was waiting for them.
    let pendingInjectedEvents = [];
    // Atomically takes the waiting next() call (if any), clearing the slot.
    const consumeWaiter = () => {
        const pending = waiter;
        waiter = undefined;
        return pending;
    };
    // Starts one source.next() call; when it settles, either resolve the current waiter
    // or stash the settlement as pendingSourceEvent for the next downstream next() call.
    const fetchFromSource = () => {
        const resolveWaiter = (propagate) => {
            const active = consumeWaiter();
            if (active) {
                propagate(active);
            }
            else {
                // The waiter was consumed by inject() in the meantime; keep this settlement for later.
                pendingSourceEvent = propagate;
            }
        };
        const nextFromSource = source.next();
        nextFromSource.then((value) => {
            sourceIsDone = value.done == true;
            resolveWaiter((w) => w.resolve(value));
        }, (error) => {
            resolveWaiter((w) => w.reject(error));
        });
    };
    return {
        next: () => {
            return new Promise((resolve, reject) => {
                // First priority: Dispatch ready upstream events.
                if (sourceIsDone) {
                    // Note: injected events queued after completion are intentionally dropped here.
                    return resolve(doneResult);
                }
                if (pendingSourceEvent) {
                    pendingSourceEvent({ resolve, reject });
                    pendingSourceEvent = null;
                    return;
                }
                // Second priority: Dispatch injected events
                if (pendingInjectedEvents.length) {
                    return resolve(valueResult(pendingInjectedEvents.shift()));
                }
                // Nothing pending? Fetch from source
                waiter = { resolve, reject };
                return fetchFromSource();
            });
        },
        inject: (event) => {
            // If a next() call is currently waiting, satisfy it immediately with the injected event...
            const pending = consumeWaiter();
            if (pending != null) {
                pending.resolve(valueResult(event));
            }
            else {
                // ...otherwise queue it until the next downstream next() call.
                pendingInjectedEvents.push(event);
            }
        }
    };
}
|
|
8477
|
+
/**
 * Splits a byte stream at line endings, emitting each line as a string.
 *
 * Whitespace-only lines are skipped. If the stream ends with data that has no trailing
 * newline, that remainder is emitted as a final value before completion is reported.
 *
 * @param source Async iterator of byte chunks.
 * @param decoder A `TextDecoder` used to turn chunks into text (streaming mode).
 * @returns An async iterator over the individual trimmed lines.
 */
function extractJsonLines(source, decoder) {
    // Partial line carried over between upstream chunks.
    let tail = '';
    // Complete lines decoded but not yet handed to the consumer.
    const queued = [];
    // Set after the trailing partial line has been emitted following upstream completion.
    let emittedTrailer = false;
    return {
        next: async () => {
            for (;;) {
                if (emittedTrailer) {
                    return doneResult;
                }
                const queuedLine = queued.shift();
                if (queuedLine) {
                    return { done: false, value: queuedLine };
                }
                const { done, value } = await source.next();
                if (done) {
                    const trailer = tail.trim();
                    if (trailer.length != 0) {
                        // Emit the final unterminated line now; the next call reports completion.
                        emittedTrailer = true;
                        return { done: false, value: trailer };
                    }
                    return doneResult;
                }
                tail += decoder.decode(value, { stream: true });
                const pieces = tail.split('\n');
                // Every piece except the last is a complete line.
                for (let index = 0; index < pieces.length - 1; index++) {
                    const trimmed = pieces[index].trim();
                    if (trimmed.length > 0) {
                        queued.push(trimmed);
                    }
                }
                // The last piece may still be incomplete; keep it for the next chunk.
                tail = pieces[pieces.length - 1];
            }
        }
    };
}
|
|
8519
|
+
/**
 * Splits a concatenated stream of BSON objects by emitting individual objects.
 *
 * BSON documents start with a little-endian int32 holding the total document length (including the
 * 4 length bytes themselves); this parser reads that header, then collects exactly that many bytes,
 * correctly handling headers and bodies that span multiple upstream chunks.
 *
 * @param source Async iterator of `Uint8Array` chunks (chunk.buffer/byteOffset are used for copying).
 * @returns An async iterator emitting one complete BSON document (as a `Uint8Array`) per event.
 * @throws If the stream ends mid-document or a document declares an invalid length.
 */
function extractBsonObjects(source) {
    // Fully read but not emitted yet.
    const completedObjects = [];
    // Whether source has returned { done: true }. We do the same once completed objects have been emitted.
    let isDone = false;
    // Scratch space for assembling the 4-byte length header (may arrive split across chunks).
    const lengthBuffer = new DataView(new ArrayBuffer(4));
    // Non-null while we're collecting the body of the current document.
    let objectBody = null;
    // If we're parsing the length field, a number between 1 and 4 (inclusive) describing remaining bytes in the header.
    // If we're consuming a document, the bytes remaining.
    let remainingLength = 4;
    return {
        async next() {
            while (true) {
                // Before fetching new data from upstream, return completed objects.
                if (completedObjects.length) {
                    return valueResult(completedObjects.shift());
                }
                if (isDone) {
                    return doneResult;
                }
                const upstreamEvent = await source.next();
                if (upstreamEvent.done) {
                    isDone = true;
                    // Ending mid-header or mid-body means the stream was truncated.
                    if (objectBody || remainingLength != 4) {
                        throw new Error('illegal end of stream in BSON object');
                    }
                    return doneResult;
                }
                const chunk = upstreamEvent.value;
                for (let i = 0; i < chunk.length;) {
                    const availableInData = chunk.length - i;
                    if (objectBody) {
                        // We're in the middle of reading a BSON document.
                        const bytesToRead = Math.min(availableInData, remainingLength);
                        const copySource = new Uint8Array(chunk.buffer, chunk.byteOffset + i, bytesToRead);
                        // Write position = how much of the body we've already filled.
                        objectBody.set(copySource, objectBody.length - remainingLength);
                        i += bytesToRead;
                        remainingLength -= bytesToRead;
                        if (remainingLength == 0) {
                            completedObjects.push(objectBody);
                            // Prepare to read another document, starting with its length
                            objectBody = null;
                            remainingLength = 4;
                        }
                    }
                    else {
                        // Copy up to 4 bytes into lengthBuffer, depending on how many we still need.
                        const bytesToRead = Math.min(availableInData, remainingLength);
                        for (let j = 0; j < bytesToRead; j++) {
                            lengthBuffer.setUint8(4 - remainingLength + j, chunk[i + j]);
                        }
                        i += bytesToRead;
                        remainingLength -= bytesToRead;
                        if (remainingLength == 0) {
                            // Transition from reading length header to reading document. Subtracting 4 because the length of the
                            // header is included in length.
                            const length = lengthBuffer.getInt32(0, true /* little endian */);
                            remainingLength = length - 4;
                            if (remainingLength < 1) {
                                throw new Error(`invalid length for bson: ${length}`);
                            }
                            // Allocate the full document and replay the length header into its first 4 bytes.
                            objectBody = new Uint8Array(length);
                            new DataView(objectBody.buffer).setInt32(0, length, true);
                        }
                    }
                }
            }
        }
    };
}
|
|
8592
|
+
|
|
8271
8593
|
// Matches one or more trailing slashes; presumably used to normalize endpoint URLs — usage is outside this excerpt.
const POWERSYNC_TRAILING_SLASH_MATCH = /\/+$/;
// The SDK's own version string, read from the bundled package metadata.
const POWERSYNC_JS_VERSION = PACKAGE.version;
// Backpressure bounds for the EventIterator buffering received sync events:
// pause above the high-water mark, resume (and request more) below the low-water mark.
const SYNC_QUEUE_REQUEST_HIGH_WATER = 10;
const SYNC_QUEUE_REQUEST_LOW_WATER = 5;
// Keep alive message is sent every period
const KEEP_ALIVE_MS = 20_000;
|
|
@@ -8449,13 +8772,14 @@ class AbstractRemote {
|
|
|
8449
8772
|
return new WebSocket(url);
|
|
8450
8773
|
}
|
|
8451
8774
|
/**
|
|
8452
|
-
* Returns a data stream of sync line data.
|
|
8775
|
+
* Returns a data stream of sync line data, fetched via RSocket-over-WebSocket.
|
|
8776
|
+
*
|
|
8777
|
+
* The only mechanism to abort the returned stream is to use the abort signal in {@link SocketSyncStreamOptions}.
|
|
8453
8778
|
*
|
|
8454
|
-
* @param map Maps received payload frames to the typed event value.
|
|
8455
8779
|
* @param bson A BSON encoder and decoder. When set, the data stream will be requested with a BSON payload
|
|
8456
8780
|
* (required for compatibility with older sync services).
|
|
8457
8781
|
*/
|
|
8458
|
-
async socketStreamRaw(options,
|
|
8782
|
+
async socketStreamRaw(options, bson) {
|
|
8459
8783
|
const { path, fetchStrategy = exports.FetchStrategy.Buffered } = options;
|
|
8460
8784
|
const mimeType = bson == null ? 'application/json' : 'application/bson';
|
|
8461
8785
|
function toBuffer(js) {
|
|
@@ -8470,52 +8794,55 @@ class AbstractRemote {
|
|
|
8470
8794
|
}
|
|
8471
8795
|
const syncQueueRequestSize = fetchStrategy == exports.FetchStrategy.Buffered ? 10 : 1;
|
|
8472
8796
|
const request = await this.buildRequest(path);
|
|
8797
|
+
const url = this.options.socketUrlTransformer(request.url);
|
|
8473
8798
|
// Add the user agent in the setup payload - we can't set custom
|
|
8474
8799
|
// headers with websockets on web. The browser userAgent is however added
|
|
8475
8800
|
// automatically as a header.
|
|
8476
8801
|
const userAgent = this.getUserAgent();
|
|
8477
|
-
|
|
8478
|
-
|
|
8479
|
-
|
|
8480
|
-
|
|
8481
|
-
|
|
8482
|
-
|
|
8483
|
-
|
|
8802
|
+
// While we're connecting (a process that can't be aborted in RSocket), the WebSocket instance to close if we wanted
|
|
8803
|
+
// to abort the connection.
|
|
8804
|
+
let pendingSocket = null;
|
|
8805
|
+
let keepAliveTimeout;
|
|
8806
|
+
let rsocket = null;
|
|
8807
|
+
let queue = null;
|
|
8808
|
+
let didClose = false;
|
|
8809
|
+
const abortRequest = () => {
|
|
8810
|
+
if (didClose) {
|
|
8811
|
+
return;
|
|
8812
|
+
}
|
|
8813
|
+
didClose = true;
|
|
8814
|
+
clearTimeout(keepAliveTimeout);
|
|
8815
|
+
if (pendingSocket) {
|
|
8816
|
+
pendingSocket.close();
|
|
8817
|
+
}
|
|
8818
|
+
if (rsocket) {
|
|
8819
|
+
rsocket.close();
|
|
8820
|
+
}
|
|
8821
|
+
if (queue) {
|
|
8822
|
+
queue.stop();
|
|
8823
|
+
}
|
|
8824
|
+
};
|
|
8484
8825
|
// Handle upstream abort
|
|
8485
|
-
if (options.abortSignal
|
|
8826
|
+
if (options.abortSignal.aborted) {
|
|
8486
8827
|
throw new AbortOperation('Connection request aborted');
|
|
8487
8828
|
}
|
|
8488
8829
|
else {
|
|
8489
|
-
options.abortSignal
|
|
8490
|
-
stream.close();
|
|
8491
|
-
}, { once: true });
|
|
8830
|
+
options.abortSignal.addEventListener('abort', abortRequest);
|
|
8492
8831
|
}
|
|
8493
|
-
let keepAliveTimeout;
|
|
8494
8832
|
const resetTimeout = () => {
|
|
8495
8833
|
clearTimeout(keepAliveTimeout);
|
|
8496
8834
|
keepAliveTimeout = setTimeout(() => {
|
|
8497
8835
|
this.logger.error(`No data received on WebSocket in ${SOCKET_TIMEOUT_MS}ms, closing connection.`);
|
|
8498
|
-
|
|
8836
|
+
abortRequest();
|
|
8499
8837
|
}, SOCKET_TIMEOUT_MS);
|
|
8500
8838
|
};
|
|
8501
8839
|
resetTimeout();
|
|
8502
|
-
// Typescript complains about this being `never` if it's not assigned here.
|
|
8503
|
-
// This is assigned in `wsCreator`.
|
|
8504
|
-
let disposeSocketConnectionTimeout = () => { };
|
|
8505
|
-
const url = this.options.socketUrlTransformer(request.url);
|
|
8506
8840
|
const connector = new distExports.RSocketConnector({
|
|
8507
8841
|
transport: new WebsocketClientTransport({
|
|
8508
8842
|
url,
|
|
8509
8843
|
wsCreator: (url) => {
|
|
8510
|
-
const socket = this.createSocket(url);
|
|
8511
|
-
|
|
8512
|
-
closed: () => {
|
|
8513
|
-
// Allow closing the underlying WebSocket if the stream was closed before the
|
|
8514
|
-
// RSocket connect completed. This should effectively abort the request.
|
|
8515
|
-
socket.close();
|
|
8516
|
-
}
|
|
8517
|
-
});
|
|
8518
|
-
socket.addEventListener('message', (event) => {
|
|
8844
|
+
const socket = (pendingSocket = this.createSocket(url));
|
|
8845
|
+
socket.addEventListener('message', () => {
|
|
8519
8846
|
resetTimeout();
|
|
8520
8847
|
});
|
|
8521
8848
|
return socket;
|
|
@@ -8535,43 +8862,40 @@ class AbstractRemote {
|
|
|
8535
8862
|
}
|
|
8536
8863
|
}
|
|
8537
8864
|
});
|
|
8538
|
-
let rsocket;
|
|
8539
8865
|
try {
|
|
8540
8866
|
rsocket = await connector.connect();
|
|
8541
8867
|
// The connection is established, we no longer need to monitor the initial timeout
|
|
8542
|
-
|
|
8868
|
+
pendingSocket = null;
|
|
8543
8869
|
}
|
|
8544
8870
|
catch (ex) {
|
|
8545
8871
|
this.logger.error(`Failed to connect WebSocket`, ex);
|
|
8546
|
-
|
|
8547
|
-
if (!stream.closed) {
|
|
8548
|
-
await stream.close();
|
|
8549
|
-
}
|
|
8872
|
+
abortRequest();
|
|
8550
8873
|
throw ex;
|
|
8551
8874
|
}
|
|
8552
8875
|
resetTimeout();
|
|
8553
|
-
let socketIsClosed = false;
|
|
8554
|
-
const closeSocket = () => {
|
|
8555
|
-
clearTimeout(keepAliveTimeout);
|
|
8556
|
-
if (socketIsClosed) {
|
|
8557
|
-
return;
|
|
8558
|
-
}
|
|
8559
|
-
socketIsClosed = true;
|
|
8560
|
-
rsocket.close();
|
|
8561
|
-
};
|
|
8562
8876
|
// Helps to prevent double close scenarios
|
|
8563
|
-
rsocket.onClose(() => (
|
|
8564
|
-
|
|
8565
|
-
let pendingEventsCount = syncQueueRequestSize;
|
|
8566
|
-
const disposeClosedListener = stream.registerListener({
|
|
8567
|
-
closed: () => {
|
|
8568
|
-
closeSocket();
|
|
8569
|
-
disposeClosedListener();
|
|
8570
|
-
}
|
|
8571
|
-
});
|
|
8572
|
-
const socket = await new Promise((resolve, reject) => {
|
|
8877
|
+
rsocket.onClose(() => (rsocket = null));
|
|
8878
|
+
return await new Promise((resolve, reject) => {
|
|
8573
8879
|
let connectionEstablished = false;
|
|
8574
|
-
|
|
8880
|
+
let pendingEventsCount = syncQueueRequestSize;
|
|
8881
|
+
let paused = false;
|
|
8882
|
+
let res = null;
|
|
8883
|
+
function requestMore() {
|
|
8884
|
+
const delta = syncQueueRequestSize - pendingEventsCount;
|
|
8885
|
+
if (!paused && delta > 0) {
|
|
8886
|
+
res?.request(delta);
|
|
8887
|
+
pendingEventsCount = syncQueueRequestSize;
|
|
8888
|
+
}
|
|
8889
|
+
}
|
|
8890
|
+
const events = new eventIterator.EventIterator((q) => {
|
|
8891
|
+
queue = q;
|
|
8892
|
+
q.on('highWater', () => (paused = true));
|
|
8893
|
+
q.on('lowWater', () => {
|
|
8894
|
+
paused = false;
|
|
8895
|
+
requestMore();
|
|
8896
|
+
});
|
|
8897
|
+
}, { highWaterMark: SYNC_QUEUE_REQUEST_HIGH_WATER, lowWaterMark: SYNC_QUEUE_REQUEST_LOW_WATER })[Symbol.asyncIterator]();
|
|
8898
|
+
res = rsocket.requestStream({
|
|
8575
8899
|
data: toBuffer(options.data),
|
|
8576
8900
|
metadata: toBuffer({
|
|
8577
8901
|
path
|
|
@@ -8596,7 +8920,7 @@ class AbstractRemote {
|
|
|
8596
8920
|
}
|
|
8597
8921
|
// RSocket will close the RSocket stream automatically
|
|
8598
8922
|
// Close the downstream stream as well - this will close the RSocket connection and WebSocket
|
|
8599
|
-
|
|
8923
|
+
abortRequest();
|
|
8600
8924
|
// Handles cases where the connection failed e.g. auth error or connection error
|
|
8601
8925
|
if (!connectionEstablished) {
|
|
8602
8926
|
reject(e);
|
|
@@ -8606,41 +8930,40 @@ class AbstractRemote {
|
|
|
8606
8930
|
// The connection is active
|
|
8607
8931
|
if (!connectionEstablished) {
|
|
8608
8932
|
connectionEstablished = true;
|
|
8609
|
-
resolve(
|
|
8933
|
+
resolve(events);
|
|
8610
8934
|
}
|
|
8611
8935
|
const { data } = payload;
|
|
8936
|
+
if (data) {
|
|
8937
|
+
queue.push(data);
|
|
8938
|
+
}
|
|
8612
8939
|
// Less events are now pending
|
|
8613
8940
|
pendingEventsCount--;
|
|
8614
|
-
|
|
8615
|
-
|
|
8616
|
-
}
|
|
8617
|
-
stream.enqueueData(data);
|
|
8941
|
+
// Request another event (unless the downstream consumer is paused).
|
|
8942
|
+
requestMore();
|
|
8618
8943
|
},
|
|
8619
8944
|
onComplete: () => {
|
|
8620
|
-
|
|
8945
|
+
abortRequest(); // this will also emit a done event
|
|
8621
8946
|
},
|
|
8622
8947
|
onExtension: () => { }
|
|
8623
8948
|
});
|
|
8624
8949
|
});
|
|
8625
|
-
const l = stream.registerListener({
|
|
8626
|
-
lowWater: async () => {
|
|
8627
|
-
// Request to fill up the queue
|
|
8628
|
-
const required = syncQueueRequestSize - pendingEventsCount;
|
|
8629
|
-
if (required > 0) {
|
|
8630
|
-
socket.request(syncQueueRequestSize - pendingEventsCount);
|
|
8631
|
-
pendingEventsCount = syncQueueRequestSize;
|
|
8632
|
-
}
|
|
8633
|
-
},
|
|
8634
|
-
closed: () => {
|
|
8635
|
-
l();
|
|
8636
|
-
}
|
|
8637
|
-
});
|
|
8638
|
-
return stream;
|
|
8639
8950
|
}
|
|
8640
8951
|
/**
|
|
8641
|
-
*
|
|
8952
|
+
* @returns Whether the HTTP implementation on this platform can receive streamed binary responses. This is true on
|
|
8953
|
+
* all platforms except React Native (who would have guessed...), where we must not request BSON responses.
|
|
8954
|
+
*
|
|
8955
|
+
* @see https://github.com/react-native-community/fetch?tab=readme-ov-file#motivation
|
|
8642
8956
|
*/
|
|
8643
|
-
|
|
8957
|
+
// Base implementation: assume binary streaming works; see the JSDoc above for the React Native caveat.
// NOTE(review): platform-specific subclasses presumably override this getter where unsupported — confirm.
get supportsStreamingBinaryResponses() {
    return true;
}
|
|
8960
|
+
/**
|
|
8961
|
+
* Posts a `/sync/stream` request, asserts that it completes successfully and returns the streaming response as an
|
|
8962
|
+
* async iterator of byte blobs.
|
|
8963
|
+
*
|
|
8964
|
+
* To cancel the async iterator, use the abort signal from {@link SyncStreamOptions} passed to this method.
|
|
8965
|
+
*/
|
|
8966
|
+
async fetchStreamRaw(options) {
|
|
8644
8967
|
const { data, path, headers, abortSignal } = options;
|
|
8645
8968
|
const request = await this.buildRequest(path);
|
|
8646
8969
|
/**
|
|
@@ -8652,119 +8975,94 @@ class AbstractRemote {
|
|
|
8652
8975
|
* Aborting the active fetch request while it is being consumed seems to throw
|
|
8653
8976
|
* an unhandled exception on the window level.
|
|
8654
8977
|
*/
|
|
8655
|
-
if (abortSignal
|
|
8656
|
-
throw new AbortOperation('Abort request received before making
|
|
8978
|
+
if (abortSignal.aborted) {
|
|
8979
|
+
throw new AbortOperation('Abort request received before making fetchStreamRaw request');
|
|
8657
8980
|
}
|
|
8658
8981
|
const controller = new AbortController();
|
|
8659
|
-
let
|
|
8660
|
-
abortSignal
|
|
8661
|
-
|
|
8982
|
+
let reader = null;
|
|
8983
|
+
abortSignal.addEventListener('abort', () => {
|
|
8984
|
+
const reason = abortSignal.reason ??
|
|
8985
|
+
new AbortOperation('Cancelling network request before it resolves. Abort signal has been received.');
|
|
8986
|
+
if (reader == null) {
|
|
8662
8987
|
// Only abort via the abort controller if the request has not resolved yet
|
|
8663
|
-
controller.abort(
|
|
8664
|
-
|
|
8988
|
+
controller.abort(reason);
|
|
8989
|
+
}
|
|
8990
|
+
else {
|
|
8991
|
+
reader.cancel(reason).catch(() => {
|
|
8992
|
+
// Cancelling the reader might rethrow an exception we would have handled by throwing in next(). So we can
|
|
8993
|
+
// ignore it here.
|
|
8994
|
+
});
|
|
8665
8995
|
}
|
|
8666
8996
|
});
|
|
8667
|
-
|
|
8668
|
-
|
|
8669
|
-
|
|
8670
|
-
|
|
8671
|
-
|
|
8672
|
-
|
|
8673
|
-
|
|
8674
|
-
|
|
8675
|
-
|
|
8997
|
+
let res;
|
|
8998
|
+
let responseIsBson = false;
|
|
8999
|
+
try {
|
|
9000
|
+
const ndJson = 'application/x-ndjson';
|
|
9001
|
+
const bson = 'application/vnd.powersync.bson-stream';
|
|
9002
|
+
res = await this.fetch(request.url, {
|
|
9003
|
+
method: 'POST',
|
|
9004
|
+
headers: {
|
|
9005
|
+
...headers,
|
|
9006
|
+
...request.headers,
|
|
9007
|
+
accept: this.supportsStreamingBinaryResponses ? `${bson};q=0.9,${ndJson};q=0.8` : ndJson
|
|
9008
|
+
},
|
|
9009
|
+
body: JSON.stringify(data),
|
|
9010
|
+
signal: controller.signal,
|
|
9011
|
+
cache: 'no-store',
|
|
9012
|
+
...(this.options.fetchOptions ?? {}),
|
|
9013
|
+
...options.fetchOptions
|
|
9014
|
+
});
|
|
9015
|
+
if (!res.ok || !res.body) {
|
|
9016
|
+
const text = await res.text();
|
|
9017
|
+
this.logger.error(`Could not POST streaming to ${path} - ${res.status} - ${res.statusText}: ${text}`);
|
|
9018
|
+
const error = new Error(`HTTP ${res.statusText}: ${text}`);
|
|
9019
|
+
error.status = res.status;
|
|
9020
|
+
throw error;
|
|
9021
|
+
}
|
|
9022
|
+
const contentType = res.headers.get('content-type');
|
|
9023
|
+
responseIsBson = contentType == bson;
|
|
9024
|
+
}
|
|
9025
|
+
catch (ex) {
|
|
8676
9026
|
if (ex.name == 'AbortError') {
|
|
8677
9027
|
throw new AbortOperation(`Pending fetch request to ${request.url} has been aborted.`);
|
|
8678
9028
|
}
|
|
8679
9029
|
throw ex;
|
|
8680
|
-
});
|
|
8681
|
-
if (!res) {
|
|
8682
|
-
throw new Error('Fetch request was aborted');
|
|
8683
|
-
}
|
|
8684
|
-
requestResolved = true;
|
|
8685
|
-
if (!res.ok || !res.body) {
|
|
8686
|
-
const text = await res.text();
|
|
8687
|
-
this.logger.error(`Could not POST streaming to ${path} - ${res.status} - ${res.statusText}: ${text}`);
|
|
8688
|
-
const error = new Error(`HTTP ${res.statusText}: ${text}`);
|
|
8689
|
-
error.status = res.status;
|
|
8690
|
-
throw error;
|
|
8691
9030
|
}
|
|
8692
|
-
|
|
8693
|
-
|
|
8694
|
-
|
|
8695
|
-
|
|
8696
|
-
|
|
8697
|
-
const closeReader = async () => {
|
|
8698
|
-
try {
|
|
8699
|
-
readerReleased = true;
|
|
8700
|
-
await reader.cancel();
|
|
8701
|
-
}
|
|
8702
|
-
catch (ex) {
|
|
8703
|
-
// an error will throw if the reader hasn't been used yet
|
|
8704
|
-
}
|
|
8705
|
-
reader.releaseLock();
|
|
8706
|
-
};
|
|
8707
|
-
const stream = new DataStream({
|
|
8708
|
-
logger: this.logger,
|
|
8709
|
-
mapLine: mapLine,
|
|
8710
|
-
pressure: {
|
|
8711
|
-
highWaterMark: 20,
|
|
8712
|
-
lowWaterMark: 10
|
|
8713
|
-
}
|
|
8714
|
-
});
|
|
8715
|
-
abortSignal?.addEventListener('abort', () => {
|
|
8716
|
-
closeReader();
|
|
8717
|
-
stream.close();
|
|
8718
|
-
});
|
|
8719
|
-
const decoder = this.createTextDecoder();
|
|
8720
|
-
let buffer = '';
|
|
8721
|
-
const consumeStream = async () => {
|
|
8722
|
-
while (!stream.closed && !abortSignal?.aborted && !readerReleased) {
|
|
8723
|
-
const { done, value } = await reader.read();
|
|
8724
|
-
if (done) {
|
|
8725
|
-
const remaining = buffer.trim();
|
|
8726
|
-
if (remaining.length != 0) {
|
|
8727
|
-
stream.enqueueData(remaining);
|
|
8728
|
-
}
|
|
8729
|
-
stream.close();
|
|
8730
|
-
await closeReader();
|
|
8731
|
-
return;
|
|
9031
|
+
reader = res.body.getReader();
|
|
9032
|
+
const stream = {
|
|
9033
|
+
next: async () => {
|
|
9034
|
+
if (controller.signal.aborted) {
|
|
9035
|
+
return doneResult;
|
|
8732
9036
|
}
|
|
8733
|
-
|
|
8734
|
-
|
|
8735
|
-
const lines = buffer.split('\n');
|
|
8736
|
-
for (var i = 0; i < lines.length - 1; i++) {
|
|
8737
|
-
var l = lines[i].trim();
|
|
8738
|
-
if (l.length > 0) {
|
|
8739
|
-
stream.enqueueData(l);
|
|
8740
|
-
}
|
|
9037
|
+
try {
|
|
9038
|
+
return await reader.read();
|
|
8741
9039
|
}
|
|
8742
|
-
|
|
8743
|
-
|
|
8744
|
-
|
|
8745
|
-
|
|
8746
|
-
|
|
8747
|
-
|
|
8748
|
-
|
|
8749
|
-
dispose();
|
|
8750
|
-
},
|
|
8751
|
-
closed: () => {
|
|
8752
|
-
resolve();
|
|
8753
|
-
dispose();
|
|
8754
|
-
}
|
|
8755
|
-
});
|
|
8756
|
-
});
|
|
9040
|
+
catch (ex) {
|
|
9041
|
+
if (controller.signal.aborted) {
|
|
9042
|
+
// .read() completes with an error if we cancel the reader, which we do to disconnect. So this is just
|
|
9043
|
+
// things working as intended, we can return a done event and consider the exception handled.
|
|
9044
|
+
return doneResult;
|
|
9045
|
+
}
|
|
9046
|
+
throw ex;
|
|
8757
9047
|
}
|
|
8758
9048
|
}
|
|
8759
9049
|
};
|
|
8760
|
-
|
|
8761
|
-
|
|
8762
|
-
|
|
8763
|
-
|
|
8764
|
-
|
|
8765
|
-
|
|
8766
|
-
|
|
8767
|
-
|
|
9050
|
+
return { isBson: responseIsBson, stream };
|
|
9051
|
+
}
|
|
9052
|
+
/**
 * Posts a `/sync/stream` request.
 *
 * Depending on the `Content-Type` of the response, this returns strings for sync lines or encoded BSON documents as
 * {@link Uint8Array}s.
 *
 * @param options Request options forwarded to {@link fetchStreamRaw} (path, data, headers, abort signal).
 * @returns An async iterator of sync-line payloads: `Uint8Array` BSON documents when the service responded with a
 *          BSON stream, trimmed JSON line strings otherwise.
 */
async fetchStream(options) {
    const { isBson, stream } = await this.fetchStreamRaw(options);
    if (isBson) {
        // Binary stream: split concatenated BSON documents by their length headers.
        return extractBsonObjects(stream);
    }
    else {
        // Text stream: decode the bytes and split at newlines.
        return extractJsonLines(stream, this.createTextDecoder());
    }
}
|
|
8769
9067
|
}
|
|
8770
9068
|
|
|
@@ -8899,6 +9197,7 @@ class AbstractStreamingSyncImplementation extends BaseObserver {
|
|
|
8899
9197
|
streamingSyncPromise;
|
|
8900
9198
|
logger;
|
|
8901
9199
|
activeStreams;
|
|
9200
|
+
connectionMayHaveChanged = false;
|
|
8902
9201
|
isUploadingCrud = false;
|
|
8903
9202
|
notifyCompletedUploads;
|
|
8904
9203
|
handleActiveStreamsChange;
|
|
@@ -9178,6 +9477,11 @@ The next upload iteration will be delayed.`);
|
|
|
9178
9477
|
shouldDelayRetry = false;
|
|
9179
9478
|
// A disconnect was requested, we should not delay since there is no explicit retry
|
|
9180
9479
|
}
|
|
9480
|
+
else if (this.connectionMayHaveChanged && ex.message?.indexOf('No iteration is active') >= 0) {
|
|
9481
|
+
this.connectionMayHaveChanged = false;
|
|
9482
|
+
this.logger.info('Sync error after changed connection, retrying immediately');
|
|
9483
|
+
shouldDelayRetry = false;
|
|
9484
|
+
}
|
|
9181
9485
|
else {
|
|
9182
9486
|
this.logger.error(ex);
|
|
9183
9487
|
}
|
|
@@ -9208,6 +9512,15 @@ The next upload iteration will be delayed.`);
|
|
|
9208
9512
|
// Mark as disconnected if here
|
|
9209
9513
|
this.updateSyncStatus({ connected: false, connecting: false });
|
|
9210
9514
|
}
|
|
9515
|
+
/**
 * Signals that the underlying connection may have changed, so a subsequent "No iteration is active" sync error
 * triggers an immediate retry instead of a delayed one (see the error handling in the sync loop).
 */
markConnectionMayHaveChanged() {
    // By setting this field, we'll immediately retry if the next sync event causes an error triggered by us not having
    // an active sync iteration on the connection in use.
    this.connectionMayHaveChanged = true;
    // This triggers a `powersync_control` invocation if a sync iteration is currently active. This is a cheap call to
    // make when no subscriptions have actually changed, we're mainly interested in this immediately throwing if no
    // iteration is active. That allows us to reconnect ASAP, instead of having to wait for the next sync line.
    this.handleActiveStreamsChange?.();
}
|
|
9211
9524
|
async collectLocalBucketState() {
|
|
9212
9525
|
const bucketEntries = await this.options.adapter.getBucketStates();
|
|
9213
9526
|
const req = bucketEntries.map((entry) => ({
|
|
@@ -9272,6 +9585,19 @@ The next upload iteration will be delayed.`);
|
|
|
9272
9585
|
}
|
|
9273
9586
|
});
|
|
9274
9587
|
}
|
|
9588
|
+
/**
 * Opens a stream of sync lines using the connection method selected in `data.connection`.
 *
 * @param data.options Stream request options (path, body, abort signal) passed to the remote.
 * @param data.connection Resolved connection options; `connectionMethod` chooses HTTP fetch vs. WebSocket,
 *                        `fetchStrategy` is forwarded to the socket stream.
 * @param data.bson Optional BSON codec, forwarded to {@link AbstractRemote.socketStreamRaw} for binary payloads.
 * @returns An async iterator of sync lines (strings for JSON lines, `Uint8Array`s for BSON documents).
 */
async receiveSyncLines(data) {
    const { options, connection, bson } = data;
    const remote = this.options.remote;
    if (connection.connectionMethod == exports.SyncStreamConnectionMethod.HTTP) {
        // Plain HTTP streaming; the response content type decides JSON-lines vs. BSON handling.
        return await remote.fetchStream(options);
    }
    else {
        // RSocket-over-WebSocket transport; fetchStrategy controls how many events are buffered.
        return await this.options.remote.socketStreamRaw({
            ...options,
            ...{ fetchStrategy: connection.fetchStrategy }
        }, bson);
    }
}
|
|
9275
9601
|
async legacyStreamingSyncIteration(signal, resolvedOptions) {
|
|
9276
9602
|
const rawTables = resolvedOptions.serializedSchema?.raw_tables;
|
|
9277
9603
|
if (rawTables != null && rawTables.length) {
|
|
@@ -9301,42 +9627,27 @@ The next upload iteration will be delayed.`);
|
|
|
9301
9627
|
client_id: clientId
|
|
9302
9628
|
}
|
|
9303
9629
|
};
|
|
9304
|
-
|
|
9305
|
-
|
|
9306
|
-
|
|
9307
|
-
|
|
9308
|
-
|
|
9309
|
-
|
|
9310
|
-
|
|
9311
|
-
|
|
9312
|
-
|
|
9313
|
-
|
|
9314
|
-
|
|
9315
|
-
|
|
9316
|
-
|
|
9317
|
-
|
|
9318
|
-
stream = await this.options.remote.socketStreamRaw({
|
|
9319
|
-
...syncOptions,
|
|
9320
|
-
...{ fetchStrategy: resolvedOptions.fetchStrategy }
|
|
9321
|
-
}, (payload) => {
|
|
9322
|
-
if (payload instanceof Uint8Array) {
|
|
9323
|
-
return bson.deserialize(payload);
|
|
9324
|
-
}
|
|
9325
|
-
else {
|
|
9326
|
-
// Directly enqueued by us
|
|
9327
|
-
return payload;
|
|
9328
|
-
}
|
|
9329
|
-
}, bson);
|
|
9330
|
-
}
|
|
9630
|
+
const bson = await this.options.remote.getBSON();
|
|
9631
|
+
const source = await this.receiveSyncLines({
|
|
9632
|
+
options: syncOptions,
|
|
9633
|
+
connection: resolvedOptions,
|
|
9634
|
+
bson
|
|
9635
|
+
});
|
|
9636
|
+
const stream = injectable(map(source, (line) => {
|
|
9637
|
+
if (typeof line == 'string') {
|
|
9638
|
+
return JSON.parse(line);
|
|
9639
|
+
}
|
|
9640
|
+
else {
|
|
9641
|
+
return bson.deserialize(line);
|
|
9642
|
+
}
|
|
9643
|
+
}));
|
|
9331
9644
|
this.logger.debug('Stream established. Processing events');
|
|
9332
9645
|
this.notifyCompletedUploads = () => {
|
|
9333
|
-
|
|
9334
|
-
stream.enqueueData({ crud_upload_completed: null });
|
|
9335
|
-
}
|
|
9646
|
+
stream.inject({ crud_upload_completed: null });
|
|
9336
9647
|
};
|
|
9337
|
-
while (
|
|
9338
|
-
const line = await stream.
|
|
9339
|
-
if (
|
|
9648
|
+
while (true) {
|
|
9649
|
+
const { value: line, done } = await stream.next();
|
|
9650
|
+
if (done) {
|
|
9340
9651
|
// The stream has closed while waiting
|
|
9341
9652
|
return;
|
|
9342
9653
|
}
|
|
@@ -9515,14 +9826,17 @@ The next upload iteration will be delayed.`);
|
|
|
9515
9826
|
const syncImplementation = this;
|
|
9516
9827
|
const adapter = this.options.adapter;
|
|
9517
9828
|
const remote = this.options.remote;
|
|
9829
|
+
const controller = new AbortController();
|
|
9830
|
+
const abort = () => {
|
|
9831
|
+
return controller.abort(signal.reason);
|
|
9832
|
+
};
|
|
9833
|
+
signal.addEventListener('abort', abort);
|
|
9518
9834
|
let receivingLines = null;
|
|
9519
9835
|
let hadSyncLine = false;
|
|
9520
9836
|
let hideDisconnectOnRestart = false;
|
|
9521
9837
|
if (signal.aborted) {
|
|
9522
9838
|
throw new AbortOperation('Connection request has been aborted');
|
|
9523
9839
|
}
|
|
9524
|
-
const abortController = new AbortController();
|
|
9525
|
-
signal.addEventListener('abort', () => abortController.abort());
|
|
9526
9840
|
// Pending sync lines received from the service, as well as local events that trigger a powersync_control
|
|
9527
9841
|
// invocation (local events include refreshed tokens and completed uploads).
|
|
9528
9842
|
// This is a single data stream so that we can handle all control calls from a single place.
|
|
@@ -9530,49 +9844,36 @@ The next upload iteration will be delayed.`);
|
|
|
9530
9844
|
async function connect(instr) {
|
|
9531
9845
|
const syncOptions = {
|
|
9532
9846
|
path: '/sync/stream',
|
|
9533
|
-
abortSignal:
|
|
9847
|
+
abortSignal: controller.signal,
|
|
9534
9848
|
data: instr.request
|
|
9535
9849
|
};
|
|
9536
|
-
|
|
9537
|
-
|
|
9538
|
-
|
|
9539
|
-
|
|
9540
|
-
|
|
9541
|
-
|
|
9542
|
-
|
|
9543
|
-
|
|
9544
|
-
|
|
9545
|
-
|
|
9546
|
-
|
|
9547
|
-
|
|
9548
|
-
|
|
9549
|
-
|
|
9550
|
-
|
|
9551
|
-
|
|
9552
|
-
|
|
9553
|
-
fetchStrategy: resolvedOptions.fetchStrategy
|
|
9554
|
-
}, (payload) => {
|
|
9555
|
-
if (payload instanceof Uint8Array) {
|
|
9556
|
-
return {
|
|
9557
|
-
command: exports.PowerSyncControlCommand.PROCESS_BSON_LINE,
|
|
9558
|
-
payload: payload
|
|
9559
|
-
};
|
|
9560
|
-
}
|
|
9561
|
-
else {
|
|
9562
|
-
// Directly enqueued by us
|
|
9563
|
-
return payload;
|
|
9564
|
-
}
|
|
9565
|
-
});
|
|
9566
|
-
}
|
|
9850
|
+
controlInvocations = injectable(map(await syncImplementation.receiveSyncLines({
|
|
9851
|
+
options: syncOptions,
|
|
9852
|
+
connection: resolvedOptions
|
|
9853
|
+
}), (line) => {
|
|
9854
|
+
if (typeof line == 'string') {
|
|
9855
|
+
return {
|
|
9856
|
+
command: exports.PowerSyncControlCommand.PROCESS_TEXT_LINE,
|
|
9857
|
+
payload: line
|
|
9858
|
+
};
|
|
9859
|
+
}
|
|
9860
|
+
else {
|
|
9861
|
+
return {
|
|
9862
|
+
command: exports.PowerSyncControlCommand.PROCESS_BSON_LINE,
|
|
9863
|
+
payload: line
|
|
9864
|
+
};
|
|
9865
|
+
}
|
|
9866
|
+
}));
|
|
9567
9867
|
// The rust client will set connected: true after the first sync line because that's when it gets invoked, but
|
|
9568
9868
|
// we're already connected here and can report that.
|
|
9569
9869
|
syncImplementation.updateSyncStatus({ connected: true });
|
|
9570
9870
|
try {
|
|
9571
|
-
while (
|
|
9572
|
-
|
|
9573
|
-
if (
|
|
9574
|
-
|
|
9871
|
+
while (true) {
|
|
9872
|
+
let event = await controlInvocations.next();
|
|
9873
|
+
if (event.done) {
|
|
9874
|
+
break;
|
|
9575
9875
|
}
|
|
9876
|
+
const line = event.value;
|
|
9576
9877
|
await control(line.command, line.payload);
|
|
9577
9878
|
if (!hadSyncLine) {
|
|
9578
9879
|
syncImplementation.triggerCrudUpload();
|
|
@@ -9581,12 +9882,8 @@ The next upload iteration will be delayed.`);
|
|
|
9581
9882
|
}
|
|
9582
9883
|
}
|
|
9583
9884
|
finally {
|
|
9584
|
-
|
|
9585
|
-
|
|
9586
|
-
// refreshed. That would throw after closing (and we can't handle those events either way), so set this back
|
|
9587
|
-
// to null.
|
|
9588
|
-
controlInvocations = null;
|
|
9589
|
-
await activeInstructions.close();
|
|
9885
|
+
abort();
|
|
9886
|
+
signal.removeEventListener('abort', abort);
|
|
9590
9887
|
}
|
|
9591
9888
|
}
|
|
9592
9889
|
async function stop() {
|
|
@@ -9596,6 +9893,10 @@ The next upload iteration will be delayed.`);
|
|
|
9596
9893
|
const rawResponse = await adapter.control(op, payload ?? null);
|
|
9597
9894
|
const logger = syncImplementation.logger;
|
|
9598
9895
|
logger.trace('powersync_control', op, payload == null || typeof payload == 'string' ? payload : '<bytes>', rawResponse);
|
|
9896
|
+
if (op != exports.PowerSyncControlCommand.STOP) {
|
|
9897
|
+
// Evidently we have a working connection here, otherwise powersync_control would have failed.
|
|
9898
|
+
syncImplementation.connectionMayHaveChanged = false;
|
|
9899
|
+
}
|
|
9599
9900
|
await handleInstructions(JSON.parse(rawResponse));
|
|
9600
9901
|
}
|
|
9601
9902
|
async function handleInstruction(instruction) {
|
|
@@ -9630,14 +9931,14 @@ The next upload iteration will be delayed.`);
|
|
|
9630
9931
|
remote.invalidateCredentials();
|
|
9631
9932
|
// Restart iteration after the credentials have been refreshed.
|
|
9632
9933
|
remote.fetchCredentials().then((_) => {
|
|
9633
|
-
controlInvocations?.
|
|
9934
|
+
controlInvocations?.inject({ command: exports.PowerSyncControlCommand.NOTIFY_TOKEN_REFRESHED });
|
|
9634
9935
|
}, (err) => {
|
|
9635
9936
|
syncImplementation.logger.warn('Could not prefetch credentials', err);
|
|
9636
9937
|
});
|
|
9637
9938
|
}
|
|
9638
9939
|
}
|
|
9639
9940
|
else if ('CloseSyncStream' in instruction) {
|
|
9640
|
-
|
|
9941
|
+
controller.abort();
|
|
9641
9942
|
hideDisconnectOnRestart = instruction.CloseSyncStream.hide_disconnect;
|
|
9642
9943
|
}
|
|
9643
9944
|
else if ('FlushFileSystem' in instruction) ;
|
|
@@ -9666,17 +9967,13 @@ The next upload iteration will be delayed.`);
|
|
|
9666
9967
|
}
|
|
9667
9968
|
await control(exports.PowerSyncControlCommand.START, JSON.stringify(options));
|
|
9668
9969
|
this.notifyCompletedUploads = () => {
|
|
9669
|
-
|
|
9670
|
-
controlInvocations.enqueueData({ command: exports.PowerSyncControlCommand.NOTIFY_CRUD_UPLOAD_COMPLETED });
|
|
9671
|
-
}
|
|
9970
|
+
controlInvocations?.inject({ command: exports.PowerSyncControlCommand.NOTIFY_CRUD_UPLOAD_COMPLETED });
|
|
9672
9971
|
};
|
|
9673
9972
|
this.handleActiveStreamsChange = () => {
|
|
9674
|
-
|
|
9675
|
-
|
|
9676
|
-
|
|
9677
|
-
|
|
9678
|
-
});
|
|
9679
|
-
}
|
|
9973
|
+
controlInvocations?.inject({
|
|
9974
|
+
command: exports.PowerSyncControlCommand.UPDATE_SUBSCRIPTIONS,
|
|
9975
|
+
payload: JSON.stringify(this.activeStreams)
|
|
9976
|
+
});
|
|
9680
9977
|
};
|
|
9681
9978
|
await receivingLines;
|
|
9682
9979
|
}
|
|
@@ -10327,7 +10624,7 @@ class AbstractPowerSyncDatabase extends BaseObserver {
|
|
|
10327
10624
|
this._schema = schema;
|
|
10328
10625
|
this.ready = false;
|
|
10329
10626
|
this.sdkVersion = '';
|
|
10330
|
-
this.runExclusiveMutex = new
|
|
10627
|
+
this.runExclusiveMutex = new Mutex();
|
|
10331
10628
|
// Start async init
|
|
10332
10629
|
this.subscriptions = {
|
|
10333
10630
|
firstStatusMatching: (predicate, abort) => this.waitForStatus(predicate, abort),
|
|
@@ -10693,7 +10990,7 @@ class AbstractPowerSyncDatabase extends BaseObserver {
|
|
|
10693
10990
|
* @returns A transaction of CRUD operations to upload, or null if there are none
|
|
10694
10991
|
*/
|
|
10695
10992
|
async getNextCrudTransaction() {
|
|
10696
|
-
const iterator = this.getCrudTransactions()[
|
|
10993
|
+
const iterator = this.getCrudTransactions()[Symbol.asyncIterator]();
|
|
10697
10994
|
return (await iterator.next()).value;
|
|
10698
10995
|
}
|
|
10699
10996
|
/**
|
|
@@ -10729,7 +11026,7 @@ class AbstractPowerSyncDatabase extends BaseObserver {
|
|
|
10729
11026
|
*/
|
|
10730
11027
|
getCrudTransactions() {
|
|
10731
11028
|
return {
|
|
10732
|
-
[
|
|
11029
|
+
[Symbol.asyncIterator]: () => {
|
|
10733
11030
|
let lastCrudItemId = -1;
|
|
10734
11031
|
const sql = `
|
|
10735
11032
|
WITH RECURSIVE crud_entries AS (
|
|
@@ -10792,6 +11089,10 @@ SELECT * FROM crud_entries;
|
|
|
10792
11089
|
* Execute a SQL write (INSERT/UPDATE/DELETE) query
|
|
10793
11090
|
* and optionally return results.
|
|
10794
11091
|
*
|
|
11092
|
+
* When using the default client-side [JSON-based view system](https://docs.powersync.com/architecture/client-architecture#client-side-schema-and-sqlite-database-structure),
|
|
11093
|
+
* the returned result's `rowsAffected` may be `0` for successful `UPDATE` and `DELETE` statements.
|
|
11094
|
+
* Use a `RETURNING` clause and inspect `result.rows` when you need to confirm which rows changed.
|
|
11095
|
+
*
|
|
10795
11096
|
* @param sql The SQL query to execute
|
|
10796
11097
|
* @param parameters Optional array of parameters to bind to the query
|
|
10797
11098
|
* @returns The query result as an object with structured key-value pairs
|
|
@@ -10888,7 +11189,7 @@ SELECT * FROM crud_entries;
|
|
|
10888
11189
|
async readTransaction(callback, lockTimeout = DEFAULT_LOCK_TIMEOUT_MS) {
|
|
10889
11190
|
await this.waitForReady();
|
|
10890
11191
|
return this.database.readTransaction(async (tx) => {
|
|
10891
|
-
const res = await callback(
|
|
11192
|
+
const res = await callback(tx);
|
|
10892
11193
|
await tx.rollback();
|
|
10893
11194
|
return res;
|
|
10894
11195
|
}, { timeoutMs: lockTimeout });
|
|
@@ -11665,10 +11966,8 @@ class Schema {
|
|
|
11665
11966
|
* developer instead of automatically by PowerSync.
|
|
11666
11967
|
* Since raw tables are not backed by JSON, running complex queries on them may be more efficient. Further, they allow
|
|
11667
11968
|
* using client-side table and column constraints.
|
|
11668
|
-
* Note that raw tables are only supported when using the new `SyncClientImplementation.rust` sync client.
|
|
11669
11969
|
*
|
|
11670
11970
|
* @param tables An object of (table name, raw table definition) entries.
|
|
11671
|
-
* @experimental Note that the raw tables API is still experimental and may change in the future.
|
|
11672
11971
|
*/
|
|
11673
11972
|
withRawTables(tables) {
|
|
11674
11973
|
for (const [name, rawTableDefinition] of Object.entries(tables)) {
|
|
@@ -11885,6 +12184,8 @@ exports.ControlledExecutor = ControlledExecutor;
|
|
|
11885
12184
|
exports.CrudBatch = CrudBatch;
|
|
11886
12185
|
exports.CrudEntry = CrudEntry;
|
|
11887
12186
|
exports.CrudTransaction = CrudTransaction;
|
|
12187
|
+
exports.DBAdapterDefaultMixin = DBAdapterDefaultMixin;
|
|
12188
|
+
exports.DBGetUtilsDefaultMixin = DBGetUtilsDefaultMixin;
|
|
11888
12189
|
exports.DEFAULT_CRUD_BATCH_LIMIT = DEFAULT_CRUD_BATCH_LIMIT;
|
|
11889
12190
|
exports.DEFAULT_CRUD_UPLOAD_THROTTLE_MS = DEFAULT_CRUD_UPLOAD_THROTTLE_MS;
|
|
11890
12191
|
exports.DEFAULT_INDEX_COLUMN_OPTIONS = DEFAULT_INDEX_COLUMN_OPTIONS;
|
|
@@ -11892,7 +12193,6 @@ exports.DEFAULT_INDEX_OPTIONS = DEFAULT_INDEX_OPTIONS;
|
|
|
11892
12193
|
exports.DEFAULT_LOCK_TIMEOUT_MS = DEFAULT_LOCK_TIMEOUT_MS;
|
|
11893
12194
|
exports.DEFAULT_POWERSYNC_CLOSE_OPTIONS = DEFAULT_POWERSYNC_CLOSE_OPTIONS;
|
|
11894
12195
|
exports.DEFAULT_POWERSYNC_DB_OPTIONS = DEFAULT_POWERSYNC_DB_OPTIONS;
|
|
11895
|
-
exports.DEFAULT_PRESSURE_LIMITS = DEFAULT_PRESSURE_LIMITS;
|
|
11896
12196
|
exports.DEFAULT_REMOTE_LOGGER = DEFAULT_REMOTE_LOGGER;
|
|
11897
12197
|
exports.DEFAULT_REMOTE_OPTIONS = DEFAULT_REMOTE_OPTIONS;
|
|
11898
12198
|
exports.DEFAULT_RETRY_DELAY_MS = DEFAULT_RETRY_DELAY_MS;
|
|
@@ -11903,7 +12203,6 @@ exports.DEFAULT_SYNC_CLIENT_IMPLEMENTATION = DEFAULT_SYNC_CLIENT_IMPLEMENTATION;
|
|
|
11903
12203
|
exports.DEFAULT_TABLE_OPTIONS = DEFAULT_TABLE_OPTIONS;
|
|
11904
12204
|
exports.DEFAULT_WATCH_QUERY_OPTIONS = DEFAULT_WATCH_QUERY_OPTIONS;
|
|
11905
12205
|
exports.DEFAULT_WATCH_THROTTLE_MS = DEFAULT_WATCH_THROTTLE_MS;
|
|
11906
|
-
exports.DataStream = DataStream;
|
|
11907
12206
|
exports.DifferentialQueryProcessor = DifferentialQueryProcessor;
|
|
11908
12207
|
exports.EMPTY_DIFFERENTIAL = EMPTY_DIFFERENTIAL;
|
|
11909
12208
|
exports.FalsyComparator = FalsyComparator;
|
|
@@ -11916,10 +12215,12 @@ exports.LogLevel = LogLevel;
|
|
|
11916
12215
|
exports.MAX_AMOUNT_OF_COLUMNS = MAX_AMOUNT_OF_COLUMNS;
|
|
11917
12216
|
exports.MAX_OP_ID = MAX_OP_ID;
|
|
11918
12217
|
exports.MEMORY_TRIGGER_CLAIM_MANAGER = MEMORY_TRIGGER_CLAIM_MANAGER;
|
|
12218
|
+
exports.Mutex = Mutex;
|
|
11919
12219
|
exports.OnChangeQueryProcessor = OnChangeQueryProcessor;
|
|
11920
12220
|
exports.OpType = OpType;
|
|
11921
12221
|
exports.OplogEntry = OplogEntry;
|
|
11922
12222
|
exports.Schema = Schema;
|
|
12223
|
+
exports.Semaphore = Semaphore;
|
|
11923
12224
|
exports.SqliteBucketStorage = SqliteBucketStorage;
|
|
11924
12225
|
exports.SyncDataBatch = SyncDataBatch;
|
|
11925
12226
|
exports.SyncDataBucket = SyncDataBucket;
|
|
@@ -11949,9 +12250,9 @@ exports.isStreamingSyncCheckpointDiff = isStreamingSyncCheckpointDiff;
|
|
|
11949
12250
|
exports.isStreamingSyncCheckpointPartiallyComplete = isStreamingSyncCheckpointPartiallyComplete;
|
|
11950
12251
|
exports.isStreamingSyncData = isStreamingSyncData;
|
|
11951
12252
|
exports.isSyncNewCheckpointRequest = isSyncNewCheckpointRequest;
|
|
11952
|
-
exports.mutexRunExclusive = mutexRunExclusive;
|
|
11953
12253
|
exports.parseQuery = parseQuery;
|
|
11954
12254
|
exports.runOnSchemaChange = runOnSchemaChange;
|
|
11955
12255
|
exports.sanitizeSQL = sanitizeSQL;
|
|
11956
12256
|
exports.sanitizeUUID = sanitizeUUID;
|
|
12257
|
+
exports.timeoutSignal = timeoutSignal;
|
|
11957
12258
|
//# sourceMappingURL=bundle.node.cjs.map
|