@powersync/common 0.0.0-dev-20260311081226 → 0.0.0-dev-20260414110516

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. package/dist/bundle.cjs +775 -485
  2. package/dist/bundle.cjs.map +1 -1
  3. package/dist/bundle.mjs +769 -481
  4. package/dist/bundle.mjs.map +1 -1
  5. package/dist/bundle.node.cjs +773 -484
  6. package/dist/bundle.node.cjs.map +1 -1
  7. package/dist/bundle.node.mjs +767 -480
  8. package/dist/bundle.node.mjs.map +1 -1
  9. package/dist/index.d.cts +175 -94
  10. package/lib/attachments/AttachmentQueue.d.ts +10 -4
  11. package/lib/attachments/AttachmentQueue.js +10 -4
  12. package/lib/attachments/AttachmentQueue.js.map +1 -1
  13. package/lib/attachments/AttachmentService.js +2 -3
  14. package/lib/attachments/AttachmentService.js.map +1 -1
  15. package/lib/attachments/SyncingService.d.ts +2 -1
  16. package/lib/attachments/SyncingService.js +4 -5
  17. package/lib/attachments/SyncingService.js.map +1 -1
  18. package/lib/client/AbstractPowerSyncDatabase.d.ts +5 -1
  19. package/lib/client/AbstractPowerSyncDatabase.js +9 -5
  20. package/lib/client/AbstractPowerSyncDatabase.js.map +1 -1
  21. package/lib/client/sync/stream/AbstractRemote.d.ts +29 -8
  22. package/lib/client/sync/stream/AbstractRemote.js +154 -177
  23. package/lib/client/sync/stream/AbstractRemote.js.map +1 -1
  24. package/lib/client/sync/stream/AbstractStreamingSyncImplementation.d.ts +1 -0
  25. package/lib/client/sync/stream/AbstractStreamingSyncImplementation.js +69 -88
  26. package/lib/client/sync/stream/AbstractStreamingSyncImplementation.js.map +1 -1
  27. package/lib/client/triggers/TriggerManager.d.ts +12 -1
  28. package/lib/client/triggers/TriggerManagerImpl.d.ts +2 -2
  29. package/lib/client/triggers/TriggerManagerImpl.js +3 -2
  30. package/lib/client/triggers/TriggerManagerImpl.js.map +1 -1
  31. package/lib/db/DBAdapter.d.ts +55 -9
  32. package/lib/db/DBAdapter.js +126 -0
  33. package/lib/db/DBAdapter.js.map +1 -1
  34. package/lib/index.d.ts +1 -1
  35. package/lib/index.js +0 -1
  36. package/lib/index.js.map +1 -1
  37. package/lib/utils/async.d.ts +0 -9
  38. package/lib/utils/async.js +0 -9
  39. package/lib/utils/async.js.map +1 -1
  40. package/lib/utils/mutex.d.ts +47 -5
  41. package/lib/utils/mutex.js +146 -21
  42. package/lib/utils/mutex.js.map +1 -1
  43. package/lib/utils/queue.d.ts +16 -0
  44. package/lib/utils/queue.js +42 -0
  45. package/lib/utils/queue.js.map +1 -0
  46. package/lib/utils/stream_transform.d.ts +39 -0
  47. package/lib/utils/stream_transform.js +206 -0
  48. package/lib/utils/stream_transform.js.map +1 -0
  49. package/package.json +9 -8
  50. package/src/attachments/AttachmentQueue.ts +10 -4
  51. package/src/attachments/AttachmentService.ts +2 -3
  52. package/src/attachments/README.md +6 -4
  53. package/src/attachments/SyncingService.ts +4 -5
  54. package/src/client/AbstractPowerSyncDatabase.ts +9 -5
  55. package/src/client/sync/stream/AbstractRemote.ts +182 -206
  56. package/src/client/sync/stream/AbstractStreamingSyncImplementation.ts +82 -83
  57. package/src/client/triggers/TriggerManager.ts +13 -1
  58. package/src/client/triggers/TriggerManagerImpl.ts +4 -2
  59. package/src/db/DBAdapter.ts +167 -9
  60. package/src/index.ts +1 -1
  61. package/src/utils/async.ts +0 -11
  62. package/src/utils/mutex.ts +184 -26
  63. package/src/utils/queue.ts +48 -0
  64. package/src/utils/stream_transform.ts +252 -0
  65. package/lib/utils/DataStream.d.ts +0 -62
  66. package/lib/utils/DataStream.js +0 -169
  67. package/lib/utils/DataStream.js.map +0 -1
  68. package/src/utils/DataStream.ts +0 -222
@@ -1,6 +1,5 @@
1
1
  'use strict';
2
2
 
3
- var asyncMutex = require('async-mutex');
4
3
  var eventIterator = require('event-iterator');
5
4
  var node_buffer = require('node:buffer');
6
5
 
@@ -661,7 +660,7 @@ class SyncingService {
661
660
  updatedAttachments.push(downloaded);
662
661
  break;
663
662
  case exports.AttachmentState.QUEUED_DELETE:
664
- const deleted = await this.deleteAttachment(attachment);
663
+ const deleted = await this.deleteAttachment(attachment, context);
665
664
  updatedAttachments.push(deleted);
666
665
  break;
667
666
  }
@@ -739,17 +738,16 @@ class SyncingService {
739
738
  * On failure, defers to error handler or archives.
740
739
  *
741
740
  * @param attachment - The attachment record to delete
741
+ * @param context - Attachment context for database operations
742
742
  * @returns Updated attachment record
743
743
  */
744
- async deleteAttachment(attachment) {
744
+ async deleteAttachment(attachment, context) {
745
745
  try {
746
746
  await this.remoteStorage.deleteFile(attachment);
747
747
  if (attachment.localUri) {
748
748
  await this.localStorage.deleteFile(attachment.localUri);
749
749
  }
750
- await this.attachmentService.withContext(async (ctx) => {
751
- await ctx.deleteAttachment(attachment.id);
752
- });
750
+ await context.deleteAttachment(attachment.id);
753
751
  return {
754
752
  ...attachment,
755
753
  state: exports.AttachmentState.ARCHIVED
@@ -787,32 +785,198 @@ class SyncingService {
787
785
  }
788
786
 
789
787
  /**
790
- * Wrapper for async-mutex runExclusive, which allows for a timeout on each exclusive lock.
788
+ * A simple fixed-capacity queue implementation.
789
+ *
790
+ * Unlike a naive queue implemented by `array.push()` and `array.shift()`, this avoids moving array elements around
791
+ * and is `O(1)` for {@link addLast} and {@link removeFirst}.
791
792
  */
792
- async function mutexRunExclusive(mutex, callback, options) {
793
- return new Promise((resolve, reject) => {
794
- const timeout = options?.timeoutMs;
795
- let timedOut = false;
796
- const timeoutId = timeout
797
- ? setTimeout(() => {
798
- timedOut = true;
799
- reject(new Error('Timeout waiting for lock'));
800
- }, timeout)
801
- : undefined;
802
- mutex.runExclusive(async () => {
803
- if (timeoutId) {
804
- clearTimeout(timeoutId);
805
- }
806
- if (timedOut)
807
- return;
808
- try {
809
- resolve(await callback());
793
+ class Queue {
794
+ table;
795
+ // Index of the first element in the table.
796
+ head;
797
+ // Amount of items currently in the queue.
798
+ _length;
799
+ constructor(initialItems) {
800
+ this.table = [...initialItems];
801
+ this.head = 0;
802
+ this._length = this.table.length;
803
+ }
804
+ get isEmpty() {
805
+ return this.length == 0;
806
+ }
807
+ get length() {
808
+ return this._length;
809
+ }
810
+ removeFirst() {
811
+ if (this.isEmpty) {
812
+ throw new Error('Queue is empty');
813
+ }
814
+ const result = this.table[this.head];
815
+ this._length--;
816
+ this.table[this.head] = undefined;
817
+ this.head = (this.head + 1) % this.table.length;
818
+ return result;
819
+ }
820
+ addLast(element) {
821
+ if (this.length == this.table.length) {
822
+ throw new Error('Queue is full');
823
+ }
824
+ this.table[(this.head + this._length) % this.table.length] = element;
825
+ this._length++;
826
+ }
827
+ }
828
+
829
+ /**
830
+ * An asynchronous semaphore implementation with associated items per lease.
831
+ *
832
+ * @internal This class is meant to be used in PowerSync SDKs only, and is not part of the public API.
833
+ */
834
+ class Semaphore {
835
+ // Available items that are not currently assigned to a waiter.
836
+ available;
837
+ size;
838
+ // Linked list of waiters. We don't expect the wait list to become particularly large, and this allows removing
839
+ // aborted waiters from the middle of the list efficiently.
840
+ firstWaiter;
841
+ lastWaiter;
842
+ constructor(elements) {
843
+ this.available = new Queue(elements);
844
+ this.size = this.available.length;
845
+ }
846
+ addWaiter(requestedItems, onAcquire) {
847
+ const node = {
848
+ isActive: true,
849
+ acquiredItems: [],
850
+ remainingItems: requestedItems,
851
+ onAcquire,
852
+ prev: this.lastWaiter
853
+ };
854
+ if (this.lastWaiter) {
855
+ this.lastWaiter.next = node;
856
+ this.lastWaiter = node;
857
+ }
858
+ else {
859
+ // First waiter
860
+ this.lastWaiter = this.firstWaiter = node;
861
+ }
862
+ return node;
863
+ }
864
+ deactivateWaiter(waiter) {
865
+ const { prev, next } = waiter;
866
+ waiter.isActive = false;
867
+ if (prev)
868
+ prev.next = next;
869
+ if (next)
870
+ next.prev = prev;
871
+ if (waiter == this.firstWaiter)
872
+ this.firstWaiter = next;
873
+ if (waiter == this.lastWaiter)
874
+ this.lastWaiter = prev;
875
+ }
876
+ requestPermits(amount, abort) {
877
+ if (amount <= 0 || amount > this.size) {
878
+ throw new Error(`Invalid amount of items requested (${amount}), must be between 1 and ${this.size}`);
879
+ }
880
+ return new Promise((resolve, reject) => {
881
+ function rejectAborted() {
882
+ reject(abort?.reason ?? new Error('Semaphore acquire aborted'));
883
+ }
884
+ if (abort?.aborted) {
885
+ return rejectAborted();
886
+ }
887
+ let waiter;
888
+ const markCompleted = () => {
889
+ const items = waiter.acquiredItems;
890
+ waiter.acquiredItems = []; // Avoid releasing items twice.
891
+ for (const element of items) {
892
+ // Give to next waiter, if possible.
893
+ const nextWaiter = this.firstWaiter;
894
+ if (nextWaiter) {
895
+ nextWaiter.acquiredItems.push(element);
896
+ nextWaiter.remainingItems--;
897
+ if (nextWaiter.remainingItems == 0) {
898
+ nextWaiter.onAcquire();
899
+ }
900
+ }
901
+ else {
902
+ // No pending waiter, return lease into pool.
903
+ this.available.addLast(element);
904
+ }
905
+ }
906
+ };
907
+ const onAbort = () => {
908
+ abort?.removeEventListener('abort', onAbort);
909
+ if (waiter.isActive) {
910
+ this.deactivateWaiter(waiter);
911
+ rejectAborted();
912
+ }
913
+ };
914
+ const resolvePromise = () => {
915
+ this.deactivateWaiter(waiter);
916
+ abort?.removeEventListener('abort', onAbort);
917
+ const items = waiter.acquiredItems;
918
+ resolve({ items, release: markCompleted });
919
+ };
920
+ waiter = this.addWaiter(amount, resolvePromise);
921
+ // If there are items in the pool that haven't been assigned, we can pull them into this waiter. Note that this is
922
+ // only the case if we're the first waiter (otherwise, items would have been assigned to an earlier waiter).
923
+ while (!this.available.isEmpty && waiter.remainingItems > 0) {
924
+ waiter.acquiredItems.push(this.available.removeFirst());
925
+ waiter.remainingItems--;
810
926
  }
811
- catch (ex) {
812
- reject(ex);
927
+ if (waiter.remainingItems == 0) {
928
+ return resolvePromise();
813
929
  }
930
+ abort?.addEventListener('abort', onAbort);
814
931
  });
815
- });
932
+ }
933
+ /**
934
+ * Requests a single item from the pool.
935
+ *
936
+ * The returned `release` callback must be invoked to return the item into the pool.
937
+ */
938
+ async requestOne(abort) {
939
+ const { items, release } = await this.requestPermits(1, abort);
940
+ return { release, item: items[0] };
941
+ }
942
+ /**
943
+ * Requests access to all items from the pool.
944
+ *
945
+ * The returned `release` callback must be invoked to return items into the pool.
946
+ */
947
+ requestAll(abort) {
948
+ return this.requestPermits(this.size, abort);
949
+ }
950
+ }
951
+ /**
952
+ * An asynchronous mutex implementation.
953
+ *
954
+ * @internal This class is meant to be used in PowerSync SDKs only, and is not part of the public API.
955
+ */
956
+ class Mutex {
957
+ inner = new Semaphore([null]);
958
+ async acquire(abort) {
959
+ const { release } = await this.inner.requestOne(abort);
960
+ return release;
961
+ }
962
+ async runExclusive(fn, abort) {
963
+ const returnMutex = await this.acquire(abort);
964
+ try {
965
+ return await fn();
966
+ }
967
+ finally {
968
+ returnMutex();
969
+ }
970
+ }
971
+ }
972
+ function timeoutSignal(timeout) {
973
+ if (timeout == null)
974
+ return;
975
+ if ('timeout' in AbortSignal)
976
+ return AbortSignal.timeout(timeout);
977
+ const controller = new AbortController();
978
+ setTimeout(() => controller.abort(new Error('Timeout waiting for lock')), timeout);
979
+ return controller.signal;
816
980
  }
817
981
 
818
982
  /**
@@ -824,7 +988,7 @@ class AttachmentService {
824
988
  db;
825
989
  logger;
826
990
  tableName;
827
- mutex = new asyncMutex.Mutex();
991
+ mutex = new Mutex();
828
992
  context;
829
993
  constructor(db, logger, tableName = 'attachments', archivedCacheLimit = 100) {
830
994
  this.db = db;
@@ -861,7 +1025,7 @@ class AttachmentService {
861
1025
  * Executes a callback with exclusive access to the attachment context.
862
1026
  */
863
1027
  async withContext(callback) {
864
- return mutexRunExclusive(this.mutex, async () => {
1028
+ return this.mutex.runExclusive(async () => {
865
1029
  return callback(this.context);
866
1030
  });
867
1031
  }
@@ -897,9 +1061,15 @@ class AttachmentQueue {
897
1061
  tableName;
898
1062
  /** Logger instance for diagnostic information */
899
1063
  logger;
900
- /** Interval in milliseconds between periodic sync operations. Default: 30000 (30 seconds) */
1064
+ /** Interval in milliseconds between periodic sync operations. Acts as a polling timer to retry
1065
+ * failed uploads/downloads, especially after the app goes offline. Default: 30000 (30 seconds) */
901
1066
  syncIntervalMs = 30 * 1000;
902
- /** Duration in milliseconds to throttle sync operations */
1067
+ /** Throttle duration in milliseconds for the reactive watch query on the attachments table.
1068
+ * When attachment records change, a watch query detects the change and triggers a sync.
1069
+ * This throttle prevents the sync from firing too rapidly when many changes happen in
1070
+ * quick succession (e.g., bulk inserts). This is distinct from syncIntervalMs — it controls
1071
+ * how quickly the queue reacts to changes, while syncIntervalMs controls how often it polls
1072
+ * for retries. Default: 30 (from DEFAULT_WATCH_THROTTLE_MS) */
903
1073
  syncThrottleDuration;
904
1074
  /** Whether to automatically download remote attachments. Default: true */
905
1075
  downloadAttachments = true;
@@ -923,8 +1093,8 @@ class AttachmentQueue {
923
1093
  * @param options.watchAttachments - Callback for monitoring attachment changes in your data model
924
1094
  * @param options.tableName - Name of the table to store attachment records. Default: 'ps_attachment_queue'
925
1095
  * @param options.logger - Logger instance. Defaults to db.logger
926
- * @param options.syncIntervalMs - Interval between automatic syncs in milliseconds. Default: 30000
927
- * @param options.syncThrottleDuration - Throttle duration for sync operations in milliseconds. Default: 1000
1096
+ * @param options.syncIntervalMs - Periodic polling interval in milliseconds for retrying failed uploads/downloads. Default: 30000
1097
+ * @param options.syncThrottleDuration - Throttle duration in milliseconds for the reactive watch query that detects attachment changes. Prevents rapid-fire syncs during bulk changes. Default: 30
928
1098
  * @param options.downloadAttachments - Whether to automatically download remote attachments. Default: true
929
1099
  * @param options.archivedCacheLimit - Maximum archived attachments before cleanup. Default: 100
930
1100
  */
@@ -1532,6 +1702,49 @@ var Logger = /*@__PURE__*/getDefaultExportFromCjs(loggerExports);
1532
1702
  * Set of generic interfaces to allow PowerSync compatibility with
1533
1703
  * different SQLite DB implementations.
1534
1704
  */
1705
+ /**
1706
+ * Implements {@link DBGetUtils} on a {@link SqlRunner}.
1707
+ */
1708
+ function DBGetUtilsDefaultMixin(Base) {
1709
+ return class extends Base {
1710
+ async getAll(sql, parameters) {
1711
+ const res = await this.execute(sql, parameters);
1712
+ return res.rows?._array ?? [];
1713
+ }
1714
+ async getOptional(sql, parameters) {
1715
+ const res = await this.execute(sql, parameters);
1716
+ return res.rows?.item(0) ?? null;
1717
+ }
1718
+ async get(sql, parameters) {
1719
+ const res = await this.execute(sql, parameters);
1720
+ const first = res.rows?.item(0);
1721
+ if (!first) {
1722
+ throw new Error('Result set is empty');
1723
+ }
1724
+ return first;
1725
+ }
1726
+ async executeBatch(query, params = []) {
1727
+ // If this context can run batch statements natively, use that.
1728
+ // @ts-ignore
1729
+ if (super.executeBatch) {
1730
+ // @ts-ignore
1731
+ return super.executeBatch(query, params);
1732
+ }
1733
+ // Emulate executeBatch by running statements individually.
1734
+ let lastInsertId;
1735
+ let rowsAffected = 0;
1736
+ for (const set of params) {
1737
+ const result = await this.execute(query, set);
1738
+ lastInsertId = result.insertId;
1739
+ rowsAffected += result.rowsAffected;
1740
+ }
1741
+ return {
1742
+ rowsAffected,
1743
+ insertId: lastInsertId
1744
+ };
1745
+ }
1746
+ };
1747
+ }
1535
1748
  /**
1536
1749
  * Update table operation numbers from SQLite
1537
1750
  */
@@ -1541,6 +1754,89 @@ exports.RowUpdateType = void 0;
1541
1754
  RowUpdateType[RowUpdateType["SQLITE_DELETE"] = 9] = "SQLITE_DELETE";
1542
1755
  RowUpdateType[RowUpdateType["SQLITE_UPDATE"] = 23] = "SQLITE_UPDATE";
1543
1756
  })(exports.RowUpdateType || (exports.RowUpdateType = {}));
1757
+ /**
1758
+ * A mixin to implement {@link DBAdapter} by delegating to {@link ConnectionPool.readLock} and
1759
+ * {@link ConnectionPool.writeLock}.
1760
+ */
1761
+ function DBAdapterDefaultMixin(Base) {
1762
+ return class extends Base {
1763
+ readTransaction(fn, options) {
1764
+ return this.readLock((ctx) => TransactionImplementation.runWith(ctx, fn), options);
1765
+ }
1766
+ writeTransaction(fn, options) {
1767
+ return this.writeLock((ctx) => TransactionImplementation.runWith(ctx, fn), options);
1768
+ }
1769
+ getAll(sql, parameters) {
1770
+ return this.readLock((ctx) => ctx.getAll(sql, parameters));
1771
+ }
1772
+ getOptional(sql, parameters) {
1773
+ return this.readLock((ctx) => ctx.getOptional(sql, parameters));
1774
+ }
1775
+ get(sql, parameters) {
1776
+ return this.readLock((ctx) => ctx.get(sql, parameters));
1777
+ }
1778
+ execute(query, params) {
1779
+ return this.writeLock((ctx) => ctx.execute(query, params));
1780
+ }
1781
+ executeRaw(query, params) {
1782
+ return this.writeLock((ctx) => ctx.executeRaw(query, params));
1783
+ }
1784
+ executeBatch(query, params) {
1785
+ return this.writeTransaction((tx) => tx.executeBatch(query, params));
1786
+ }
1787
+ };
1788
+ }
1789
+ class BaseTransaction {
1790
+ inner;
1791
+ finalized = false;
1792
+ constructor(inner) {
1793
+ this.inner = inner;
1794
+ }
1795
+ async commit() {
1796
+ if (this.finalized) {
1797
+ return { rowsAffected: 0 };
1798
+ }
1799
+ this.finalized = true;
1800
+ return this.inner.execute('COMMIT');
1801
+ }
1802
+ async rollback() {
1803
+ if (this.finalized) {
1804
+ return { rowsAffected: 0 };
1805
+ }
1806
+ this.finalized = true;
1807
+ return this.inner.execute('ROLLBACK');
1808
+ }
1809
+ execute(query, params) {
1810
+ return this.inner.execute(query, params);
1811
+ }
1812
+ executeRaw(query, params) {
1813
+ return this.inner.executeRaw(query, params);
1814
+ }
1815
+ executeBatch(query, params) {
1816
+ return this.inner.executeBatch(query, params);
1817
+ }
1818
+ }
1819
+ class TransactionImplementation extends DBGetUtilsDefaultMixin(BaseTransaction) {
1820
+ static async runWith(ctx, fn) {
1821
+ let tx = new TransactionImplementation(ctx);
1822
+ try {
1823
+ await ctx.execute('BEGIN IMMEDIATE');
1824
+ const result = await fn(tx);
1825
+ await tx.commit();
1826
+ return result;
1827
+ }
1828
+ catch (ex) {
1829
+ try {
1830
+ await tx.rollback();
1831
+ }
1832
+ catch (ex2) {
1833
+ // In rare cases, a rollback may fail.
1834
+ // Safe to ignore.
1835
+ }
1836
+ throw ex;
1837
+ }
1838
+ }
1839
+ }
1544
1840
  function isBatchedUpdateNotification(update) {
1545
1841
  return 'tables' in update;
1546
1842
  }
@@ -1961,15 +2257,6 @@ class ControlledExecutor {
1961
2257
  }
1962
2258
  }
1963
2259
 
1964
- /**
1965
- * A ponyfill for `Symbol.asyncIterator` that is compatible with the
1966
- * [recommended polyfill](https://github.com/Azure/azure-sdk-for-js/blob/%40azure/core-asynciterator-polyfill_1.0.2/sdk/core/core-asynciterator-polyfill/src/index.ts#L4-L6)
1967
- * we recommend for React Native.
1968
- *
1969
- * As long as we use this symbol (instead of `for await` and `async *`) in this package, we can be compatible with async
1970
- * iterators without requiring them.
1971
- */
1972
- const symbolAsyncIterator = Symbol.asyncIterator ?? Symbol.for('Symbol.asyncIterator');
1973
2260
  /**
1974
2261
  * Throttle a function to be called at most once every "wait" milliseconds,
1975
2262
  * on the trailing edge.
@@ -7935,177 +8222,10 @@ function requireDist () {
7935
8222
 
7936
8223
  var distExports = requireDist();
7937
8224
 
7938
- var version = "1.48.0";
8225
+ var version = "1.51.0";
7939
8226
  var PACKAGE = {
7940
8227
  version: version};
7941
8228
 
7942
- const DEFAULT_PRESSURE_LIMITS = {
7943
- highWater: 10,
7944
- lowWater: 0
7945
- };
7946
- /**
7947
- * A very basic implementation of a data stream with backpressure support which does not use
7948
- * native JS streams or async iterators.
7949
- * This is handy for environments such as React Native which need polyfills for the above.
7950
- */
7951
- class DataStream extends BaseObserver {
7952
- options;
7953
- dataQueue;
7954
- isClosed;
7955
- processingPromise;
7956
- notifyDataAdded;
7957
- logger;
7958
- mapLine;
7959
- constructor(options) {
7960
- super();
7961
- this.options = options;
7962
- this.processingPromise = null;
7963
- this.isClosed = false;
7964
- this.dataQueue = [];
7965
- this.mapLine = options?.mapLine ?? ((line) => line);
7966
- this.logger = options?.logger ?? Logger.get('DataStream');
7967
- if (options?.closeOnError) {
7968
- const l = this.registerListener({
7969
- error: (ex) => {
7970
- l?.();
7971
- this.close();
7972
- }
7973
- });
7974
- }
7975
- }
7976
- get highWatermark() {
7977
- return this.options?.pressure?.highWaterMark ?? DEFAULT_PRESSURE_LIMITS.highWater;
7978
- }
7979
- get lowWatermark() {
7980
- return this.options?.pressure?.lowWaterMark ?? DEFAULT_PRESSURE_LIMITS.lowWater;
7981
- }
7982
- get closed() {
7983
- return this.isClosed;
7984
- }
7985
- async close() {
7986
- this.isClosed = true;
7987
- await this.processingPromise;
7988
- this.iterateListeners((l) => l.closed?.());
7989
- // Discard any data in the queue
7990
- this.dataQueue = [];
7991
- this.listeners.clear();
7992
- }
7993
- /**
7994
- * Enqueues data for the consumers to read
7995
- */
7996
- enqueueData(data) {
7997
- if (this.isClosed) {
7998
- throw new Error('Cannot enqueue data into closed stream.');
7999
- }
8000
- this.dataQueue.push(data);
8001
- this.notifyDataAdded?.();
8002
- this.processQueue();
8003
- }
8004
- /**
8005
- * Reads data once from the data stream
8006
- * @returns a Data payload or Null if the stream closed.
8007
- */
8008
- async read() {
8009
- if (this.closed) {
8010
- return null;
8011
- }
8012
- // Wait for any pending processing to complete first.
8013
- // This ensures we register our listener before calling processQueue(),
8014
- // avoiding a race where processQueue() sees no reader and returns early.
8015
- if (this.processingPromise) {
8016
- await this.processingPromise;
8017
- }
8018
- // Re-check after await - stream may have closed while we were waiting
8019
- if (this.closed) {
8020
- return null;
8021
- }
8022
- return new Promise((resolve, reject) => {
8023
- const l = this.registerListener({
8024
- data: async (data) => {
8025
- resolve(data);
8026
- // Remove the listener
8027
- l?.();
8028
- },
8029
- closed: () => {
8030
- resolve(null);
8031
- l?.();
8032
- },
8033
- error: (ex) => {
8034
- reject(ex);
8035
- l?.();
8036
- }
8037
- });
8038
- this.processQueue();
8039
- });
8040
- }
8041
- /**
8042
- * Executes a callback for each data item in the stream
8043
- */
8044
- forEach(callback) {
8045
- if (this.dataQueue.length <= this.lowWatermark) {
8046
- this.iterateAsyncErrored(async (l) => l.lowWater?.());
8047
- }
8048
- return this.registerListener({
8049
- data: callback
8050
- });
8051
- }
8052
- processQueue() {
8053
- if (this.processingPromise) {
8054
- return;
8055
- }
8056
- const promise = (this.processingPromise = this._processQueue());
8057
- promise.finally(() => {
8058
- this.processingPromise = null;
8059
- });
8060
- return promise;
8061
- }
8062
- hasDataReader() {
8063
- return Array.from(this.listeners.values()).some((l) => !!l.data);
8064
- }
8065
- async _processQueue() {
8066
- /**
8067
- * Allow listeners to mutate the queue before processing.
8068
- * This allows for operations such as dropping or compressing data
8069
- * on high water or requesting more data on low water.
8070
- */
8071
- if (this.dataQueue.length >= this.highWatermark) {
8072
- await this.iterateAsyncErrored(async (l) => l.highWater?.());
8073
- }
8074
- if (this.isClosed || !this.hasDataReader()) {
8075
- return;
8076
- }
8077
- if (this.dataQueue.length) {
8078
- const data = this.dataQueue.shift();
8079
- const mapped = this.mapLine(data);
8080
- await this.iterateAsyncErrored(async (l) => l.data?.(mapped));
8081
- }
8082
- if (this.dataQueue.length <= this.lowWatermark) {
8083
- const dataAdded = new Promise((resolve) => {
8084
- this.notifyDataAdded = resolve;
8085
- });
8086
- await Promise.race([this.iterateAsyncErrored(async (l) => l.lowWater?.()), dataAdded]);
8087
- this.notifyDataAdded = null;
8088
- }
8089
- if (this.dataQueue.length > 0) {
8090
- setTimeout(() => this.processQueue());
8091
- }
8092
- }
8093
- async iterateAsyncErrored(cb) {
8094
- // Important: We need to copy the listeners, as calling a listener could result in adding another
8095
- // listener, resulting in infinite loops.
8096
- const listeners = Array.from(this.listeners.values());
8097
- for (let i of listeners) {
8098
- try {
8099
- await cb(i);
8100
- }
8101
- catch (ex) {
8102
- this.logger.error(ex);
8103
- this.iterateListeners((l) => l.error?.(ex));
8104
- }
8105
- }
8106
- }
8107
- }
8108
-
8109
8229
  var WebsocketDuplexConnection = {};
8110
8230
 
8111
8231
  var hasRequiredWebsocketDuplexConnection;
@@ -8268,8 +8388,215 @@ class WebsocketClientTransport {
8268
8388
  }
8269
8389
  }
8270
8390
 
8391
+ const doneResult = { done: true, value: undefined };
8392
+ function valueResult(value) {
8393
+ return { done: false, value };
8394
+ }
8395
+ /**
8396
+ * A variant of {@link Array.map} for async iterators.
8397
+ */
8398
+ function map(source, map) {
8399
+ return {
8400
+ next: async () => {
8401
+ const value = await source.next();
8402
+ if (value.done) {
8403
+ return value;
8404
+ }
8405
+ else {
8406
+ return { value: map(value.value) };
8407
+ }
8408
+ }
8409
+ };
8410
+ }
8411
+ /**
8412
+ * Expands a source async iterator by allowing to inject events asynchronously.
8413
+ *
8414
+ * The resulting iterator will emit all events from its source. Additionally though, events can be injected. These
8415
+ * events are dropped once the main iterator completes, but are otherwise forwarded.
8416
+ *
8417
+ * The iterator completes when its source completes, and it supports backpressure by only calling `next()` on the source
8418
+ * in response to a `next()` call from downstream if no pending injected events can be dispatched.
8419
+ */
8420
+ function injectable(source) {
8421
+ let sourceIsDone = false;
8422
+ let waiter = undefined; // An active, waiting next() call.
8423
+ // A pending upstream event that couldn't be dispatched because inject() has been called before it was resolved.
8424
+ let pendingSourceEvent = null;
8425
+ let pendingInjectedEvents = [];
8426
+ const consumeWaiter = () => {
8427
+ const pending = waiter;
8428
+ waiter = undefined;
8429
+ return pending;
8430
+ };
8431
+ const fetchFromSource = () => {
8432
+ const resolveWaiter = (propagate) => {
8433
+ const active = consumeWaiter();
8434
+ if (active) {
8435
+ propagate(active);
8436
+ }
8437
+ else {
8438
+ pendingSourceEvent = propagate;
8439
+ }
8440
+ };
8441
+ const nextFromSource = source.next();
8442
+ nextFromSource.then((value) => {
8443
+ sourceIsDone = value.done == true;
8444
+ resolveWaiter((w) => w.resolve(value));
8445
+ }, (error) => {
8446
+ resolveWaiter((w) => w.reject(error));
8447
+ });
8448
+ };
8449
+ return {
8450
+ next: () => {
8451
+ return new Promise((resolve, reject) => {
8452
+ // First priority: Dispatch ready upstream events.
8453
+ if (sourceIsDone) {
8454
+ return resolve(doneResult);
8455
+ }
8456
+ if (pendingSourceEvent) {
8457
+ pendingSourceEvent({ resolve, reject });
8458
+ pendingSourceEvent = null;
8459
+ return;
8460
+ }
8461
+ // Second priority: Dispatch injected events
8462
+ if (pendingInjectedEvents.length) {
8463
+ return resolve(valueResult(pendingInjectedEvents.shift()));
8464
+ }
8465
+ // Nothing pending? Fetch from source
8466
+ waiter = { resolve, reject };
8467
+ return fetchFromSource();
8468
+ });
8469
+ },
8470
+ inject: (event) => {
8471
+ const pending = consumeWaiter();
8472
+ if (pending != null) {
8473
+ pending.resolve(valueResult(event));
8474
+ }
8475
+ else {
8476
+ pendingInjectedEvents.push(event);
8477
+ }
8478
+ }
8479
+ };
8480
+ }
8481
+ /**
8482
+ * Splits a byte stream at line endings, emitting each line as a string.
8483
+ */
8484
+ function extractJsonLines(source, decoder) {
8485
+ let buffer = '';
8486
+ const pendingLines = [];
8487
+ let isFinalEvent = false;
8488
+ return {
8489
+ next: async () => {
8490
+ while (true) {
8491
+ if (isFinalEvent) {
8492
+ return doneResult;
8493
+ }
8494
+ {
8495
+ const first = pendingLines.shift();
8496
+ if (first) {
8497
+ return { done: false, value: first };
8498
+ }
8499
+ }
8500
+ const { done, value } = await source.next();
8501
+ if (done) {
8502
+ const remaining = buffer.trim();
8503
+ if (remaining.length != 0) {
8504
+ isFinalEvent = true;
8505
+ return { done: false, value: remaining };
8506
+ }
8507
+ return doneResult;
8508
+ }
8509
+ const data = decoder.decode(value, { stream: true });
8510
+ buffer += data;
8511
+ const lines = buffer.split('\n');
8512
+ for (let i = 0; i < lines.length - 1; i++) {
8513
+ const l = lines[i].trim();
8514
+ if (l.length > 0) {
8515
+ pendingLines.push(l);
8516
+ }
8517
+ }
8518
+ buffer = lines[lines.length - 1];
8519
+ }
8520
+ }
8521
+ };
8522
+ }
8523
+ /**
8524
+ * Splits a concatenated stream of BSON objects by emitting individual objects.
8525
+ */
8526
+ function extractBsonObjects(source) {
8527
+ // Fully read but not emitted yet.
8528
+ const completedObjects = [];
8529
+ // Whether source has returned { done: true }. We do the same once completed objects have been emitted.
8530
+ let isDone = false;
8531
+ const lengthBuffer = new DataView(new ArrayBuffer(4));
8532
+ let objectBody = null;
8533
+ // If we're parsing the length field, a number between 1 and 4 (inclusive) describing remaining bytes in the header.
8534
+ // If we're consuming a document, the bytes remaining.
8535
+ let remainingLength = 4;
8536
+ return {
8537
+ async next() {
8538
+ while (true) {
8539
+ // Before fetching new data from upstream, return completed objects.
8540
+ if (completedObjects.length) {
8541
+ return valueResult(completedObjects.shift());
8542
+ }
8543
+ if (isDone) {
8544
+ return doneResult;
8545
+ }
8546
+ const upstreamEvent = await source.next();
8547
+ if (upstreamEvent.done) {
8548
+ isDone = true;
8549
+ if (objectBody || remainingLength != 4) {
8550
+ throw new Error('illegal end of stream in BSON object');
8551
+ }
8552
+ return doneResult;
8553
+ }
8554
+ const chunk = upstreamEvent.value;
8555
+ for (let i = 0; i < chunk.length;) {
8556
+ const availableInData = chunk.length - i;
8557
+ if (objectBody) {
8558
+ // We're in the middle of reading a BSON document.
8559
+ const bytesToRead = Math.min(availableInData, remainingLength);
8560
+ const copySource = new Uint8Array(chunk.buffer, chunk.byteOffset + i, bytesToRead);
8561
+ objectBody.set(copySource, objectBody.length - remainingLength);
8562
+ i += bytesToRead;
8563
+ remainingLength -= bytesToRead;
8564
+ if (remainingLength == 0) {
8565
+ completedObjects.push(objectBody);
8566
+ // Prepare to read another document, starting with its length
8567
+ objectBody = null;
8568
+ remainingLength = 4;
8569
+ }
8570
+ }
8571
+ else {
8572
+ // Copy up to 4 bytes into lengthBuffer, depending on how many we still need.
8573
+ const bytesToRead = Math.min(availableInData, remainingLength);
8574
+ for (let j = 0; j < bytesToRead; j++) {
8575
+ lengthBuffer.setUint8(4 - remainingLength + j, chunk[i + j]);
8576
+ }
8577
+ i += bytesToRead;
8578
+ remainingLength -= bytesToRead;
8579
+ if (remainingLength == 0) {
8580
+ // Transition from reading length header to reading document. Subtracting 4 because the length of the
8581
+ // header is included in length.
8582
+ const length = lengthBuffer.getInt32(0, true /* little endian */);
8583
+ remainingLength = length - 4;
8584
+ if (remainingLength < 1) {
8585
+ throw new Error(`invalid length for bson: ${length}`);
8586
+ }
8587
+ objectBody = new Uint8Array(length);
8588
+ new DataView(objectBody.buffer).setInt32(0, length, true);
8589
+ }
8590
+ }
8591
+ }
8592
+ }
8593
+ }
8594
+ };
8595
+ }
8596
+
8271
8597
  const POWERSYNC_TRAILING_SLASH_MATCH = /\/+$/;
8272
8598
  const POWERSYNC_JS_VERSION = PACKAGE.version;
8599
+ const SYNC_QUEUE_REQUEST_HIGH_WATER = 10;
8273
8600
  const SYNC_QUEUE_REQUEST_LOW_WATER = 5;
8274
8601
  // Keep alive message is sent every period
8275
8602
  const KEEP_ALIVE_MS = 20_000;
@@ -8449,13 +8776,14 @@ class AbstractRemote {
8449
8776
  return new WebSocket(url);
8450
8777
  }
8451
8778
  /**
8452
- * Returns a data stream of sync line data.
8779
+ * Returns a data stream of sync line data, fetched via RSocket-over-WebSocket.
8780
+ *
8781
+ * The only mechanism to abort the returned stream is to use the abort signal in {@link SocketSyncStreamOptions}.
8453
8782
  *
8454
- * @param map Maps received payload frames to the typed event value.
8455
8783
  * @param bson A BSON encoder and decoder. When set, the data stream will be requested with a BSON payload
8456
8784
  * (required for compatibility with older sync services).
8457
8785
  */
8458
- async socketStreamRaw(options, map, bson) {
8786
+ async socketStreamRaw(options, bson) {
8459
8787
  const { path, fetchStrategy = exports.FetchStrategy.Buffered } = options;
8460
8788
  const mimeType = bson == null ? 'application/json' : 'application/bson';
8461
8789
  function toBuffer(js) {
@@ -8470,52 +8798,55 @@ class AbstractRemote {
8470
8798
  }
8471
8799
  const syncQueueRequestSize = fetchStrategy == exports.FetchStrategy.Buffered ? 10 : 1;
8472
8800
  const request = await this.buildRequest(path);
8801
+ const url = this.options.socketUrlTransformer(request.url);
8473
8802
  // Add the user agent in the setup payload - we can't set custom
8474
8803
  // headers with websockets on web. The browser userAgent is however added
8475
8804
  // automatically as a header.
8476
8805
  const userAgent = this.getUserAgent();
8477
- const stream = new DataStream({
8478
- logger: this.logger,
8479
- pressure: {
8480
- lowWaterMark: SYNC_QUEUE_REQUEST_LOW_WATER
8481
- },
8482
- mapLine: map
8483
- });
8806
+ // While we're connecting (a process that can't be aborted in RSocket), the WebSocket instance to close if we wanted
8807
+ // to abort the connection.
8808
+ let pendingSocket = null;
8809
+ let keepAliveTimeout;
8810
+ let rsocket = null;
8811
+ let queue = null;
8812
+ let didClose = false;
8813
+ const abortRequest = () => {
8814
+ if (didClose) {
8815
+ return;
8816
+ }
8817
+ didClose = true;
8818
+ clearTimeout(keepAliveTimeout);
8819
+ if (pendingSocket) {
8820
+ pendingSocket.close();
8821
+ }
8822
+ if (rsocket) {
8823
+ rsocket.close();
8824
+ }
8825
+ if (queue) {
8826
+ queue.stop();
8827
+ }
8828
+ };
8484
8829
  // Handle upstream abort
8485
- if (options.abortSignal?.aborted) {
8830
+ if (options.abortSignal.aborted) {
8486
8831
  throw new AbortOperation('Connection request aborted');
8487
8832
  }
8488
8833
  else {
8489
- options.abortSignal?.addEventListener('abort', () => {
8490
- stream.close();
8491
- }, { once: true });
8834
+ options.abortSignal.addEventListener('abort', abortRequest);
8492
8835
  }
8493
- let keepAliveTimeout;
8494
8836
  const resetTimeout = () => {
8495
8837
  clearTimeout(keepAliveTimeout);
8496
8838
  keepAliveTimeout = setTimeout(() => {
8497
8839
  this.logger.error(`No data received on WebSocket in ${SOCKET_TIMEOUT_MS}ms, closing connection.`);
8498
- stream.close();
8840
+ abortRequest();
8499
8841
  }, SOCKET_TIMEOUT_MS);
8500
8842
  };
8501
8843
  resetTimeout();
8502
- // Typescript complains about this being `never` if it's not assigned here.
8503
- // This is assigned in `wsCreator`.
8504
- let disposeSocketConnectionTimeout = () => { };
8505
- const url = this.options.socketUrlTransformer(request.url);
8506
8844
  const connector = new distExports.RSocketConnector({
8507
8845
  transport: new WebsocketClientTransport({
8508
8846
  url,
8509
8847
  wsCreator: (url) => {
8510
- const socket = this.createSocket(url);
8511
- disposeSocketConnectionTimeout = stream.registerListener({
8512
- closed: () => {
8513
- // Allow closing the underlying WebSocket if the stream was closed before the
8514
- // RSocket connect completed. This should effectively abort the request.
8515
- socket.close();
8516
- }
8517
- });
8518
- socket.addEventListener('message', (event) => {
8848
+ const socket = (pendingSocket = this.createSocket(url));
8849
+ socket.addEventListener('message', () => {
8519
8850
  resetTimeout();
8520
8851
  });
8521
8852
  return socket;
@@ -8535,43 +8866,40 @@ class AbstractRemote {
8535
8866
  }
8536
8867
  }
8537
8868
  });
8538
- let rsocket;
8539
8869
  try {
8540
8870
  rsocket = await connector.connect();
8541
8871
  // The connection is established, we no longer need to monitor the initial timeout
8542
- disposeSocketConnectionTimeout();
8872
+ pendingSocket = null;
8543
8873
  }
8544
8874
  catch (ex) {
8545
8875
  this.logger.error(`Failed to connect WebSocket`, ex);
8546
- clearTimeout(keepAliveTimeout);
8547
- if (!stream.closed) {
8548
- await stream.close();
8549
- }
8876
+ abortRequest();
8550
8877
  throw ex;
8551
8878
  }
8552
8879
  resetTimeout();
8553
- let socketIsClosed = false;
8554
- const closeSocket = () => {
8555
- clearTimeout(keepAliveTimeout);
8556
- if (socketIsClosed) {
8557
- return;
8558
- }
8559
- socketIsClosed = true;
8560
- rsocket.close();
8561
- };
8562
8880
  // Helps to prevent double close scenarios
8563
- rsocket.onClose(() => (socketIsClosed = true));
8564
- // We initially request this amount and expect these to arrive eventually
8565
- let pendingEventsCount = syncQueueRequestSize;
8566
- const disposeClosedListener = stream.registerListener({
8567
- closed: () => {
8568
- closeSocket();
8569
- disposeClosedListener();
8570
- }
8571
- });
8572
- const socket = await new Promise((resolve, reject) => {
8881
+ rsocket.onClose(() => (rsocket = null));
8882
+ return await new Promise((resolve, reject) => {
8573
8883
  let connectionEstablished = false;
8574
- const res = rsocket.requestStream({
8884
+ let pendingEventsCount = syncQueueRequestSize;
8885
+ let paused = false;
8886
+ let res = null;
8887
+ function requestMore() {
8888
+ const delta = syncQueueRequestSize - pendingEventsCount;
8889
+ if (!paused && delta > 0) {
8890
+ res?.request(delta);
8891
+ pendingEventsCount = syncQueueRequestSize;
8892
+ }
8893
+ }
8894
+ const events = new eventIterator.EventIterator((q) => {
8895
+ queue = q;
8896
+ q.on('highWater', () => (paused = true));
8897
+ q.on('lowWater', () => {
8898
+ paused = false;
8899
+ requestMore();
8900
+ });
8901
+ }, { highWaterMark: SYNC_QUEUE_REQUEST_HIGH_WATER, lowWaterMark: SYNC_QUEUE_REQUEST_LOW_WATER })[Symbol.asyncIterator]();
8902
+ res = rsocket.requestStream({
8575
8903
  data: toBuffer(options.data),
8576
8904
  metadata: toBuffer({
8577
8905
  path
@@ -8596,7 +8924,7 @@ class AbstractRemote {
8596
8924
  }
8597
8925
  // RSocket will close the RSocket stream automatically
8598
8926
  // Close the downstream stream as well - this will close the RSocket connection and WebSocket
8599
- stream.close();
8927
+ abortRequest();
8600
8928
  // Handles cases where the connection failed e.g. auth error or connection error
8601
8929
  if (!connectionEstablished) {
8602
8930
  reject(e);
@@ -8606,41 +8934,40 @@ class AbstractRemote {
8606
8934
  // The connection is active
8607
8935
  if (!connectionEstablished) {
8608
8936
  connectionEstablished = true;
8609
- resolve(res);
8937
+ resolve(events);
8610
8938
  }
8611
8939
  const { data } = payload;
8940
+ if (data) {
8941
+ queue.push(data);
8942
+ }
8612
8943
  // Less events are now pending
8613
8944
  pendingEventsCount--;
8614
- if (!data) {
8615
- return;
8616
- }
8617
- stream.enqueueData(data);
8945
+ // Request another event (unless the downstream consumer is paused).
8946
+ requestMore();
8618
8947
  },
8619
8948
  onComplete: () => {
8620
- stream.close();
8949
+ abortRequest(); // this will also emit a done event
8621
8950
  },
8622
8951
  onExtension: () => { }
8623
8952
  });
8624
8953
  });
8625
- const l = stream.registerListener({
8626
- lowWater: async () => {
8627
- // Request to fill up the queue
8628
- const required = syncQueueRequestSize - pendingEventsCount;
8629
- if (required > 0) {
8630
- socket.request(syncQueueRequestSize - pendingEventsCount);
8631
- pendingEventsCount = syncQueueRequestSize;
8632
- }
8633
- },
8634
- closed: () => {
8635
- l();
8636
- }
8637
- });
8638
- return stream;
8639
8954
  }
8640
8955
  /**
8641
- * Connects to the sync/stream http endpoint, mapping and emitting each received string line.
8956
+ * @returns Whether the HTTP implementation on this platform can receive streamed binary responses. This is true on
8957
+ * all platforms except React Native (who would have guessed...), where we must not request BSON responses.
8958
+ *
8959
+ * @see https://github.com/react-native-community/fetch?tab=readme-ov-file#motivation
8960
+ */
8961
+ get supportsStreamingBinaryResponses() {
8962
+ return true;
8963
+ }
8964
+ /**
8965
+ * Posts a `/sync/stream` request, asserts that it completes successfully and returns the streaming response as an
8966
+ * async iterator of byte blobs.
8967
+ *
8968
+ * To cancel the async iterator, use the abort signal from {@link SyncStreamOptions} passed to this method.
8642
8969
  */
8643
- async postStreamRaw(options, mapLine) {
8970
+ async fetchStreamRaw(options) {
8644
8971
  const { data, path, headers, abortSignal } = options;
8645
8972
  const request = await this.buildRequest(path);
8646
8973
  /**
@@ -8652,119 +8979,94 @@ class AbstractRemote {
8652
8979
  * Aborting the active fetch request while it is being consumed seems to throw
8653
8980
  * an unhandled exception on the window level.
8654
8981
  */
8655
- if (abortSignal?.aborted) {
8656
- throw new AbortOperation('Abort request received before making postStreamRaw request');
8982
+ if (abortSignal.aborted) {
8983
+ throw new AbortOperation('Abort request received before making fetchStreamRaw request');
8657
8984
  }
8658
8985
  const controller = new AbortController();
8659
- let requestResolved = false;
8660
- abortSignal?.addEventListener('abort', () => {
8661
- if (!requestResolved) {
8986
+ let reader = null;
8987
+ abortSignal.addEventListener('abort', () => {
8988
+ const reason = abortSignal.reason ??
8989
+ new AbortOperation('Cancelling network request before it resolves. Abort signal has been received.');
8990
+ if (reader == null) {
8662
8991
  // Only abort via the abort controller if the request has not resolved yet
8663
- controller.abort(abortSignal.reason ??
8664
- new AbortOperation('Cancelling network request before it resolves. Abort signal has been received.'));
8992
+ controller.abort(reason);
8993
+ }
8994
+ else {
8995
+ reader.cancel(reason).catch(() => {
8996
+ // Cancelling the reader might rethrow an exception we would have handled by throwing in next(). So we can
8997
+ // ignore it here.
8998
+ });
8665
8999
  }
8666
9000
  });
8667
- const res = await this.fetch(request.url, {
8668
- method: 'POST',
8669
- headers: { ...headers, ...request.headers },
8670
- body: JSON.stringify(data),
8671
- signal: controller.signal,
8672
- cache: 'no-store',
8673
- ...(this.options.fetchOptions ?? {}),
8674
- ...options.fetchOptions
8675
- }).catch((ex) => {
9001
+ let res;
9002
+ let responseIsBson = false;
9003
+ try {
9004
+ const ndJson = 'application/x-ndjson';
9005
+ const bson = 'application/vnd.powersync.bson-stream';
9006
+ res = await this.fetch(request.url, {
9007
+ method: 'POST',
9008
+ headers: {
9009
+ ...headers,
9010
+ ...request.headers,
9011
+ accept: this.supportsStreamingBinaryResponses ? `${bson};q=0.9,${ndJson};q=0.8` : ndJson
9012
+ },
9013
+ body: JSON.stringify(data),
9014
+ signal: controller.signal,
9015
+ cache: 'no-store',
9016
+ ...(this.options.fetchOptions ?? {}),
9017
+ ...options.fetchOptions
9018
+ });
9019
+ if (!res.ok || !res.body) {
9020
+ const text = await res.text();
9021
+ this.logger.error(`Could not POST streaming to ${path} - ${res.status} - ${res.statusText}: ${text}`);
9022
+ const error = new Error(`HTTP ${res.statusText}: ${text}`);
9023
+ error.status = res.status;
9024
+ throw error;
9025
+ }
9026
+ const contentType = res.headers.get('content-type');
9027
+ responseIsBson = contentType == bson;
9028
+ }
9029
+ catch (ex) {
8676
9030
  if (ex.name == 'AbortError') {
8677
9031
  throw new AbortOperation(`Pending fetch request to ${request.url} has been aborted.`);
8678
9032
  }
8679
9033
  throw ex;
8680
- });
8681
- if (!res) {
8682
- throw new Error('Fetch request was aborted');
8683
- }
8684
- requestResolved = true;
8685
- if (!res.ok || !res.body) {
8686
- const text = await res.text();
8687
- this.logger.error(`Could not POST streaming to ${path} - ${res.status} - ${res.statusText}: ${text}`);
8688
- const error = new Error(`HTTP ${res.statusText}: ${text}`);
8689
- error.status = res.status;
8690
- throw error;
8691
9034
  }
8692
- // Create a new stream splitting the response at line endings while also handling cancellations
8693
- // by closing the reader.
8694
- const reader = res.body.getReader();
8695
- let readerReleased = false;
8696
- // This will close the network request and read stream
8697
- const closeReader = async () => {
8698
- try {
8699
- readerReleased = true;
8700
- await reader.cancel();
8701
- }
8702
- catch (ex) {
8703
- // an error will throw if the reader hasn't been used yet
8704
- }
8705
- reader.releaseLock();
8706
- };
8707
- const stream = new DataStream({
8708
- logger: this.logger,
8709
- mapLine: mapLine,
8710
- pressure: {
8711
- highWaterMark: 20,
8712
- lowWaterMark: 10
8713
- }
8714
- });
8715
- abortSignal?.addEventListener('abort', () => {
8716
- closeReader();
8717
- stream.close();
8718
- });
8719
- const decoder = this.createTextDecoder();
8720
- let buffer = '';
8721
- const consumeStream = async () => {
8722
- while (!stream.closed && !abortSignal?.aborted && !readerReleased) {
8723
- const { done, value } = await reader.read();
8724
- if (done) {
8725
- const remaining = buffer.trim();
8726
- if (remaining.length != 0) {
8727
- stream.enqueueData(remaining);
8728
- }
8729
- stream.close();
8730
- await closeReader();
8731
- return;
9035
+ reader = res.body.getReader();
9036
+ const stream = {
9037
+ next: async () => {
9038
+ if (controller.signal.aborted) {
9039
+ return doneResult;
8732
9040
  }
8733
- const data = decoder.decode(value, { stream: true });
8734
- buffer += data;
8735
- const lines = buffer.split('\n');
8736
- for (var i = 0; i < lines.length - 1; i++) {
8737
- var l = lines[i].trim();
8738
- if (l.length > 0) {
8739
- stream.enqueueData(l);
8740
- }
9041
+ try {
9042
+ return await reader.read();
8741
9043
  }
8742
- buffer = lines[lines.length - 1];
8743
- // Implement backpressure by waiting for the low water mark to be reached
8744
- if (stream.dataQueue.length > stream.highWatermark) {
8745
- await new Promise((resolve) => {
8746
- const dispose = stream.registerListener({
8747
- lowWater: async () => {
8748
- resolve();
8749
- dispose();
8750
- },
8751
- closed: () => {
8752
- resolve();
8753
- dispose();
8754
- }
8755
- });
8756
- });
9044
+ catch (ex) {
9045
+ if (controller.signal.aborted) {
9046
+ // .read() completes with an error if we cancel the reader, which we do to disconnect. So this is just
9047
+ // things working as intended, we can return a done event and consider the exception handled.
9048
+ return doneResult;
9049
+ }
9050
+ throw ex;
8757
9051
  }
8758
9052
  }
8759
9053
  };
8760
- consumeStream().catch(ex => this.logger.error('Error consuming stream', ex));
8761
- const l = stream.registerListener({
8762
- closed: () => {
8763
- closeReader();
8764
- l?.();
8765
- }
8766
- });
8767
- return stream;
9054
+ return { isBson: responseIsBson, stream };
9055
+ }
9056
+ /**
9057
+ * Posts a `/sync/stream` request.
9058
+ *
9059
+ * Depending on the `Content-Type` of the response, this returns strings for sync lines or encoded BSON documents as
9060
+ * {@link Uint8Array}s.
9061
+ */
9062
+ async fetchStream(options) {
9063
+ const { isBson, stream } = await this.fetchStreamRaw(options);
9064
+ if (isBson) {
9065
+ return extractBsonObjects(stream);
9066
+ }
9067
+ else {
9068
+ return extractJsonLines(stream, this.createTextDecoder());
9069
+ }
8768
9070
  }
8769
9071
  }
8770
9072
 
@@ -9272,6 +9574,19 @@ The next upload iteration will be delayed.`);
9272
9574
  }
9273
9575
  });
9274
9576
  }
9577
+ async receiveSyncLines(data) {
9578
+ const { options, connection, bson } = data;
9579
+ const remote = this.options.remote;
9580
+ if (connection.connectionMethod == exports.SyncStreamConnectionMethod.HTTP) {
9581
+ return await remote.fetchStream(options);
9582
+ }
9583
+ else {
9584
+ return await this.options.remote.socketStreamRaw({
9585
+ ...options,
9586
+ ...{ fetchStrategy: connection.fetchStrategy }
9587
+ }, bson);
9588
+ }
9589
+ }
9275
9590
  async legacyStreamingSyncIteration(signal, resolvedOptions) {
9276
9591
  const rawTables = resolvedOptions.serializedSchema?.raw_tables;
9277
9592
  if (rawTables != null && rawTables.length) {
@@ -9301,42 +9616,27 @@ The next upload iteration will be delayed.`);
9301
9616
  client_id: clientId
9302
9617
  }
9303
9618
  };
9304
- let stream;
9305
- if (resolvedOptions?.connectionMethod == exports.SyncStreamConnectionMethod.HTTP) {
9306
- stream = await this.options.remote.postStreamRaw(syncOptions, (line) => {
9307
- if (typeof line == 'string') {
9308
- return JSON.parse(line);
9309
- }
9310
- else {
9311
- // Directly enqueued by us
9312
- return line;
9313
- }
9314
- });
9315
- }
9316
- else {
9317
- const bson = await this.options.remote.getBSON();
9318
- stream = await this.options.remote.socketStreamRaw({
9319
- ...syncOptions,
9320
- ...{ fetchStrategy: resolvedOptions.fetchStrategy }
9321
- }, (payload) => {
9322
- if (payload instanceof Uint8Array) {
9323
- return bson.deserialize(payload);
9324
- }
9325
- else {
9326
- // Directly enqueued by us
9327
- return payload;
9328
- }
9329
- }, bson);
9330
- }
9619
+ const bson = await this.options.remote.getBSON();
9620
+ const source = await this.receiveSyncLines({
9621
+ options: syncOptions,
9622
+ connection: resolvedOptions,
9623
+ bson
9624
+ });
9625
+ const stream = injectable(map(source, (line) => {
9626
+ if (typeof line == 'string') {
9627
+ return JSON.parse(line);
9628
+ }
9629
+ else {
9630
+ return bson.deserialize(line);
9631
+ }
9632
+ }));
9331
9633
  this.logger.debug('Stream established. Processing events');
9332
9634
  this.notifyCompletedUploads = () => {
9333
- if (!stream.closed) {
9334
- stream.enqueueData({ crud_upload_completed: null });
9335
- }
9635
+ stream.inject({ crud_upload_completed: null });
9336
9636
  };
9337
- while (!stream.closed) {
9338
- const line = await stream.read();
9339
- if (!line) {
9637
+ while (true) {
9638
+ const { value: line, done } = await stream.next();
9639
+ if (done) {
9340
9640
  // The stream has closed while waiting
9341
9641
  return;
9342
9642
  }
@@ -9515,14 +9815,17 @@ The next upload iteration will be delayed.`);
9515
9815
  const syncImplementation = this;
9516
9816
  const adapter = this.options.adapter;
9517
9817
  const remote = this.options.remote;
9818
+ const controller = new AbortController();
9819
+ const abort = () => {
9820
+ return controller.abort(signal.reason);
9821
+ };
9822
+ signal.addEventListener('abort', abort);
9518
9823
  let receivingLines = null;
9519
9824
  let hadSyncLine = false;
9520
9825
  let hideDisconnectOnRestart = false;
9521
9826
  if (signal.aborted) {
9522
9827
  throw new AbortOperation('Connection request has been aborted');
9523
9828
  }
9524
- const abortController = new AbortController();
9525
- signal.addEventListener('abort', () => abortController.abort());
9526
9829
  // Pending sync lines received from the service, as well as local events that trigger a powersync_control
9527
9830
  // invocation (local events include refreshed tokens and completed uploads).
9528
9831
  // This is a single data stream so that we can handle all control calls from a single place.
@@ -9530,49 +9833,36 @@ The next upload iteration will be delayed.`);
9530
9833
  async function connect(instr) {
9531
9834
  const syncOptions = {
9532
9835
  path: '/sync/stream',
9533
- abortSignal: abortController.signal,
9836
+ abortSignal: controller.signal,
9534
9837
  data: instr.request
9535
9838
  };
9536
- if (resolvedOptions.connectionMethod == exports.SyncStreamConnectionMethod.HTTP) {
9537
- controlInvocations = await remote.postStreamRaw(syncOptions, (line) => {
9538
- if (typeof line == 'string') {
9539
- return {
9540
- command: exports.PowerSyncControlCommand.PROCESS_TEXT_LINE,
9541
- payload: line
9542
- };
9543
- }
9544
- else {
9545
- // Directly enqueued by us
9546
- return line;
9547
- }
9548
- });
9549
- }
9550
- else {
9551
- controlInvocations = await remote.socketStreamRaw({
9552
- ...syncOptions,
9553
- fetchStrategy: resolvedOptions.fetchStrategy
9554
- }, (payload) => {
9555
- if (payload instanceof Uint8Array) {
9556
- return {
9557
- command: exports.PowerSyncControlCommand.PROCESS_BSON_LINE,
9558
- payload: payload
9559
- };
9560
- }
9561
- else {
9562
- // Directly enqueued by us
9563
- return payload;
9564
- }
9565
- });
9566
- }
9839
+ controlInvocations = injectable(map(await syncImplementation.receiveSyncLines({
9840
+ options: syncOptions,
9841
+ connection: resolvedOptions
9842
+ }), (line) => {
9843
+ if (typeof line == 'string') {
9844
+ return {
9845
+ command: exports.PowerSyncControlCommand.PROCESS_TEXT_LINE,
9846
+ payload: line
9847
+ };
9848
+ }
9849
+ else {
9850
+ return {
9851
+ command: exports.PowerSyncControlCommand.PROCESS_BSON_LINE,
9852
+ payload: line
9853
+ };
9854
+ }
9855
+ }));
9567
9856
  // The rust client will set connected: true after the first sync line because that's when it gets invoked, but
9568
9857
  // we're already connected here and can report that.
9569
9858
  syncImplementation.updateSyncStatus({ connected: true });
9570
9859
  try {
9571
- while (!controlInvocations.closed) {
9572
- const line = await controlInvocations.read();
9573
- if (line == null) {
9574
- return;
9860
+ while (true) {
9861
+ let event = await controlInvocations.next();
9862
+ if (event.done) {
9863
+ break;
9575
9864
  }
9865
+ const line = event.value;
9576
9866
  await control(line.command, line.payload);
9577
9867
  if (!hadSyncLine) {
9578
9868
  syncImplementation.triggerCrudUpload();
@@ -9581,12 +9871,8 @@ The next upload iteration will be delayed.`);
9581
9871
  }
9582
9872
  }
9583
9873
  finally {
9584
- const activeInstructions = controlInvocations;
9585
- // We concurrently add events to the active data stream when e.g. a CRUD upload is completed or a token is
9586
- // refreshed. That would throw after closing (and we can't handle those events either way), so set this back
9587
- // to null.
9588
- controlInvocations = null;
9589
- await activeInstructions.close();
9874
+ abort();
9875
+ signal.removeEventListener('abort', abort);
9590
9876
  }
9591
9877
  }
9592
9878
  async function stop() {
@@ -9630,14 +9916,14 @@ The next upload iteration will be delayed.`);
9630
9916
  remote.invalidateCredentials();
9631
9917
  // Restart iteration after the credentials have been refreshed.
9632
9918
  remote.fetchCredentials().then((_) => {
9633
- controlInvocations?.enqueueData({ command: exports.PowerSyncControlCommand.NOTIFY_TOKEN_REFRESHED });
9919
+ controlInvocations?.inject({ command: exports.PowerSyncControlCommand.NOTIFY_TOKEN_REFRESHED });
9634
9920
  }, (err) => {
9635
9921
  syncImplementation.logger.warn('Could not prefetch credentials', err);
9636
9922
  });
9637
9923
  }
9638
9924
  }
9639
9925
  else if ('CloseSyncStream' in instruction) {
9640
- abortController.abort();
9926
+ controller.abort();
9641
9927
  hideDisconnectOnRestart = instruction.CloseSyncStream.hide_disconnect;
9642
9928
  }
9643
9929
  else if ('FlushFileSystem' in instruction) ;
@@ -9666,17 +9952,13 @@ The next upload iteration will be delayed.`);
9666
9952
  }
9667
9953
  await control(exports.PowerSyncControlCommand.START, JSON.stringify(options));
9668
9954
  this.notifyCompletedUploads = () => {
9669
- if (controlInvocations && !controlInvocations?.closed) {
9670
- controlInvocations.enqueueData({ command: exports.PowerSyncControlCommand.NOTIFY_CRUD_UPLOAD_COMPLETED });
9671
- }
9955
+ controlInvocations?.inject({ command: exports.PowerSyncControlCommand.NOTIFY_CRUD_UPLOAD_COMPLETED });
9672
9956
  };
9673
9957
  this.handleActiveStreamsChange = () => {
9674
- if (controlInvocations && !controlInvocations?.closed) {
9675
- controlInvocations.enqueueData({
9676
- command: exports.PowerSyncControlCommand.UPDATE_SUBSCRIPTIONS,
9677
- payload: JSON.stringify(this.activeStreams)
9678
- });
9679
- }
9958
+ controlInvocations?.inject({
9959
+ command: exports.PowerSyncControlCommand.UPDATE_SUBSCRIPTIONS,
9960
+ payload: JSON.stringify(this.activeStreams)
9961
+ });
9680
9962
  };
9681
9963
  await receivingLines;
9682
9964
  }
@@ -10023,7 +10305,8 @@ class TriggerManagerImpl {
10023
10305
  * we need to ensure we can cleanup the created resources.
10024
10306
  * We unfortunately cannot rely on transaction rollback.
10025
10307
  */
10026
- const cleanup = async (context) => {
10308
+ const cleanup = async (options) => {
10309
+ const { context } = options ?? {};
10027
10310
  disposeWarningListener();
10028
10311
  const doCleanup = async (tx) => {
10029
10312
  await this.removeTriggers(tx, triggerIds);
@@ -10119,7 +10402,7 @@ class TriggerManagerImpl {
10119
10402
  }
10120
10403
  catch (error) {
10121
10404
  try {
10122
- await cleanup();
10405
+ await cleanup(setupContext ? { context: setupContext } : undefined);
10123
10406
  }
10124
10407
  catch (cleanupError) {
10125
10408
  throw new AggregateError([error, cleanupError], 'Error during operation and cleanup');
@@ -10326,7 +10609,7 @@ class AbstractPowerSyncDatabase extends BaseObserver {
10326
10609
  this._schema = schema;
10327
10610
  this.ready = false;
10328
10611
  this.sdkVersion = '';
10329
- this.runExclusiveMutex = new asyncMutex.Mutex();
10612
+ this.runExclusiveMutex = new Mutex();
10330
10613
  // Start async init
10331
10614
  this.subscriptions = {
10332
10615
  firstStatusMatching: (predicate, abort) => this.waitForStatus(predicate, abort),
@@ -10692,7 +10975,7 @@ class AbstractPowerSyncDatabase extends BaseObserver {
10692
10975
  * @returns A transaction of CRUD operations to upload, or null if there are none
10693
10976
  */
10694
10977
  async getNextCrudTransaction() {
10695
- const iterator = this.getCrudTransactions()[symbolAsyncIterator]();
10978
+ const iterator = this.getCrudTransactions()[Symbol.asyncIterator]();
10696
10979
  return (await iterator.next()).value;
10697
10980
  }
10698
10981
  /**
@@ -10728,7 +11011,7 @@ class AbstractPowerSyncDatabase extends BaseObserver {
10728
11011
  */
10729
11012
  getCrudTransactions() {
10730
11013
  return {
10731
- [symbolAsyncIterator]: () => {
11014
+ [Symbol.asyncIterator]: () => {
10732
11015
  let lastCrudItemId = -1;
10733
11016
  const sql = `
10734
11017
  WITH RECURSIVE crud_entries AS (
@@ -10791,6 +11074,10 @@ SELECT * FROM crud_entries;
10791
11074
  * Execute a SQL write (INSERT/UPDATE/DELETE) query
10792
11075
  * and optionally return results.
10793
11076
  *
11077
+ * When using the default client-side [JSON-based view system](https://docs.powersync.com/architecture/client-architecture#client-side-schema-and-sqlite-database-structure),
11078
+ * the returned result's `rowsAffected` may be `0` for successful `UPDATE` and `DELETE` statements.
11079
+ * Use a `RETURNING` clause and inspect `result.rows` when you need to confirm which rows changed.
11080
+ *
10794
11081
  * @param sql The SQL query to execute
10795
11082
  * @param parameters Optional array of parameters to bind to the query
10796
11083
  * @returns The query result as an object with structured key-value pairs
@@ -10887,7 +11174,7 @@ SELECT * FROM crud_entries;
10887
11174
  async readTransaction(callback, lockTimeout = DEFAULT_LOCK_TIMEOUT_MS) {
10888
11175
  await this.waitForReady();
10889
11176
  return this.database.readTransaction(async (tx) => {
10890
- const res = await callback({ ...tx });
11177
+ const res = await callback(tx);
10891
11178
  await tx.rollback();
10892
11179
  return res;
10893
11180
  }, { timeoutMs: lockTimeout });
@@ -11884,6 +12171,8 @@ exports.ControlledExecutor = ControlledExecutor;
11884
12171
  exports.CrudBatch = CrudBatch;
11885
12172
  exports.CrudEntry = CrudEntry;
11886
12173
  exports.CrudTransaction = CrudTransaction;
12174
+ exports.DBAdapterDefaultMixin = DBAdapterDefaultMixin;
12175
+ exports.DBGetUtilsDefaultMixin = DBGetUtilsDefaultMixin;
11887
12176
  exports.DEFAULT_CRUD_BATCH_LIMIT = DEFAULT_CRUD_BATCH_LIMIT;
11888
12177
  exports.DEFAULT_CRUD_UPLOAD_THROTTLE_MS = DEFAULT_CRUD_UPLOAD_THROTTLE_MS;
11889
12178
  exports.DEFAULT_INDEX_COLUMN_OPTIONS = DEFAULT_INDEX_COLUMN_OPTIONS;
@@ -11891,7 +12180,6 @@ exports.DEFAULT_INDEX_OPTIONS = DEFAULT_INDEX_OPTIONS;
11891
12180
  exports.DEFAULT_LOCK_TIMEOUT_MS = DEFAULT_LOCK_TIMEOUT_MS;
11892
12181
  exports.DEFAULT_POWERSYNC_CLOSE_OPTIONS = DEFAULT_POWERSYNC_CLOSE_OPTIONS;
11893
12182
  exports.DEFAULT_POWERSYNC_DB_OPTIONS = DEFAULT_POWERSYNC_DB_OPTIONS;
11894
- exports.DEFAULT_PRESSURE_LIMITS = DEFAULT_PRESSURE_LIMITS;
11895
12183
  exports.DEFAULT_REMOTE_LOGGER = DEFAULT_REMOTE_LOGGER;
11896
12184
  exports.DEFAULT_REMOTE_OPTIONS = DEFAULT_REMOTE_OPTIONS;
11897
12185
  exports.DEFAULT_RETRY_DELAY_MS = DEFAULT_RETRY_DELAY_MS;
@@ -11902,7 +12190,6 @@ exports.DEFAULT_SYNC_CLIENT_IMPLEMENTATION = DEFAULT_SYNC_CLIENT_IMPLEMENTATION;
11902
12190
  exports.DEFAULT_TABLE_OPTIONS = DEFAULT_TABLE_OPTIONS;
11903
12191
  exports.DEFAULT_WATCH_QUERY_OPTIONS = DEFAULT_WATCH_QUERY_OPTIONS;
11904
12192
  exports.DEFAULT_WATCH_THROTTLE_MS = DEFAULT_WATCH_THROTTLE_MS;
11905
- exports.DataStream = DataStream;
11906
12193
  exports.DifferentialQueryProcessor = DifferentialQueryProcessor;
11907
12194
  exports.EMPTY_DIFFERENTIAL = EMPTY_DIFFERENTIAL;
11908
12195
  exports.FalsyComparator = FalsyComparator;
@@ -11915,10 +12202,12 @@ exports.LogLevel = LogLevel;
11915
12202
  exports.MAX_AMOUNT_OF_COLUMNS = MAX_AMOUNT_OF_COLUMNS;
11916
12203
  exports.MAX_OP_ID = MAX_OP_ID;
11917
12204
  exports.MEMORY_TRIGGER_CLAIM_MANAGER = MEMORY_TRIGGER_CLAIM_MANAGER;
12205
+ exports.Mutex = Mutex;
11918
12206
  exports.OnChangeQueryProcessor = OnChangeQueryProcessor;
11919
12207
  exports.OpType = OpType;
11920
12208
  exports.OplogEntry = OplogEntry;
11921
12209
  exports.Schema = Schema;
12210
+ exports.Semaphore = Semaphore;
11922
12211
  exports.SqliteBucketStorage = SqliteBucketStorage;
11923
12212
  exports.SyncDataBatch = SyncDataBatch;
11924
12213
  exports.SyncDataBucket = SyncDataBucket;
@@ -11948,9 +12237,9 @@ exports.isStreamingSyncCheckpointDiff = isStreamingSyncCheckpointDiff;
11948
12237
  exports.isStreamingSyncCheckpointPartiallyComplete = isStreamingSyncCheckpointPartiallyComplete;
11949
12238
  exports.isStreamingSyncData = isStreamingSyncData;
11950
12239
  exports.isSyncNewCheckpointRequest = isSyncNewCheckpointRequest;
11951
- exports.mutexRunExclusive = mutexRunExclusive;
11952
12240
  exports.parseQuery = parseQuery;
11953
12241
  exports.runOnSchemaChange = runOnSchemaChange;
11954
12242
  exports.sanitizeSQL = sanitizeSQL;
11955
12243
  exports.sanitizeUUID = sanitizeUUID;
12244
+ exports.timeoutSignal = timeoutSignal;
11956
12245
  //# sourceMappingURL=bundle.node.cjs.map