@tanstack/electric-db-collection 0.2.3 → 0.2.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/electric.ts CHANGED
@@ -40,6 +40,22 @@ export { isChangeMessage, isControlMessage } from "@electric-sql/client"
 
 const debug = DebugModule.debug(`ts/db:electric`)
 
+/**
+ * Symbol for internal test hooks (hidden from public API)
+ */
+export const ELECTRIC_TEST_HOOKS = Symbol(`electricTestHooks`)
+
+/**
+ * Internal test hooks interface (for testing only)
+ */
+export interface ElectricTestHooks {
+  /**
+   * Called before marking collection ready after first up-to-date in progressive mode
+   * Allows tests to pause and validate snapshot phase before atomic swap completes
+   */
+  beforeMarkingReady?: () => Promise<void>
+}
+
 /**
  * Type representing a transaction ID in ElectricSQL
  */
@@ -118,6 +134,12 @@ export interface ElectricCollectionConfig<
   shapeOptions: ShapeStreamOptions<GetExtensions<T>>
   syncMode?: ElectricSyncMode
 
+  /**
+   * Internal test hooks (for testing only)
+   * Hidden via Symbol to prevent accidental usage in production
+   */
+  [ELECTRIC_TEST_HOOKS]?: ElectricTestHooks
+
   /**
    * Optional asynchronous handler function called before an insert operation
    * @param params Object containing transaction and collection information
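
Note: the symbol-keyed hook field above is only reachable by importing ELECTRIC_TEST_HOOKS. A minimal sketch of how a test might wire it up in progressive mode follows; the collection id, shape options, and getKey values are illustrative assumptions, not taken from this diff.

    import {
      electricCollectionOptions,
      ELECTRIC_TEST_HOOKS,
      type ElectricTestHooks,
    } from "@tanstack/electric-db-collection"

    type Todo = { id: string; title: string }

    // Deferred gate the test resolves once it has inspected the snapshot phase.
    let releaseGate!: () => void
    const gate = new Promise<void>((resolve) => {
      releaseGate = resolve
    })

    const testHooks: ElectricTestHooks = {
      // Runs before markReady() fires on the first up-to-date in progressive mode.
      beforeMarkingReady: () => gate,
    }

    const options = electricCollectionOptions({
      id: `todos`,
      shapeOptions: {
        url: `http://localhost:3000/v1/shape`,
        params: { table: `todos` },
      },
      syncMode: `progressive`,
      getKey: (todo: Todo) => todo.id,
      // The symbol key keeps the hook off the string-keyed public config surface.
      [ELECTRIC_TEST_HOOKS]: testHooks,
    })

    // ...later in the test, after asserting on the snapshot-phase data:
    releaseGate()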
@@ -257,6 +279,93 @@ function hasTxids<T extends Row<unknown>>(
   return `txids` in message.headers && Array.isArray(message.headers.txids)
 }
 
+/**
+ * Creates a deduplicated loadSubset handler for progressive/on-demand modes
+ * Returns null for eager mode, or a DeduplicatedLoadSubset instance for other modes.
+ * Handles fetching snapshots in progressive mode during buffering phase,
+ * and requesting snapshots in on-demand mode
+ */
+function createLoadSubsetDedupe<T extends Row<unknown>>({
+  stream,
+  syncMode,
+  isBufferingInitialSync,
+  begin,
+  write,
+  commit,
+  collectionId,
+}: {
+  stream: ShapeStream<T>
+  syncMode: ElectricSyncMode
+  isBufferingInitialSync: () => boolean
+  begin: () => void
+  write: (mutation: {
+    type: `insert` | `update` | `delete`
+    value: T
+    metadata: Record<string, unknown>
+  }) => void
+  commit: () => void
+  collectionId?: string
+}): DeduplicatedLoadSubset | null {
+  // Eager mode doesn't need subset loading
+  if (syncMode === `eager`) {
+    return null
+  }
+
+  const loadSubset = async (opts: LoadSubsetOptions) => {
+    // In progressive mode, use fetchSnapshot during snapshot phase
+    if (isBufferingInitialSync()) {
+      // Progressive mode snapshot phase: fetch and apply immediately
+      const snapshotParams = compileSQL<T>(opts)
+      try {
+        const { data: rows } = await stream.fetchSnapshot(snapshotParams)
+
+        // Check again if we're still buffering - we might have received up-to-date
+        // and completed the atomic swap while waiting for the snapshot
+        if (!isBufferingInitialSync()) {
+          debug(
+            `${collectionId ? `[${collectionId}] ` : ``}Ignoring snapshot - sync completed while fetching`
+          )
+          return
+        }
+
+        // Apply snapshot data in a sync transaction (only if we have data)
+        if (rows.length > 0) {
+          begin()
+          for (const row of rows) {
+            write({
+              type: `insert`,
+              value: row.value,
+              metadata: {
+                ...row.headers,
+              },
+            })
+          }
+          commit()
+
+          debug(
+            `${collectionId ? `[${collectionId}] ` : ``}Applied snapshot with ${rows.length} rows`
+          )
+        }
+      } catch (error) {
+        debug(
+          `${collectionId ? `[${collectionId}] ` : ``}Error fetching snapshot: %o`,
+          error
+        )
+        throw error
+      }
+    } else if (syncMode === `progressive`) {
+      // Progressive mode after full sync complete: no need to load more
+      return
+    } else {
+      // On-demand mode: use requestSnapshot
+      const snapshotParams = compileSQL<T>(opts)
+      await stream.requestSnapshot(snapshotParams)
+    }
+  }
+
+  return new DeduplicatedLoadSubset({ loadSubset })
+}
+
 /**
  * Type for the awaitTxId utility function
  */
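
For orientation, the three sync modes this helper branches on behave as follows in this diff: eager syncs the whole shape and never loads subsets (the helper returns null), progressive serves subset requests via fetchSnapshot while the initial sync is still buffering and stops once the atomic swap completes, and on-demand forwards each deduplicated request to requestSnapshot. A hedged configuration sketch; the option values below are assumptions, not from this diff.

    import { electricCollectionOptions } from "@tanstack/electric-db-collection"

    type Todo = { id: string; title: string }

    const base = {
      shapeOptions: {
        url: `http://localhost:3000/v1/shape`,
        params: { table: `todos` },
      },
      getKey: (todo: Todo) => todo.id,
    }

    // Eager: the full shape is synced up front; no subset loading happens.
    const eagerOptions = electricCollectionOptions({ ...base, syncMode: `eager` })

    // Progressive: subsets load immediately from snapshots while the full sync
    // continues in the background and is swapped in atomically when complete.
    const progressiveOptions = electricCollectionOptions({
      ...base,
      syncMode: `progressive`,
    })

    // On-demand: only the predicates requested by live queries are synced.
    const onDemandOptions = electricCollectionOptions({ ...base, syncMode: `on-demand` })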
@@ -379,6 +488,7 @@ export function electricCollectionOptions(
     removePendingMatches,
     resolveMatchedPendingMatches,
     collectionId: config.id,
+    testHooks: config[ELECTRIC_TEST_HOOKS],
   })
 
   /**
@@ -631,6 +741,7 @@ function createElectricSync<T extends Row<unknown>>(
     removePendingMatches: (matchIds: Array<string>) => void
     resolveMatchedPendingMatches: () => void
     collectionId?: string
+    testHooks?: ElectricTestHooks
   }
 ): SyncConfig<T> {
   const {
@@ -642,6 +753,7 @@ function createElectricSync<T extends Row<unknown>>(
     removePendingMatches,
     resolveMatchedPendingMatches,
     collectionId,
+    testHooks,
   } = options
   const MAX_BATCH_MESSAGES = 1000 // Safety limit for message buffer
 
@@ -669,6 +781,26 @@ function createElectricSync<T extends Row<unknown>>(
     sync: (params: Parameters<SyncConfig<T>[`sync`]>[0]) => {
       const { begin, write, commit, markReady, truncate, collection } = params
 
+      // Wrap markReady to wait for test hook in progressive mode
+      let progressiveReadyGate: Promise<void> | null = null
+      const wrappedMarkReady = (isBuffering: boolean) => {
+        // Only create gate if we're in buffering phase (first up-to-date)
+        if (
+          isBuffering &&
+          syncMode === `progressive` &&
+          testHooks?.beforeMarkingReady
+        ) {
+          // Create a new gate promise for this sync cycle
+          progressiveReadyGate = testHooks.beforeMarkingReady()
+          progressiveReadyGate.then(() => {
+            markReady()
+          })
+        } else {
+          // No hook, not buffering, or already past first up-to-date
+          markReady()
+        }
+      }
+
       // Abort controller for the stream - wraps the signal if provided
       const abortController = new AbortController()
 
@@ -734,22 +866,24 @@
       const newSnapshots: Array<PostgresSnapshot> = []
       let hasReceivedUpToDate = false // Track if we've completed initial sync in progressive mode
 
+      // Progressive mode state
+      // Helper to determine if we're buffering the initial sync
+      const isBufferingInitialSync = () =>
+        syncMode === `progressive` && !hasReceivedUpToDate
+      const bufferedMessages: Array<Message<T>> = [] // Buffer change messages during initial sync
+
       // Create deduplicated loadSubset wrapper for non-eager modes
       // This prevents redundant snapshot requests when multiple concurrent
       // live queries request overlapping or subset predicates
-      const loadSubsetDedupe =
-        syncMode === `eager`
-          ? null
-          : new DeduplicatedLoadSubset({
-              loadSubset: async (opts: LoadSubsetOptions) => {
-                // In progressive mode, stop requesting snapshots once full sync is complete
-                if (syncMode === `progressive` && hasReceivedUpToDate) {
-                  return
-                }
-                const snapshotParams = compileSQL<T>(opts)
-                await stream.requestSnapshot(snapshotParams)
-              },
-            })
+      const loadSubsetDedupe = createLoadSubsetDedupe({
+        stream,
+        syncMode,
+        isBufferingInitialSync,
+        begin,
+        write,
+        commit,
+        collectionId,
+      })
 
       unsubscribeStream = stream.subscribe((messages: Array<Message<T>>) => {
         let hasUpToDate = false
@@ -769,7 +903,8 @@
          }
 
          // Check for txids in the message and add them to our store
-         if (hasTxids(message)) {
+         // Skip during buffered initial sync in progressive mode (txids will be extracted during atomic swap)
+         if (hasTxids(message) && !isBufferingInitialSync()) {
            message.headers.txids?.forEach((txid) => newTxids.add(txid))
          }
 
@@ -803,21 +938,30 @@
            relationSchema.setState(() => schema)
          }
 
-         if (!transactionStarted) {
-           begin()
-           transactionStarted = true
-         }
+         // In buffered initial sync of progressive mode, buffer messages instead of writing
+         if (isBufferingInitialSync()) {
+           bufferedMessages.push(message)
+         } else {
+           // Normal processing: write changes immediately
+           if (!transactionStarted) {
+             begin()
+             transactionStarted = true
+           }
 
-         write({
-           type: message.headers.operation,
-           value: message.value,
-           // Include the primary key and relation info in the metadata
-           metadata: {
-             ...message.headers,
-           },
-         })
+           write({
+             type: message.headers.operation,
+             value: message.value,
+             // Include the primary key and relation info in the metadata
+             metadata: {
+               ...message.headers,
+             },
+           })
+         }
        } else if (isSnapshotEndMessage(message)) {
-         newSnapshots.push(parseSnapshotMessage(message))
+         // Skip snapshot-end tracking during buffered initial sync (will be extracted during atomic swap)
+         if (!isBufferingInitialSync()) {
+           newSnapshots.push(parseSnapshotMessage(message))
+         }
          hasSnapshotEnd = true
        } else if (isUpToDateMessage(message)) {
          hasUpToDate = true
@@ -841,23 +985,71 @@
            // Reset flags so we continue accumulating changes until next up-to-date
            hasUpToDate = false
            hasSnapshotEnd = false
-           hasReceivedUpToDate = false // Reset for progressive mode - we're starting a new sync
+           hasReceivedUpToDate = false // Reset for progressive mode (isBufferingInitialSync will reflect this)
+           bufferedMessages.length = 0 // Clear buffered messages
          }
        }
 
        if (hasUpToDate || hasSnapshotEnd) {
-         // Clear the current batch buffer since we're now up-to-date
-         currentBatchMessages.setState(() => [])
+         // PROGRESSIVE MODE: Atomic swap on first up-to-date
+         if (isBufferingInitialSync() && hasUpToDate) {
+           debug(
+             `${collectionId ? `[${collectionId}] ` : ``}Progressive mode: Performing atomic swap with ${bufferedMessages.length} buffered messages`
+           )
+
+           // Start atomic swap transaction
+           begin()
+
+           // Truncate to clear all snapshot data
+           truncate()
+
+           // Apply all buffered change messages and extract txids/snapshots
+           for (const bufferedMsg of bufferedMessages) {
+             if (isChangeMessage(bufferedMsg)) {
+               write({
+                 type: bufferedMsg.headers.operation,
+                 value: bufferedMsg.value,
+                 metadata: {
+                   ...bufferedMsg.headers,
+                 },
+               })
+
+               // Extract txids from buffered messages (will be committed to store after transaction)
+               if (hasTxids(bufferedMsg)) {
+                 bufferedMsg.headers.txids?.forEach((txid) =>
+                   newTxids.add(txid)
+                 )
+               }
+             } else if (isSnapshotEndMessage(bufferedMsg)) {
+               // Extract snapshots from buffered messages (will be committed to store after transaction)
+               newSnapshots.push(parseSnapshotMessage(bufferedMsg))
+             }
+           }
 
-         // Commit transaction if one was started
-         if (transactionStarted) {
+           // Commit the atomic swap
            commit()
-           transactionStarted = false
+
+           // Exit buffering phase by marking that we've received up-to-date
+           // isBufferingInitialSync() will now return false
+           bufferedMessages.length = 0
+
+           debug(
+             `${collectionId ? `[${collectionId}] ` : ``}Progressive mode: Atomic swap complete, now in normal sync mode`
+           )
+         } else {
+           // Normal mode or on-demand: commit transaction if one was started
+           if (transactionStarted) {
+             commit()
+             transactionStarted = false
+           }
          }
 
+         // Clear the current batch buffer since we're now up-to-date
+         currentBatchMessages.setState(() => [])
+
          if (hasUpToDate || (hasSnapshotEnd && syncMode === `on-demand`)) {
            // Mark the collection as ready now that sync is up to date
-           markReady()
+           wrappedMarkReady(isBufferingInitialSync())
          }
 
          // Track that we've received the first up-to-date for progressive mode
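
Taken together, the buffering hunks above implement a buffer-then-swap strategy for progressive mode: snapshot rows are written as they arrive, live change messages are held back, and on the first up-to-date the collection is truncated and the buffered changes are replayed in a single transaction before normal streaming resumes. A simplified, self-contained sketch of that idea follows; the names are generic and are not the library's internal types.

    type Change<T> = { type: `insert` | `update` | `delete`; value: T }

    class ProgressiveBuffer<T> {
      private buffered: Array<Change<T>> = []
      private synced = false

      constructor(
        private apply: (change: Change<T>) => void,
        private truncate: () => void
      ) {}

      // Snapshot rows are applied immediately so queries can run against partial data.
      applySnapshotRow(value: T): void {
        this.apply({ type: `insert`, value })
      }

      // Live changes are buffered until the initial sync completes.
      onChange(change: Change<T>): void {
        if (this.synced) {
          this.apply(change)
        } else {
          this.buffered.push(change)
        }
      }

      // First up-to-date: drop the snapshot data, replay the buffered log, then stream normally.
      onUpToDate(): void {
        if (this.synced) return
        this.truncate()
        for (const change of this.buffered) this.apply(change)
        this.buffered.length = 0
        this.synced = true
      }
    }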
@@ -183,7 +183,18 @@ function compileFunction(
 }
 
 function isBinaryOp(name: string): boolean {
-  const binaryOps = [`eq`, `gt`, `gte`, `lt`, `lte`, `and`, `or`, `in`]
+  const binaryOps = [
+    `eq`,
+    `gt`,
+    `gte`,
+    `lt`,
+    `lte`,
+    `and`,
+    `or`,
+    `in`,
+    `like`,
+    `ilike`,
+  ]
   return binaryOps.includes(name)
 }
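
The two new entries let the subset SQL compiler treat like and ilike as binary operators, matching the like and ilike helpers in the TanStack DB expression builder. A hedged usage sketch follows; the collection setup and the imported helper names are assumptions based on the public TanStack DB APIs, not on this diff.

    import { createCollection, createLiveQueryCollection, ilike } from "@tanstack/db"
    import { electricCollectionOptions } from "@tanstack/electric-db-collection"

    type Todo = { id: string; title: string }

    const todos = createCollection(
      electricCollectionOptions({
        id: `todos`,
        shapeOptions: {
          url: `http://localhost:3000/v1/shape`,
          params: { table: `todos` },
        },
        syncMode: `on-demand`,
        getKey: (todo: Todo) => todo.id,
      })
    )

    // In on-demand mode this predicate is compiled to SQL for the snapshot request;
    // `ilike` (and `like`) now pass the compiler's binary-operator check.
    const urgent = createLiveQueryCollection((q) =>
      q.from({ todo: todos }).where(({ todo }) => ilike(todo.title, `%urgent%`))
    )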